repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
ribozz/sphinx-argparse | sphinxarg/markdown.py | finalizeSection | python | def finalizeSection(section):
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt | Correct the nxt and parent for each child | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L314-L325 | null | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
    """
    Debugging helper: print the CommonMark node tree rooted at *node*.

    Each node is printed as its type; when the node carries literal text,
    the text follows after a tab.  *space* is the indentation prefix,
    grown by one space per nesting level to convey the tree structure.

    Usage::

        content = Parser().parse('Some big text block\n===\n\nwith content\n')
        customWalker(content)
    """
    # Bug fix: not every CommonMark node type has a ``literal`` attribute.
    # The original used a bare ``except: pass``, which would also hide any
    # unrelated error; getattr expresses the intent without the trap.
    txt = getattr(node, 'literal', None)
    if txt is None or txt == '':
        print('{}{}'.format(space, node.t))
    else:
        print('{}{}\t{}'.format(space, node.t, txt))
    cur = node.first_child
    if cur:
        while cur is not None:
            customWalker(cur, space + ' ')
            cur = cur.nxt
def paragraph(node):
    """
    Process a paragraph, which includes all content under it
    """
    text = ''
    if node.string_content is not None:
        text = node.string_content
    # NOTE(review): ``text`` is a plain string here, so ``' '.join(text)``
    # inserts a space between every character.  Presumably the intent was to
    # join *lines* — confirm against upstream before relying on this text;
    # in practice the visible content comes from the MarkDown() children below.
    o = nodes.paragraph('', ' '.join(text))
    # Preserve the source line for docutils error reporting.
    o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o.append(n)
    return o
def text(node):
    """
    Convert a plain CommonMark text node into a docutils Text element.
    """
    return nodes.Text(node.literal)
def hardbreak(node):
    """
    Render a hard line break (``<br />`` in HTML) as a newline text node.
    """
    return nodes.Text('\n')
def softbreak(node):
    """
    Render a soft break (a line ending or collapsible space) as a newline.
    """
    return nodes.Text('\n')
def reference(node):
    """
    Build a docutils hyperlink from a CommonMark link node.

    The destination becomes ``refuri`` and the optional link title becomes
    the reference name.  Alt text is not supported, since docutils offers
    no equivalent attribute.
    """
    ref = nodes.reference()
    ref['refuri'] = node.destination
    if node.title:
        ref['name'] = node.title
    for child in MarkDown(node):
        ref += child
    return ref
def emphasis(node):
    """
    Render an emphasised (italic) span, converting all of its children.
    """
    out = nodes.emphasis()
    for child in MarkDown(node):
        out += child
    return out
def strong(node):
    """
    Render a strong (bold) span, converting all of its children.
    """
    out = nodes.strong()
    for child in MarkDown(node):
        out += child
    return out
def literal(node):
    """
    Convert inline code to a docutils literal node.

    When the node carries language info, the content is tokenised with
    docutils' pygments-backed Lexer and emitted as highlighted inline
    nodes; otherwise the raw literal text is used.
    """
    rendered = []
    if node.info is not None:
        try:
            # Bug fix: highlighted tokens must be docutils ``nodes.inline``
            # elements; ``node.inline`` does not exist on CommonMark nodes,
            # so the previous code always raised AttributeError, which the
            # bare ``except: pass`` swallowed — highlighting never happened.
            for token_classes, token_text in Lexer(node.literal, node.info, tokennames="long"):
                rendered.append(nodes.inline(classes=token_classes, text=token_text))
        except Exception:
            # Highlighting is best-effort: fall back to plain text on any
            # lexer failure rather than aborting the whole build.
            rendered = []
    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if rendered:
        o = nodes.literal(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal(text=node.literal, classes=classes)
    for n in MarkDown(node):
        o += n
    return o
def literal_block(node):
    """
    Convert a fenced/indented code block to a docutils literal_block.

    When the fence carries language info, the content is tokenised with
    docutils' pygments-backed Lexer and emitted as highlighted inline
    nodes; otherwise the raw text is used.  The source line is preserved
    for docutils error reporting.
    """
    rendered = []
    if node.info is not None:
        try:
            # Bug fix: highlighted tokens must be docutils ``nodes.inline``
            # elements; ``node.inline`` does not exist on CommonMark nodes,
            # so the previous code always raised AttributeError, which the
            # bare ``except: pass`` swallowed — highlighting never happened.
            for token_classes, token_text in Lexer(node.literal, node.info, tokennames="long"):
                rendered.append(nodes.inline(classes=token_classes, text=token_text))
        except Exception:
            # Highlighting is best-effort: fall back to plain text on any
            # lexer failure rather than aborting the whole build.
            rendered = []
    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if rendered:
        o = nodes.literal_block(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal_block(text=node.literal, classes=classes)
    o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o += n
    return o
def raw(node):
    """
    Pass HTML through verbatim as a docutils raw node (format ``html``),
    keeping the source line when one is available.
    """
    out = nodes.raw(node.literal, node.literal, format='html')
    if node.sourcepos is not None:
        out.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        out += child
    return out
def transition(node):
    """
    Render a thematic break (an ``<hr>`` in HTML) as a docutils transition.

    The CommonMark node itself is ignored; thematic breaks have no children.
    """
    return nodes.transition()
def title(node):
    """
    Convert a heading into a docutils title node.

    Only the heading's first child (its literal text) is used, both as the
    raw source and as the rendered text of the title.
    """
    return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
    """
    Render a custom 'MDsection' node as a docutils section.

    Every docutils section needs an id, so the id/name is taken from the
    leading heading's text when one is present; then all children are
    converted and appended.
    """
    heading_text = ''  # All sections need an id
    first = node.first_child
    if first is not None and first.t == u'heading':
        heading_text = first.first_child.literal
    out = nodes.section(ids=[heading_text], names=[heading_text])
    for child in MarkDown(node):
        out += child
    return out
def block_quote(node):
    """
    Convert a CommonMark block quote, preserving its source line.
    """
    quote = nodes.block_quote()
    quote.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        quote += child
    return quote
def image(node):
    """
    Convert an image node to a docutils image.

    The URI comes from the node destination; the first child, when present,
    supplies the alt text.  reStructuredText can't express image titles, so
    the title is dropped.
    """
    o = nodes.image(uri=node.destination)
    if node.first_child is not None:
        o['alt'] = node.first_child.literal
    return o
def listItem(node):
    """
    Convert a single list item, including all of its content.
    """
    item = nodes.list_item()
    for child in MarkDown(node):
        item += child
    return item
def listNode(node):
    """
    Convert a CommonMark list (bulleted or enumerated).

    Enumerated lists are always rendered with arabic numbering; the
    delimiter suffix only shows up as ``.`` in HTML output.
    """
    data = node.list_data
    if data['type'] == u'bullet':
        out = nodes.bullet_list(bullet=data['bullet_char'])
    else:
        out = nodes.enumerated_list(suffix=data['delimiter'],
                                    enumtype='arabic',
                                    start=data['start'])
    for child in MarkDown(node):
        out += child
    return out
def MarkDown(node):
    """
    Convert every child of *node* from CommonMark to docutils nodes.

    Returns the converted children as a list.  Unknown node types are
    reported on stdout (with a full dump of the node) and skipped.
    """
    # Map CommonMark node types to their converter functions; this replaces
    # a long if/elif chain with a single lookup per child.
    dispatch = {
        'paragraph': paragraph,
        'text': text,
        'softbreak': softbreak,
        'linebreak': hardbreak,
        'link': reference,
        'heading': title,
        'emph': emphasis,
        'strong': strong,
        'code': literal,
        'code_block': literal_block,
        'html_inline': raw,
        'html_block': raw,
        'block_quote': block_quote,
        'thematic_break': transition,
        'image': image,
        'list': listNode,
        'item': listItem,
        'MDsection': section,
    }
    output = []
    cur = node.first_child
    while cur is not None:
        handler = dispatch.get(cur.t)
        if handler is not None:
            output.append(handler(cur))
        else:
            print('Received unhandled type: {}. Full print of node:'.format(cur.t))
            cur.pretty()
        cur = cur.nxt
    return output
def nestSections(block, level=1):
    """
    Sections aren't handled by CommonMark at the moment.
    This function adds sections to a block of nodes.
    'title' nodes with an assigned level below 'level' will be put in a child section.
    If there are no child nodes with titles of level 'level' then nothing is done
    """
    cur = block.first_child
    if cur is not None:
        children = []
        # Do we need to do anything?
        # First pass: scan for at least one heading of exactly this level.
        nest = False
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                nest = True
                break
            cur = cur.nxt
        if not nest:
            return
        # NOTE: this local ``section`` shadows the module-level section()
        # converter for the remainder of the function.
        section = Node('MDsection', 0)
        section.parent = block
        cur = block.first_child
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                # Found a split point, flush the last section if needed
                if section.first_child is not None:
                    finalizeSection(section)
                    children.append(section)
                    section = Node('MDsection', 0)
            # Save the sibling link first — append_child presumably relinks
            # cur's siblings (commonmark Node behaviour; confirm).
            nxt = cur.nxt
            # Avoid adding sections without titles at the start
            if section.first_child is None:
                if cur.t == 'heading' and cur.level == level:
                    section.append_child(cur)
                else:
                    children.append(cur)
            else:
                section.append_child(cur)
            cur = nxt
        # If there's only 1 child then don't bother
        # (flush the trailing section if it accumulated any content)
        if section.first_child is not None:
            finalizeSection(section)
            children.append(section)
        # Rebuild the block's child chain from ``children``, recursing into
        # each newly created section to nest the next heading level.
        block.first_child = None
        block.last_child = None
        nextLevel = level + 1
        for child in children:
            # Handle nesting
            if child.t == 'MDsection':
                nestSections(child, level=nextLevel)
            # Append
            if block.first_child is None:
                block.first_child = child
            else:
                block.last_child.nxt = child
            child.parent = block
            child.nxt = None
            child.prev = block.last_child
            block.last_child = child
def parseMarkDownBlock(text):
    """
    Parse a block of MarkDown text into a list of docutils nodes.

    CommonMark itself does not nest headings into sections, so the parsed
    tree is post-processed with nestSections() before conversion.
    """
    tree = Parser().parse(text)
    # CommonMark can't nest sections, so do it manually
    nestSections(tree)
    return MarkDown(tree)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | nestSections | python | def nestSections(block, level=1):
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child | Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L328-L390 | [
"def finalizeSection(section):\n \"\"\"\n Correct the nxt and parent for each child\n \"\"\"\n cur = section.first_child\n last = section.last_child\n if last is not None:\n last.nxt = None\n\n while cur is not None:\n cur.parent = section\n cur = cur.nxt\n",
"def nestSections(block, level=1):\n \"\"\"\n Sections aren't handled by CommonMark at the moment.\n This function adds sections to a block of nodes.\n 'title' nodes with an assigned level below 'level' will be put in a child section.\n If there are no child nodes with titles of level 'level' then nothing is done\n \"\"\"\n cur = block.first_child\n if cur is not None:\n children = []\n # Do we need to do anything?\n nest = False\n while cur is not None:\n if cur.t == 'heading' and cur.level == level:\n nest = True\n break\n cur = cur.nxt\n if not nest:\n return\n\n section = Node('MDsection', 0)\n section.parent = block\n cur = block.first_child\n while cur is not None:\n if cur.t == 'heading' and cur.level == level:\n # Found a split point, flush the last section if needed\n if section.first_child is not None:\n finalizeSection(section)\n children.append(section)\n section = Node('MDsection', 0)\n nxt = cur.nxt\n # Avoid adding sections without titles at the start\n if section.first_child is None:\n if cur.t == 'heading' and cur.level == level:\n section.append_child(cur)\n else:\n children.append(cur)\n else:\n section.append_child(cur)\n cur = nxt\n\n # If there's only 1 child then don't bother\n if section.first_child is not None:\n finalizeSection(section)\n children.append(section)\n\n block.first_child = None\n block.last_child = None\n nextLevel = level + 1\n for child in children:\n # Handle nesting\n if child.t == 'MDsection':\n nestSections(child, level=nextLevel)\n\n # Append\n if block.first_child is None:\n block.first_child = child\n else:\n block.last_child.nxt = child\n child.parent = block\n child.nxt = None\n child.prev = block.last_child\n block.last_child = child\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
    """
    Fix up the sibling/parent links of a freshly built section node.

    The last child's ``nxt`` pointer is cleared so the sibling chain ends
    inside the section, and every child's ``parent`` is repointed at the
    section itself.
    """
    tail = section.last_child
    if tail is not None:
        tail.nxt = None
    child = section.first_child
    while child is not None:
        child.parent = section
        child = child.nxt
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | parseMarkDownBlock | python | def parseMarkDownBlock(text):
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block) | Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[] | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L393-L404 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n",
"def nestSections(block, level=1):\n \"\"\"\n Sections aren't handled by CommonMark at the moment.\n This function adds sections to a block of nodes.\n 'title' nodes with an assigned level below 'level' will be put in a child section.\n If there are no child nodes with titles of level 'level' then nothing is done\n \"\"\"\n cur = block.first_child\n if cur is not None:\n children = []\n # Do we need to do anything?\n nest = False\n while cur is not None:\n if cur.t == 'heading' and cur.level == level:\n nest = True\n break\n cur = cur.nxt\n if not nest:\n return\n\n section = Node('MDsection', 0)\n section.parent = block\n cur = block.first_child\n while cur is not None:\n if cur.t == 'heading' and cur.level == level:\n # Found a split point, flush the last section if needed\n if section.first_child is not None:\n finalizeSection(section)\n children.append(section)\n section = Node('MDsection', 0)\n nxt = cur.nxt\n # Avoid adding sections without titles at the start\n if section.first_child is None:\n if cur.t == 'heading' and cur.level == level:\n section.append_child(cur)\n else:\n children.append(cur)\n else:\n section.append_child(cur)\n cur = nxt\n\n # If there's only 1 child then don't bother\n if section.first_child is not None:\n finalizeSection(section)\n children.append(section)\n\n block.first_child = None\n block.last_child = None\n nextLevel = level + 1\n for child in children:\n # Handle nesting\n if child.t == 'MDsection':\n nestSections(child, level=nextLevel)\n\n # Append\n if block.first_child is None:\n block.first_child = child\n else:\n block.last_child.nxt = child\n child.parent = block\n child.nxt = None\n child.prev = block.last_child\n block.last_child = child\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
|
ribozz/sphinx-argparse | sphinxarg/ext.py | renderList | python | def renderList(l, markDownHelp, settings=None):
if len(l) == 0:
return []
if markDownHelp:
from sphinxarg.markdown import parseMarkDownBlock
return parseMarkDownBlock('\n\n'.join(l) + '\n')
else:
all_children = []
for element in l:
if isinstance(element, str):
if settings is None:
settings = OptionParser(components=(Parser,)).get_default_values()
document = new_document(None, settings)
Parser().parse(element + '\n', document)
all_children += document.children
elif isinstance(element, nodes.definition):
all_children += element
return all_children | Given a list of reStructuredText or MarkDown sections, return a docutils node list | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/ext.py#L54-L75 | [
"def parseMarkDownBlock(text):\n \"\"\"\n Parses a block of text, returning a list of docutils nodes\n\n >>> parseMarkdownBlock(\"Some\\n====\\n\\nblock of text\\n\\nHeader\\n======\\n\\nblah\\n\")\n []\n \"\"\"\n block = Parser().parse(text)\n # CommonMark can't nest sections, so do it manually\n nestSections(block)\n\n return MarkDown(block)\n"
] | import sys
from argparse import ArgumentParser
import os
from docutils import nodes
from docutils.statemachine import StringList
from docutils.parsers.rst.directives import flag, unchanged
from docutils.parsers.rst import Parser, Directive
from docutils.utils import new_document
from docutils.frontend import OptionParser
from sphinx.util.nodes import nested_parse_with_titles
from sphinxarg.parser import parse_parser, parser_navigate
def map_nested_definitions(nested_content):
if nested_content is None:
raise Exception('Nested content should be iterable, not null')
# build definition dictionary
definitions = {}
for item in nested_content:
if not isinstance(item, nodes.definition_list):
continue
for subitem in item:
if not isinstance(subitem, nodes.definition_list_item):
continue
if not len(subitem.children) > 0:
continue
classifier = '@after'
idx = subitem.first_child_matching_class(nodes.classifier)
if idx is not None:
ci = subitem[idx]
if len(ci.children) > 0:
classifier = ci.children[0].astext()
if classifier is not None and classifier not in (
'@replace', '@before', '@after', '@skip'):
raise Exception('Unknown classifier: %s' % classifier)
idx = subitem.first_child_matching_class(nodes.term)
if idx is not None:
term = subitem[idx]
if len(term.children) > 0:
term = term.children[0].astext()
idx = subitem.first_child_matching_class(nodes.definition)
if idx is not None:
subContent = []
for _ in subitem[idx]:
if isinstance(_, nodes.definition_list):
subContent.append(_)
definitions[term] = (classifier, subitem[idx], subContent)
return definitions
def print_action_groups(data, nested_content, markDownHelp=False, settings=None):
"""
Process all 'action groups', which are also include 'Options' and 'Required
arguments'. A list of nodes is returned.
"""
definitions = map_nested_definitions(nested_content)
nodes_list = []
if 'action_groups' in data:
for action_group in data['action_groups']:
# Every action group is comprised of a section, holding a title, the description, and the option group (members)
section = nodes.section(ids=[action_group['title']])
section += nodes.title(action_group['title'], action_group['title'])
desc = []
if action_group['description']:
desc.append(action_group['description'])
# Replace/append/prepend content to the description according to nested content
subContent = []
if action_group['title'] in definitions:
classifier, s, subContent = definitions[action_group['title']]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
elif classifier == '@skip':
continue
if len(subContent) > 0:
for k, v in map_nested_definitions(subContent).items():
definitions[k] = v
# Render appropriately
for element in renderList(desc, markDownHelp):
section += element
localDefinitions = definitions
if len(subContent) > 0:
localDefinitions = {k: v for k, v in definitions.items()}
for k, v in map_nested_definitions(subContent).items():
localDefinitions[k] = v
items = []
# Iterate over action group members
for entry in action_group['options']:
"""
Members will include:
default The default value. This may be ==SUPPRESS==
name A list of option names (e.g., ['-h', '--help']
help The help message string
There may also be a 'choices' member.
"""
# Build the help text
arg = []
if 'choices' in entry:
arg.append('Possible choices: {}\n'.format(", ".join([str(c) for c in entry['choices']])))
if 'help' in entry:
arg.append(entry['help'])
if entry['default'] is not None and entry['default'] not in ['"==SUPPRESS=="', '==SUPPRESS==']:
if entry['default'] == '':
arg.append('Default: ""')
else:
arg.append('Default: {}'.format(entry['default']))
# Handle nested content, the term used in the dict has the comma removed for simplicity
desc = arg
term = ' '.join(entry['name'])
if term in localDefinitions:
classifier, s, subContent = localDefinitions[term]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
term = ', '.join(entry['name'])
n = nodes.option_list_item('',
nodes.option_group('', nodes.option_string(text=term)),
nodes.description('', *renderList(desc, markDownHelp, settings)))
items.append(n)
section += nodes.option_list('', *items)
nodes_list.append(section)
return nodes_list
def print_subcommands(data, nested_content, markDownHelp=False, settings=None):
"""
Each subcommand is a dictionary with the following keys:
['usage', 'action_groups', 'bare_usage', 'name', 'help']
In essence, this is all tossed in a new section with the title 'name'.
Apparently there can also be a 'description' entry.
"""
definitions = map_nested_definitions(nested_content)
items = []
if 'children' in data:
subCommands = nodes.section(ids=["Sub-commands:"])
subCommands += nodes.title('Sub-commands:', 'Sub-commands:')
for child in data['children']:
sec = nodes.section(ids=[child['name']])
sec += nodes.title(child['name'], child['name'])
if 'description' in child and child['description']:
desc = [child['description']]
elif child['help']:
desc = [child['help']]
else:
desc = ['Undocumented']
# Handle nested content
subContent = []
if child['name'] in definitions:
classifier, s, subContent = definitions[child['name']]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
for element in renderList(desc, markDownHelp):
sec += element
sec += nodes.literal_block(text=child['bare_usage'])
for x in print_action_groups(child, nested_content + subContent, markDownHelp,
settings=settings):
sec += x
for x in print_subcommands(child, nested_content + subContent, markDownHelp,
settings=settings):
sec += x
if 'epilog' in child and child['epilog']:
for element in renderList([child['epilog']], markDownHelp):
sec += element
subCommands += sec
items.append(subCommands)
return items
def ensureUniqueIDs(items):
"""
If action groups are repeated, then links in the table of contents will
just go to the first of the repeats. This may not be desirable, particularly
in the case of subcommands where the option groups have different members.
This function updates the title IDs by adding _repeatX, where X is a number
so that the links are then unique.
"""
s = set()
for item in items:
for n in item.traverse(descend=True, siblings=True, ascend=False):
if isinstance(n, nodes.section):
ids = n['ids']
for idx, id in enumerate(ids):
if id not in s:
s.add(id)
else:
i = 1
while "{}_repeat{}".format(id, i) in s:
i += 1
ids[idx] = "{}_repeat{}".format(id, i)
s.add(ids[idx])
n['ids'] = ids
class ArgParseDirective(Directive):
has_content = True
option_spec = dict(module=unchanged, func=unchanged, ref=unchanged,
prog=unchanged, path=unchanged, nodefault=flag,
nodefaultconst=flag, filename=unchanged,
manpage=unchanged, nosubcommands=unchanged, passparser=flag,
noepilog=unchanged, nodescription=unchanged,
markdown=flag, markdownhelp=flag)
def _construct_manpage_specific_structure(self, parser_info):
"""
Construct a typical man page consisting of the following elements:
NAME (automatically generated, out of our control)
SYNOPSIS
DESCRIPTION
OPTIONS
FILES
SEE ALSO
BUGS
"""
items = []
# SYNOPSIS section
synopsis_section = nodes.section(
'',
nodes.title(text='Synopsis'),
nodes.literal_block(text=parser_info["bare_usage"]),
ids=['synopsis-section'])
items.append(synopsis_section)
# DESCRIPTION section
if 'nodescription' not in self.options:
description_section = nodes.section(
'',
nodes.title(text='Description'),
nodes.paragraph(text=parser_info.get(
'description', parser_info.get(
'help', "undocumented").capitalize())),
ids=['description-section'])
nested_parse_with_titles(
self.state, self.content, description_section)
items.append(description_section)
if parser_info.get('epilog') and 'noepilog' not in self.options:
# TODO: do whatever sphinx does to understand ReST inside
# docstrings magically imported from other places. The nested
# parse method invoked above seem to be able to do this but
# I haven't found a way to do it for arbitrary text
if description_section:
description_section += nodes.paragraph(
text=parser_info['epilog'])
else:
description_section = nodes.paragraph(
text=parser_info['epilog'])
items.append(description_section)
# OPTIONS section
options_section = nodes.section(
'',
nodes.title(text='Options'),
ids=['options-section'])
if 'args' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Positional arguments:')
options_section += self._format_positional_arguments(parser_info)
for action_group in parser_info['action_groups']:
if 'options' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text=action_group['title'])
options_section += self._format_optional_arguments(action_group)
# NOTE: we cannot generate NAME ourselves. It is generated by
# docutils.writers.manpage
# TODO: items.append(files)
# TODO: items.append(see also)
# TODO: items.append(bugs)
if len(options_section.children) > 1:
items.append(options_section)
if 'nosubcommands' not in self.options:
# SUBCOMMANDS section (non-standard)
subcommands_section = nodes.section(
'',
nodes.title(text='Sub-Commands'),
ids=['subcommands-section'])
if 'children' in parser_info:
subcommands_section += self._format_subcommands(parser_info)
if len(subcommands_section) > 1:
items.append(subcommands_section)
if os.getenv("INCLUDE_DEBUG_SECTION"):
import json
# DEBUG section (non-standard)
debug_section = nodes.section(
'',
nodes.title(text="Argparse + Sphinx Debugging"),
nodes.literal_block(text=json.dumps(parser_info, indent=' ')),
ids=['debug-section'])
items.append(debug_section)
return items
def _format_positional_arguments(self, parser_info):
assert 'args' in parser_info
items = []
for arg in parser_info['args']:
arg_items = []
if arg['help']:
arg_items.append(nodes.paragraph(text=arg['help']))
elif 'choices' not in arg:
arg_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
arg_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(arg['choices'])))
items.append(
nodes.option_list_item(
'',
nodes.option_group(
'', nodes.option(
'', nodes.option_string(text=arg['metavar'])
)
),
nodes.description('', *arg_items)))
return nodes.option_list('', *items)
def _format_optional_arguments(self, parser_info):
assert 'options' in parser_info
items = []
for opt in parser_info['options']:
names = []
opt_items = []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] not in ['"==SUPPRESS=="', '==SUPPRESS==']:
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
if opt['help']:
opt_items.append(nodes.paragraph(text=opt['help']))
elif 'choices' not in opt:
opt_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
opt_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(opt['choices'])))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *opt_items)))
return nodes.option_list('', *items)
def _format_subcommands(self, parser_info):
assert 'children' in parser_info
items = []
for subcmd in parser_info['children']:
subcmd_items = []
if subcmd['help']:
subcmd_items.append(nodes.paragraph(text=subcmd['help']))
else:
subcmd_items.append(nodes.paragraph(text='Undocumented'))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(
text=subcmd['bare_usage'])),
nodes.definition('', *subcmd_items)))
return nodes.definition_list('', *items)
def _nested_parse_paragraph(self, text):
content = nodes.paragraph()
self.state.nested_parse(StringList(text.split("\n")), 0, content)
return content
def run(self):
if 'module' in self.options and 'func' in self.options:
module_name = self.options['module']
attr_name = self.options['func']
elif 'ref' in self.options:
_parts = self.options['ref'].split('.')
module_name = '.'.join(_parts[0:-1])
attr_name = _parts[-1]
elif 'filename' in self.options and 'func' in self.options:
mod = {}
try:
f = open(self.options['filename'])
except IOError:
# try open with abspath
f = open(os.path.abspath(self.options['filename']))
code = compile(f.read(), self.options['filename'], 'exec')
exec(code, mod)
attr_name = self.options['func']
func = mod[attr_name]
else:
raise self.error(
':module: and :func: should be specified, or :ref:, or :filename: and :func:')
# Skip this if we're dealing with a local file, since it obviously can't be imported
if 'filename' not in self.options:
try:
mod = __import__(module_name, globals(), locals(), [attr_name])
except:
raise self.error('Failed to import "%s" from "%s".\n%s' % (attr_name, module_name, sys.exc_info()[1]))
if not hasattr(mod, attr_name):
raise self.error((
'Module "%s" has no attribute "%s"\n'
'Incorrect argparse :module: or :func: values?'
) % (module_name, attr_name))
func = getattr(mod, attr_name)
if isinstance(func, ArgumentParser):
parser = func
elif 'passparser' in self.options:
parser = ArgumentParser()
func(parser)
else:
parser = func()
if 'path' not in self.options:
self.options['path'] = ''
path = str(self.options['path'])
if 'prog' in self.options:
parser.prog = self.options['prog']
result = parse_parser(
parser, skip_default_values='nodefault' in self.options, skip_default_const_values='nodefaultconst' in self.options)
result = parser_navigate(result, path)
if 'manpage' in self.options:
return self._construct_manpage_specific_structure(result)
# Handle nested content, where markdown needs to be preprocessed
items = []
nested_content = nodes.paragraph()
if 'markdown' in self.options:
from sphinxarg.markdown import parseMarkDownBlock
items.extend(parseMarkDownBlock('\n'.join(self.content) + '\n'))
else:
self.state.nested_parse(
self.content, self.content_offset, nested_content)
nested_content = nested_content.children
# add common content between
for item in nested_content:
if not isinstance(item, nodes.definition_list):
items.append(item)
markDownHelp = False
if 'markdownhelp' in self.options:
markDownHelp = True
if 'description' in result and 'nodescription' not in self.options:
if markDownHelp:
items.extend(renderList([result['description']], True))
else:
items.append(self._nested_parse_paragraph(result['description']))
items.append(nodes.literal_block(text=result['usage']))
items.extend(print_action_groups(result, nested_content, markDownHelp,
settings=self.state.document.settings))
if 'nosubcommands' not in self.options:
items.extend(print_subcommands(result, nested_content, markDownHelp,
settings=self.state.document.settings))
if 'epilog' in result and 'noepilog' not in self.options:
items.append(self._nested_parse_paragraph(result['epilog']))
# Traverse the returned nodes, modifying the title IDs as necessary to avoid repeats
ensureUniqueIDs(items)
return items
def setup(app):
app.add_directive('argparse', ArgParseDirective)
|
ribozz/sphinx-argparse | sphinxarg/ext.py | print_action_groups | python | def print_action_groups(data, nested_content, markDownHelp=False, settings=None):
definitions = map_nested_definitions(nested_content)
nodes_list = []
if 'action_groups' in data:
for action_group in data['action_groups']:
# Every action group is comprised of a section, holding a title, the description, and the option group (members)
section = nodes.section(ids=[action_group['title']])
section += nodes.title(action_group['title'], action_group['title'])
desc = []
if action_group['description']:
desc.append(action_group['description'])
# Replace/append/prepend content to the description according to nested content
subContent = []
if action_group['title'] in definitions:
classifier, s, subContent = definitions[action_group['title']]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
elif classifier == '@skip':
continue
if len(subContent) > 0:
for k, v in map_nested_definitions(subContent).items():
definitions[k] = v
# Render appropriately
for element in renderList(desc, markDownHelp):
section += element
localDefinitions = definitions
if len(subContent) > 0:
localDefinitions = {k: v for k, v in definitions.items()}
for k, v in map_nested_definitions(subContent).items():
localDefinitions[k] = v
items = []
# Iterate over action group members
for entry in action_group['options']:
"""
Members will include:
default The default value. This may be ==SUPPRESS==
name A list of option names (e.g., ['-h', '--help']
help The help message string
There may also be a 'choices' member.
"""
# Build the help text
arg = []
if 'choices' in entry:
arg.append('Possible choices: {}\n'.format(", ".join([str(c) for c in entry['choices']])))
if 'help' in entry:
arg.append(entry['help'])
if entry['default'] is not None and entry['default'] not in ['"==SUPPRESS=="', '==SUPPRESS==']:
if entry['default'] == '':
arg.append('Default: ""')
else:
arg.append('Default: {}'.format(entry['default']))
# Handle nested content, the term used in the dict has the comma removed for simplicity
desc = arg
term = ' '.join(entry['name'])
if term in localDefinitions:
classifier, s, subContent = localDefinitions[term]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
term = ', '.join(entry['name'])
n = nodes.option_list_item('',
nodes.option_group('', nodes.option_string(text=term)),
nodes.description('', *renderList(desc, markDownHelp, settings)))
items.append(n)
section += nodes.option_list('', *items)
nodes_list.append(section)
return nodes_list | Process all 'action groups', which are also include 'Options' and 'Required
arguments'. A list of nodes is returned. | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/ext.py#L78-L162 | [
"def map_nested_definitions(nested_content):\n if nested_content is None:\n raise Exception('Nested content should be iterable, not null')\n # build definition dictionary\n definitions = {}\n for item in nested_content:\n if not isinstance(item, nodes.definition_list):\n continue\n for subitem in item:\n if not isinstance(subitem, nodes.definition_list_item):\n continue\n if not len(subitem.children) > 0:\n continue\n classifier = '@after'\n idx = subitem.first_child_matching_class(nodes.classifier)\n if idx is not None:\n ci = subitem[idx]\n if len(ci.children) > 0:\n classifier = ci.children[0].astext()\n if classifier is not None and classifier not in (\n '@replace', '@before', '@after', '@skip'):\n raise Exception('Unknown classifier: %s' % classifier)\n idx = subitem.first_child_matching_class(nodes.term)\n if idx is not None:\n term = subitem[idx]\n if len(term.children) > 0:\n term = term.children[0].astext()\n idx = subitem.first_child_matching_class(nodes.definition)\n if idx is not None:\n subContent = []\n for _ in subitem[idx]:\n if isinstance(_, nodes.definition_list):\n subContent.append(_)\n definitions[term] = (classifier, subitem[idx], subContent)\n\n return definitions\n",
"def renderList(l, markDownHelp, settings=None):\n \"\"\"\n Given a list of reStructuredText or MarkDown sections, return a docutils node list\n \"\"\"\n if len(l) == 0:\n return []\n if markDownHelp:\n from sphinxarg.markdown import parseMarkDownBlock\n return parseMarkDownBlock('\\n\\n'.join(l) + '\\n')\n else:\n all_children = []\n for element in l:\n if isinstance(element, str):\n if settings is None:\n settings = OptionParser(components=(Parser,)).get_default_values()\n document = new_document(None, settings)\n Parser().parse(element + '\\n', document)\n all_children += document.children\n elif isinstance(element, nodes.definition):\n all_children += element\n\n return all_children\n"
] | import sys
from argparse import ArgumentParser
import os
from docutils import nodes
from docutils.statemachine import StringList
from docutils.parsers.rst.directives import flag, unchanged
from docutils.parsers.rst import Parser, Directive
from docutils.utils import new_document
from docutils.frontend import OptionParser
from sphinx.util.nodes import nested_parse_with_titles
from sphinxarg.parser import parse_parser, parser_navigate
def map_nested_definitions(nested_content):
if nested_content is None:
raise Exception('Nested content should be iterable, not null')
# build definition dictionary
definitions = {}
for item in nested_content:
if not isinstance(item, nodes.definition_list):
continue
for subitem in item:
if not isinstance(subitem, nodes.definition_list_item):
continue
if not len(subitem.children) > 0:
continue
classifier = '@after'
idx = subitem.first_child_matching_class(nodes.classifier)
if idx is not None:
ci = subitem[idx]
if len(ci.children) > 0:
classifier = ci.children[0].astext()
if classifier is not None and classifier not in (
'@replace', '@before', '@after', '@skip'):
raise Exception('Unknown classifier: %s' % classifier)
idx = subitem.first_child_matching_class(nodes.term)
if idx is not None:
term = subitem[idx]
if len(term.children) > 0:
term = term.children[0].astext()
idx = subitem.first_child_matching_class(nodes.definition)
if idx is not None:
subContent = []
for _ in subitem[idx]:
if isinstance(_, nodes.definition_list):
subContent.append(_)
definitions[term] = (classifier, subitem[idx], subContent)
return definitions
def renderList(l, markDownHelp, settings=None):
"""
Given a list of reStructuredText or MarkDown sections, return a docutils node list
"""
if len(l) == 0:
return []
if markDownHelp:
from sphinxarg.markdown import parseMarkDownBlock
return parseMarkDownBlock('\n\n'.join(l) + '\n')
else:
all_children = []
for element in l:
if isinstance(element, str):
if settings is None:
settings = OptionParser(components=(Parser,)).get_default_values()
document = new_document(None, settings)
Parser().parse(element + '\n', document)
all_children += document.children
elif isinstance(element, nodes.definition):
all_children += element
return all_children
def print_subcommands(data, nested_content, markDownHelp=False, settings=None):
"""
Each subcommand is a dictionary with the following keys:
['usage', 'action_groups', 'bare_usage', 'name', 'help']
In essence, this is all tossed in a new section with the title 'name'.
Apparently there can also be a 'description' entry.
"""
definitions = map_nested_definitions(nested_content)
items = []
if 'children' in data:
subCommands = nodes.section(ids=["Sub-commands:"])
subCommands += nodes.title('Sub-commands:', 'Sub-commands:')
for child in data['children']:
sec = nodes.section(ids=[child['name']])
sec += nodes.title(child['name'], child['name'])
if 'description' in child and child['description']:
desc = [child['description']]
elif child['help']:
desc = [child['help']]
else:
desc = ['Undocumented']
# Handle nested content
subContent = []
if child['name'] in definitions:
classifier, s, subContent = definitions[child['name']]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
for element in renderList(desc, markDownHelp):
sec += element
sec += nodes.literal_block(text=child['bare_usage'])
for x in print_action_groups(child, nested_content + subContent, markDownHelp,
settings=settings):
sec += x
for x in print_subcommands(child, nested_content + subContent, markDownHelp,
settings=settings):
sec += x
if 'epilog' in child and child['epilog']:
for element in renderList([child['epilog']], markDownHelp):
sec += element
subCommands += sec
items.append(subCommands)
return items
def ensureUniqueIDs(items):
"""
If action groups are repeated, then links in the table of contents will
just go to the first of the repeats. This may not be desirable, particularly
in the case of subcommands where the option groups have different members.
This function updates the title IDs by adding _repeatX, where X is a number
so that the links are then unique.
"""
s = set()
for item in items:
for n in item.traverse(descend=True, siblings=True, ascend=False):
if isinstance(n, nodes.section):
ids = n['ids']
for idx, id in enumerate(ids):
if id not in s:
s.add(id)
else:
i = 1
while "{}_repeat{}".format(id, i) in s:
i += 1
ids[idx] = "{}_repeat{}".format(id, i)
s.add(ids[idx])
n['ids'] = ids
class ArgParseDirective(Directive):
has_content = True
option_spec = dict(module=unchanged, func=unchanged, ref=unchanged,
prog=unchanged, path=unchanged, nodefault=flag,
nodefaultconst=flag, filename=unchanged,
manpage=unchanged, nosubcommands=unchanged, passparser=flag,
noepilog=unchanged, nodescription=unchanged,
markdown=flag, markdownhelp=flag)
def _construct_manpage_specific_structure(self, parser_info):
"""
Construct a typical man page consisting of the following elements:
NAME (automatically generated, out of our control)
SYNOPSIS
DESCRIPTION
OPTIONS
FILES
SEE ALSO
BUGS
"""
items = []
# SYNOPSIS section
synopsis_section = nodes.section(
'',
nodes.title(text='Synopsis'),
nodes.literal_block(text=parser_info["bare_usage"]),
ids=['synopsis-section'])
items.append(synopsis_section)
# DESCRIPTION section
if 'nodescription' not in self.options:
description_section = nodes.section(
'',
nodes.title(text='Description'),
nodes.paragraph(text=parser_info.get(
'description', parser_info.get(
'help', "undocumented").capitalize())),
ids=['description-section'])
nested_parse_with_titles(
self.state, self.content, description_section)
items.append(description_section)
if parser_info.get('epilog') and 'noepilog' not in self.options:
# TODO: do whatever sphinx does to understand ReST inside
# docstrings magically imported from other places. The nested
# parse method invoked above seem to be able to do this but
# I haven't found a way to do it for arbitrary text
if description_section:
description_section += nodes.paragraph(
text=parser_info['epilog'])
else:
description_section = nodes.paragraph(
text=parser_info['epilog'])
items.append(description_section)
# OPTIONS section
options_section = nodes.section(
'',
nodes.title(text='Options'),
ids=['options-section'])
if 'args' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Positional arguments:')
options_section += self._format_positional_arguments(parser_info)
for action_group in parser_info['action_groups']:
if 'options' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text=action_group['title'])
options_section += self._format_optional_arguments(action_group)
# NOTE: we cannot generate NAME ourselves. It is generated by
# docutils.writers.manpage
# TODO: items.append(files)
# TODO: items.append(see also)
# TODO: items.append(bugs)
if len(options_section.children) > 1:
items.append(options_section)
if 'nosubcommands' not in self.options:
# SUBCOMMANDS section (non-standard)
subcommands_section = nodes.section(
'',
nodes.title(text='Sub-Commands'),
ids=['subcommands-section'])
if 'children' in parser_info:
subcommands_section += self._format_subcommands(parser_info)
if len(subcommands_section) > 1:
items.append(subcommands_section)
if os.getenv("INCLUDE_DEBUG_SECTION"):
import json
# DEBUG section (non-standard)
debug_section = nodes.section(
'',
nodes.title(text="Argparse + Sphinx Debugging"),
nodes.literal_block(text=json.dumps(parser_info, indent=' ')),
ids=['debug-section'])
items.append(debug_section)
return items
def _format_positional_arguments(self, parser_info):
assert 'args' in parser_info
items = []
for arg in parser_info['args']:
arg_items = []
if arg['help']:
arg_items.append(nodes.paragraph(text=arg['help']))
elif 'choices' not in arg:
arg_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
arg_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(arg['choices'])))
items.append(
nodes.option_list_item(
'',
nodes.option_group(
'', nodes.option(
'', nodes.option_string(text=arg['metavar'])
)
),
nodes.description('', *arg_items)))
return nodes.option_list('', *items)
def _format_optional_arguments(self, parser_info):
assert 'options' in parser_info
items = []
for opt in parser_info['options']:
names = []
opt_items = []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] not in ['"==SUPPRESS=="', '==SUPPRESS==']:
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
if opt['help']:
opt_items.append(nodes.paragraph(text=opt['help']))
elif 'choices' not in opt:
opt_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
opt_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(opt['choices'])))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *opt_items)))
return nodes.option_list('', *items)
def _format_subcommands(self, parser_info):
assert 'children' in parser_info
items = []
for subcmd in parser_info['children']:
subcmd_items = []
if subcmd['help']:
subcmd_items.append(nodes.paragraph(text=subcmd['help']))
else:
subcmd_items.append(nodes.paragraph(text='Undocumented'))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(
text=subcmd['bare_usage'])),
nodes.definition('', *subcmd_items)))
return nodes.definition_list('', *items)
def _nested_parse_paragraph(self, text):
content = nodes.paragraph()
self.state.nested_parse(StringList(text.split("\n")), 0, content)
return content
def run(self):
if 'module' in self.options and 'func' in self.options:
module_name = self.options['module']
attr_name = self.options['func']
elif 'ref' in self.options:
_parts = self.options['ref'].split('.')
module_name = '.'.join(_parts[0:-1])
attr_name = _parts[-1]
elif 'filename' in self.options and 'func' in self.options:
mod = {}
try:
f = open(self.options['filename'])
except IOError:
# try open with abspath
f = open(os.path.abspath(self.options['filename']))
code = compile(f.read(), self.options['filename'], 'exec')
exec(code, mod)
attr_name = self.options['func']
func = mod[attr_name]
else:
raise self.error(
':module: and :func: should be specified, or :ref:, or :filename: and :func:')
# Skip this if we're dealing with a local file, since it obviously can't be imported
if 'filename' not in self.options:
try:
mod = __import__(module_name, globals(), locals(), [attr_name])
except:
raise self.error('Failed to import "%s" from "%s".\n%s' % (attr_name, module_name, sys.exc_info()[1]))
if not hasattr(mod, attr_name):
raise self.error((
'Module "%s" has no attribute "%s"\n'
'Incorrect argparse :module: or :func: values?'
) % (module_name, attr_name))
func = getattr(mod, attr_name)
if isinstance(func, ArgumentParser):
parser = func
elif 'passparser' in self.options:
parser = ArgumentParser()
func(parser)
else:
parser = func()
if 'path' not in self.options:
self.options['path'] = ''
path = str(self.options['path'])
if 'prog' in self.options:
parser.prog = self.options['prog']
result = parse_parser(
parser, skip_default_values='nodefault' in self.options, skip_default_const_values='nodefaultconst' in self.options)
result = parser_navigate(result, path)
if 'manpage' in self.options:
return self._construct_manpage_specific_structure(result)
# Handle nested content, where markdown needs to be preprocessed
items = []
nested_content = nodes.paragraph()
if 'markdown' in self.options:
from sphinxarg.markdown import parseMarkDownBlock
items.extend(parseMarkDownBlock('\n'.join(self.content) + '\n'))
else:
self.state.nested_parse(
self.content, self.content_offset, nested_content)
nested_content = nested_content.children
# add common content between
for item in nested_content:
if not isinstance(item, nodes.definition_list):
items.append(item)
markDownHelp = False
if 'markdownhelp' in self.options:
markDownHelp = True
if 'description' in result and 'nodescription' not in self.options:
if markDownHelp:
items.extend(renderList([result['description']], True))
else:
items.append(self._nested_parse_paragraph(result['description']))
items.append(nodes.literal_block(text=result['usage']))
items.extend(print_action_groups(result, nested_content, markDownHelp,
settings=self.state.document.settings))
if 'nosubcommands' not in self.options:
items.extend(print_subcommands(result, nested_content, markDownHelp,
settings=self.state.document.settings))
if 'epilog' in result and 'noepilog' not in self.options:
items.append(self._nested_parse_paragraph(result['epilog']))
# Traverse the returned nodes, modifying the title IDs as necessary to avoid repeats
ensureUniqueIDs(items)
return items
def setup(app):
app.add_directive('argparse', ArgParseDirective)
|
ribozz/sphinx-argparse | sphinxarg/ext.py | print_subcommands | python | def print_subcommands(data, nested_content, markDownHelp=False, settings=None):
definitions = map_nested_definitions(nested_content)
items = []
if 'children' in data:
subCommands = nodes.section(ids=["Sub-commands:"])
subCommands += nodes.title('Sub-commands:', 'Sub-commands:')
for child in data['children']:
sec = nodes.section(ids=[child['name']])
sec += nodes.title(child['name'], child['name'])
if 'description' in child and child['description']:
desc = [child['description']]
elif child['help']:
desc = [child['help']]
else:
desc = ['Undocumented']
# Handle nested content
subContent = []
if child['name'] in definitions:
classifier, s, subContent = definitions[child['name']]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
for element in renderList(desc, markDownHelp):
sec += element
sec += nodes.literal_block(text=child['bare_usage'])
for x in print_action_groups(child, nested_content + subContent, markDownHelp,
settings=settings):
sec += x
for x in print_subcommands(child, nested_content + subContent, markDownHelp,
settings=settings):
sec += x
if 'epilog' in child and child['epilog']:
for element in renderList([child['epilog']], markDownHelp):
sec += element
subCommands += sec
items.append(subCommands)
return items | Each subcommand is a dictionary with the following keys:
['usage', 'action_groups', 'bare_usage', 'name', 'help']
In essence, this is all tossed in a new section with the title 'name'.
Apparently there can also be a 'description' entry. | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/ext.py#L165-L221 | [
"def map_nested_definitions(nested_content):\n if nested_content is None:\n raise Exception('Nested content should be iterable, not null')\n # build definition dictionary\n definitions = {}\n for item in nested_content:\n if not isinstance(item, nodes.definition_list):\n continue\n for subitem in item:\n if not isinstance(subitem, nodes.definition_list_item):\n continue\n if not len(subitem.children) > 0:\n continue\n classifier = '@after'\n idx = subitem.first_child_matching_class(nodes.classifier)\n if idx is not None:\n ci = subitem[idx]\n if len(ci.children) > 0:\n classifier = ci.children[0].astext()\n if classifier is not None and classifier not in (\n '@replace', '@before', '@after', '@skip'):\n raise Exception('Unknown classifier: %s' % classifier)\n idx = subitem.first_child_matching_class(nodes.term)\n if idx is not None:\n term = subitem[idx]\n if len(term.children) > 0:\n term = term.children[0].astext()\n idx = subitem.first_child_matching_class(nodes.definition)\n if idx is not None:\n subContent = []\n for _ in subitem[idx]:\n if isinstance(_, nodes.definition_list):\n subContent.append(_)\n definitions[term] = (classifier, subitem[idx], subContent)\n\n return definitions\n",
"def renderList(l, markDownHelp, settings=None):\n \"\"\"\n Given a list of reStructuredText or MarkDown sections, return a docutils node list\n \"\"\"\n if len(l) == 0:\n return []\n if markDownHelp:\n from sphinxarg.markdown import parseMarkDownBlock\n return parseMarkDownBlock('\\n\\n'.join(l) + '\\n')\n else:\n all_children = []\n for element in l:\n if isinstance(element, str):\n if settings is None:\n settings = OptionParser(components=(Parser,)).get_default_values()\n document = new_document(None, settings)\n Parser().parse(element + '\\n', document)\n all_children += document.children\n elif isinstance(element, nodes.definition):\n all_children += element\n\n return all_children\n",
"def print_action_groups(data, nested_content, markDownHelp=False, settings=None):\n \"\"\"\n Process all 'action groups', which are also include 'Options' and 'Required\n arguments'. A list of nodes is returned.\n \"\"\"\n definitions = map_nested_definitions(nested_content)\n nodes_list = []\n if 'action_groups' in data:\n for action_group in data['action_groups']:\n # Every action group is comprised of a section, holding a title, the description, and the option group (members)\n section = nodes.section(ids=[action_group['title']])\n section += nodes.title(action_group['title'], action_group['title'])\n\n desc = []\n if action_group['description']:\n desc.append(action_group['description'])\n # Replace/append/prepend content to the description according to nested content\n subContent = []\n if action_group['title'] in definitions:\n classifier, s, subContent = definitions[action_group['title']]\n if classifier == '@replace':\n desc = [s]\n elif classifier == '@after':\n desc.append(s)\n elif classifier == '@before':\n desc.insert(0, s)\n elif classifier == '@skip':\n continue\n if len(subContent) > 0:\n for k, v in map_nested_definitions(subContent).items():\n definitions[k] = v\n # Render appropriately\n for element in renderList(desc, markDownHelp):\n section += element\n\n localDefinitions = definitions\n if len(subContent) > 0:\n localDefinitions = {k: v for k, v in definitions.items()}\n for k, v in map_nested_definitions(subContent).items():\n localDefinitions[k] = v\n\n items = []\n # Iterate over action group members\n for entry in action_group['options']:\n \"\"\"\n Members will include:\n default\tThe default value. 
This may be ==SUPPRESS==\n name\tA list of option names (e.g., ['-h', '--help']\n help\tThe help message string\n There may also be a 'choices' member.\n \"\"\"\n # Build the help text\n arg = []\n if 'choices' in entry:\n arg.append('Possible choices: {}\\n'.format(\", \".join([str(c) for c in entry['choices']])))\n if 'help' in entry:\n arg.append(entry['help'])\n if entry['default'] is not None and entry['default'] not in ['\"==SUPPRESS==\"', '==SUPPRESS==']:\n if entry['default'] == '':\n arg.append('Default: \"\"')\n else:\n arg.append('Default: {}'.format(entry['default']))\n\n # Handle nested content, the term used in the dict has the comma removed for simplicity\n desc = arg\n term = ' '.join(entry['name'])\n if term in localDefinitions:\n classifier, s, subContent = localDefinitions[term]\n if classifier == '@replace':\n desc = [s]\n elif classifier == '@after':\n desc.append(s)\n elif classifier == '@before':\n desc.insert(0, s)\n term = ', '.join(entry['name'])\n\n n = nodes.option_list_item('',\n nodes.option_group('', nodes.option_string(text=term)),\n nodes.description('', *renderList(desc, markDownHelp, settings)))\n items.append(n)\n\n section += nodes.option_list('', *items)\n nodes_list.append(section)\n\n return nodes_list\n",
"def print_subcommands(data, nested_content, markDownHelp=False, settings=None):\n \"\"\"\n Each subcommand is a dictionary with the following keys:\n\n ['usage', 'action_groups', 'bare_usage', 'name', 'help']\n\n In essence, this is all tossed in a new section with the title 'name'.\n Apparently there can also be a 'description' entry.\n \"\"\"\n\n definitions = map_nested_definitions(nested_content)\n items = []\n if 'children' in data:\n subCommands = nodes.section(ids=[\"Sub-commands:\"])\n subCommands += nodes.title('Sub-commands:', 'Sub-commands:')\n\n for child in data['children']:\n sec = nodes.section(ids=[child['name']])\n sec += nodes.title(child['name'], child['name'])\n\n if 'description' in child and child['description']:\n desc = [child['description']]\n elif child['help']:\n desc = [child['help']]\n else:\n desc = ['Undocumented']\n\n # Handle nested content\n subContent = []\n if child['name'] in definitions:\n classifier, s, subContent = definitions[child['name']]\n if classifier == '@replace':\n desc = [s]\n elif classifier == '@after':\n desc.append(s)\n elif classifier == '@before':\n desc.insert(0, s)\n\n for element in renderList(desc, markDownHelp):\n sec += element\n sec += nodes.literal_block(text=child['bare_usage'])\n for x in print_action_groups(child, nested_content + subContent, markDownHelp,\n settings=settings):\n sec += x\n\n for x in print_subcommands(child, nested_content + subContent, markDownHelp,\n settings=settings):\n sec += x\n\n if 'epilog' in child and child['epilog']:\n for element in renderList([child['epilog']], markDownHelp):\n sec += element\n\n subCommands += sec\n items.append(subCommands)\n\n return items\n"
] | import sys
from argparse import ArgumentParser
import os
from docutils import nodes
from docutils.statemachine import StringList
from docutils.parsers.rst.directives import flag, unchanged
from docutils.parsers.rst import Parser, Directive
from docutils.utils import new_document
from docutils.frontend import OptionParser
from sphinx.util.nodes import nested_parse_with_titles
from sphinxarg.parser import parse_parser, parser_navigate
def map_nested_definitions(nested_content):
if nested_content is None:
raise Exception('Nested content should be iterable, not null')
# build definition dictionary
definitions = {}
for item in nested_content:
if not isinstance(item, nodes.definition_list):
continue
for subitem in item:
if not isinstance(subitem, nodes.definition_list_item):
continue
if not len(subitem.children) > 0:
continue
classifier = '@after'
idx = subitem.first_child_matching_class(nodes.classifier)
if idx is not None:
ci = subitem[idx]
if len(ci.children) > 0:
classifier = ci.children[0].astext()
if classifier is not None and classifier not in (
'@replace', '@before', '@after', '@skip'):
raise Exception('Unknown classifier: %s' % classifier)
idx = subitem.first_child_matching_class(nodes.term)
if idx is not None:
term = subitem[idx]
if len(term.children) > 0:
term = term.children[0].astext()
idx = subitem.first_child_matching_class(nodes.definition)
if idx is not None:
subContent = []
for _ in subitem[idx]:
if isinstance(_, nodes.definition_list):
subContent.append(_)
definitions[term] = (classifier, subitem[idx], subContent)
return definitions
def renderList(l, markDownHelp, settings=None):
"""
Given a list of reStructuredText or MarkDown sections, return a docutils node list
"""
if len(l) == 0:
return []
if markDownHelp:
from sphinxarg.markdown import parseMarkDownBlock
return parseMarkDownBlock('\n\n'.join(l) + '\n')
else:
all_children = []
for element in l:
if isinstance(element, str):
if settings is None:
settings = OptionParser(components=(Parser,)).get_default_values()
document = new_document(None, settings)
Parser().parse(element + '\n', document)
all_children += document.children
elif isinstance(element, nodes.definition):
all_children += element
return all_children
def print_action_groups(data, nested_content, markDownHelp=False, settings=None):
"""
Process all 'action groups', which are also include 'Options' and 'Required
arguments'. A list of nodes is returned.
"""
definitions = map_nested_definitions(nested_content)
nodes_list = []
if 'action_groups' in data:
for action_group in data['action_groups']:
# Every action group is comprised of a section, holding a title, the description, and the option group (members)
section = nodes.section(ids=[action_group['title']])
section += nodes.title(action_group['title'], action_group['title'])
desc = []
if action_group['description']:
desc.append(action_group['description'])
# Replace/append/prepend content to the description according to nested content
subContent = []
if action_group['title'] in definitions:
classifier, s, subContent = definitions[action_group['title']]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
elif classifier == '@skip':
continue
if len(subContent) > 0:
for k, v in map_nested_definitions(subContent).items():
definitions[k] = v
# Render appropriately
for element in renderList(desc, markDownHelp):
section += element
localDefinitions = definitions
if len(subContent) > 0:
localDefinitions = {k: v for k, v in definitions.items()}
for k, v in map_nested_definitions(subContent).items():
localDefinitions[k] = v
items = []
# Iterate over action group members
for entry in action_group['options']:
"""
Members will include:
default The default value. This may be ==SUPPRESS==
name A list of option names (e.g., ['-h', '--help']
help The help message string
There may also be a 'choices' member.
"""
# Build the help text
arg = []
if 'choices' in entry:
arg.append('Possible choices: {}\n'.format(", ".join([str(c) for c in entry['choices']])))
if 'help' in entry:
arg.append(entry['help'])
if entry['default'] is not None and entry['default'] not in ['"==SUPPRESS=="', '==SUPPRESS==']:
if entry['default'] == '':
arg.append('Default: ""')
else:
arg.append('Default: {}'.format(entry['default']))
# Handle nested content, the term used in the dict has the comma removed for simplicity
desc = arg
term = ' '.join(entry['name'])
if term in localDefinitions:
classifier, s, subContent = localDefinitions[term]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
term = ', '.join(entry['name'])
n = nodes.option_list_item('',
nodes.option_group('', nodes.option_string(text=term)),
nodes.description('', *renderList(desc, markDownHelp, settings)))
items.append(n)
section += nodes.option_list('', *items)
nodes_list.append(section)
return nodes_list
def ensureUniqueIDs(items):
"""
If action groups are repeated, then links in the table of contents will
just go to the first of the repeats. This may not be desirable, particularly
in the case of subcommands where the option groups have different members.
This function updates the title IDs by adding _repeatX, where X is a number
so that the links are then unique.
"""
s = set()
for item in items:
for n in item.traverse(descend=True, siblings=True, ascend=False):
if isinstance(n, nodes.section):
ids = n['ids']
for idx, id in enumerate(ids):
if id not in s:
s.add(id)
else:
i = 1
while "{}_repeat{}".format(id, i) in s:
i += 1
ids[idx] = "{}_repeat{}".format(id, i)
s.add(ids[idx])
n['ids'] = ids
class ArgParseDirective(Directive):
has_content = True
option_spec = dict(module=unchanged, func=unchanged, ref=unchanged,
prog=unchanged, path=unchanged, nodefault=flag,
nodefaultconst=flag, filename=unchanged,
manpage=unchanged, nosubcommands=unchanged, passparser=flag,
noepilog=unchanged, nodescription=unchanged,
markdown=flag, markdownhelp=flag)
def _construct_manpage_specific_structure(self, parser_info):
"""
Construct a typical man page consisting of the following elements:
NAME (automatically generated, out of our control)
SYNOPSIS
DESCRIPTION
OPTIONS
FILES
SEE ALSO
BUGS
"""
items = []
# SYNOPSIS section
synopsis_section = nodes.section(
'',
nodes.title(text='Synopsis'),
nodes.literal_block(text=parser_info["bare_usage"]),
ids=['synopsis-section'])
items.append(synopsis_section)
# DESCRIPTION section
if 'nodescription' not in self.options:
description_section = nodes.section(
'',
nodes.title(text='Description'),
nodes.paragraph(text=parser_info.get(
'description', parser_info.get(
'help', "undocumented").capitalize())),
ids=['description-section'])
nested_parse_with_titles(
self.state, self.content, description_section)
items.append(description_section)
if parser_info.get('epilog') and 'noepilog' not in self.options:
# TODO: do whatever sphinx does to understand ReST inside
# docstrings magically imported from other places. The nested
# parse method invoked above seem to be able to do this but
# I haven't found a way to do it for arbitrary text
if description_section:
description_section += nodes.paragraph(
text=parser_info['epilog'])
else:
description_section = nodes.paragraph(
text=parser_info['epilog'])
items.append(description_section)
# OPTIONS section
options_section = nodes.section(
'',
nodes.title(text='Options'),
ids=['options-section'])
if 'args' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Positional arguments:')
options_section += self._format_positional_arguments(parser_info)
for action_group in parser_info['action_groups']:
if 'options' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text=action_group['title'])
options_section += self._format_optional_arguments(action_group)
# NOTE: we cannot generate NAME ourselves. It is generated by
# docutils.writers.manpage
# TODO: items.append(files)
# TODO: items.append(see also)
# TODO: items.append(bugs)
if len(options_section.children) > 1:
items.append(options_section)
if 'nosubcommands' not in self.options:
# SUBCOMMANDS section (non-standard)
subcommands_section = nodes.section(
'',
nodes.title(text='Sub-Commands'),
ids=['subcommands-section'])
if 'children' in parser_info:
subcommands_section += self._format_subcommands(parser_info)
if len(subcommands_section) > 1:
items.append(subcommands_section)
if os.getenv("INCLUDE_DEBUG_SECTION"):
import json
# DEBUG section (non-standard)
debug_section = nodes.section(
'',
nodes.title(text="Argparse + Sphinx Debugging"),
nodes.literal_block(text=json.dumps(parser_info, indent=' ')),
ids=['debug-section'])
items.append(debug_section)
return items
def _format_positional_arguments(self, parser_info):
assert 'args' in parser_info
items = []
for arg in parser_info['args']:
arg_items = []
if arg['help']:
arg_items.append(nodes.paragraph(text=arg['help']))
elif 'choices' not in arg:
arg_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
arg_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(arg['choices'])))
items.append(
nodes.option_list_item(
'',
nodes.option_group(
'', nodes.option(
'', nodes.option_string(text=arg['metavar'])
)
),
nodes.description('', *arg_items)))
return nodes.option_list('', *items)
def _format_optional_arguments(self, parser_info):
assert 'options' in parser_info
items = []
for opt in parser_info['options']:
names = []
opt_items = []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] not in ['"==SUPPRESS=="', '==SUPPRESS==']:
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
if opt['help']:
opt_items.append(nodes.paragraph(text=opt['help']))
elif 'choices' not in opt:
opt_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
opt_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(opt['choices'])))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *opt_items)))
return nodes.option_list('', *items)
def _format_subcommands(self, parser_info):
assert 'children' in parser_info
items = []
for subcmd in parser_info['children']:
subcmd_items = []
if subcmd['help']:
subcmd_items.append(nodes.paragraph(text=subcmd['help']))
else:
subcmd_items.append(nodes.paragraph(text='Undocumented'))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(
text=subcmd['bare_usage'])),
nodes.definition('', *subcmd_items)))
return nodes.definition_list('', *items)
def _nested_parse_paragraph(self, text):
content = nodes.paragraph()
self.state.nested_parse(StringList(text.split("\n")), 0, content)
return content
def run(self):
if 'module' in self.options and 'func' in self.options:
module_name = self.options['module']
attr_name = self.options['func']
elif 'ref' in self.options:
_parts = self.options['ref'].split('.')
module_name = '.'.join(_parts[0:-1])
attr_name = _parts[-1]
elif 'filename' in self.options and 'func' in self.options:
mod = {}
try:
f = open(self.options['filename'])
except IOError:
# try open with abspath
f = open(os.path.abspath(self.options['filename']))
code = compile(f.read(), self.options['filename'], 'exec')
exec(code, mod)
attr_name = self.options['func']
func = mod[attr_name]
else:
raise self.error(
':module: and :func: should be specified, or :ref:, or :filename: and :func:')
# Skip this if we're dealing with a local file, since it obviously can't be imported
if 'filename' not in self.options:
try:
mod = __import__(module_name, globals(), locals(), [attr_name])
except:
raise self.error('Failed to import "%s" from "%s".\n%s' % (attr_name, module_name, sys.exc_info()[1]))
if not hasattr(mod, attr_name):
raise self.error((
'Module "%s" has no attribute "%s"\n'
'Incorrect argparse :module: or :func: values?'
) % (module_name, attr_name))
func = getattr(mod, attr_name)
if isinstance(func, ArgumentParser):
parser = func
elif 'passparser' in self.options:
parser = ArgumentParser()
func(parser)
else:
parser = func()
if 'path' not in self.options:
self.options['path'] = ''
path = str(self.options['path'])
if 'prog' in self.options:
parser.prog = self.options['prog']
result = parse_parser(
parser, skip_default_values='nodefault' in self.options, skip_default_const_values='nodefaultconst' in self.options)
result = parser_navigate(result, path)
if 'manpage' in self.options:
return self._construct_manpage_specific_structure(result)
# Handle nested content, where markdown needs to be preprocessed
items = []
nested_content = nodes.paragraph()
if 'markdown' in self.options:
from sphinxarg.markdown import parseMarkDownBlock
items.extend(parseMarkDownBlock('\n'.join(self.content) + '\n'))
else:
self.state.nested_parse(
self.content, self.content_offset, nested_content)
nested_content = nested_content.children
# add common content between
for item in nested_content:
if not isinstance(item, nodes.definition_list):
items.append(item)
markDownHelp = False
if 'markdownhelp' in self.options:
markDownHelp = True
if 'description' in result and 'nodescription' not in self.options:
if markDownHelp:
items.extend(renderList([result['description']], True))
else:
items.append(self._nested_parse_paragraph(result['description']))
items.append(nodes.literal_block(text=result['usage']))
items.extend(print_action_groups(result, nested_content, markDownHelp,
settings=self.state.document.settings))
if 'nosubcommands' not in self.options:
items.extend(print_subcommands(result, nested_content, markDownHelp,
settings=self.state.document.settings))
if 'epilog' in result and 'noepilog' not in self.options:
items.append(self._nested_parse_paragraph(result['epilog']))
# Traverse the returned nodes, modifying the title IDs as necessary to avoid repeats
ensureUniqueIDs(items)
return items
def setup(app):
app.add_directive('argparse', ArgParseDirective)
|
ribozz/sphinx-argparse | sphinxarg/ext.py | ensureUniqueIDs | python | def ensureUniqueIDs(items):
s = set()
for item in items:
for n in item.traverse(descend=True, siblings=True, ascend=False):
if isinstance(n, nodes.section):
ids = n['ids']
for idx, id in enumerate(ids):
if id not in s:
s.add(id)
else:
i = 1
while "{}_repeat{}".format(id, i) in s:
i += 1
ids[idx] = "{}_repeat{}".format(id, i)
s.add(ids[idx])
n['ids'] = ids | If action groups are repeated, then links in the table of contents will
just go to the first of the repeats. This may not be desirable, particularly
in the case of subcommands where the option groups have different members.
This function updates the title IDs by adding _repeatX, where X is a number
so that the links are then unique. | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/ext.py#L224-L246 | null | import sys
from argparse import ArgumentParser
import os
from docutils import nodes
from docutils.statemachine import StringList
from docutils.parsers.rst.directives import flag, unchanged
from docutils.parsers.rst import Parser, Directive
from docutils.utils import new_document
from docutils.frontend import OptionParser
from sphinx.util.nodes import nested_parse_with_titles
from sphinxarg.parser import parse_parser, parser_navigate
def map_nested_definitions(nested_content):
if nested_content is None:
raise Exception('Nested content should be iterable, not null')
# build definition dictionary
definitions = {}
for item in nested_content:
if not isinstance(item, nodes.definition_list):
continue
for subitem in item:
if not isinstance(subitem, nodes.definition_list_item):
continue
if not len(subitem.children) > 0:
continue
classifier = '@after'
idx = subitem.first_child_matching_class(nodes.classifier)
if idx is not None:
ci = subitem[idx]
if len(ci.children) > 0:
classifier = ci.children[0].astext()
if classifier is not None and classifier not in (
'@replace', '@before', '@after', '@skip'):
raise Exception('Unknown classifier: %s' % classifier)
idx = subitem.first_child_matching_class(nodes.term)
if idx is not None:
term = subitem[idx]
if len(term.children) > 0:
term = term.children[0].astext()
idx = subitem.first_child_matching_class(nodes.definition)
if idx is not None:
subContent = []
for _ in subitem[idx]:
if isinstance(_, nodes.definition_list):
subContent.append(_)
definitions[term] = (classifier, subitem[idx], subContent)
return definitions
def renderList(l, markDownHelp, settings=None):
"""
Given a list of reStructuredText or MarkDown sections, return a docutils node list
"""
if len(l) == 0:
return []
if markDownHelp:
from sphinxarg.markdown import parseMarkDownBlock
return parseMarkDownBlock('\n\n'.join(l) + '\n')
else:
all_children = []
for element in l:
if isinstance(element, str):
if settings is None:
settings = OptionParser(components=(Parser,)).get_default_values()
document = new_document(None, settings)
Parser().parse(element + '\n', document)
all_children += document.children
elif isinstance(element, nodes.definition):
all_children += element
return all_children
def print_action_groups(data, nested_content, markDownHelp=False, settings=None):
"""
Process all 'action groups', which are also include 'Options' and 'Required
arguments'. A list of nodes is returned.
"""
definitions = map_nested_definitions(nested_content)
nodes_list = []
if 'action_groups' in data:
for action_group in data['action_groups']:
# Every action group is comprised of a section, holding a title, the description, and the option group (members)
section = nodes.section(ids=[action_group['title']])
section += nodes.title(action_group['title'], action_group['title'])
desc = []
if action_group['description']:
desc.append(action_group['description'])
# Replace/append/prepend content to the description according to nested content
subContent = []
if action_group['title'] in definitions:
classifier, s, subContent = definitions[action_group['title']]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
elif classifier == '@skip':
continue
if len(subContent) > 0:
for k, v in map_nested_definitions(subContent).items():
definitions[k] = v
# Render appropriately
for element in renderList(desc, markDownHelp):
section += element
localDefinitions = definitions
if len(subContent) > 0:
localDefinitions = {k: v for k, v in definitions.items()}
for k, v in map_nested_definitions(subContent).items():
localDefinitions[k] = v
items = []
# Iterate over action group members
for entry in action_group['options']:
"""
Members will include:
default The default value. This may be ==SUPPRESS==
name A list of option names (e.g., ['-h', '--help']
help The help message string
There may also be a 'choices' member.
"""
# Build the help text
arg = []
if 'choices' in entry:
arg.append('Possible choices: {}\n'.format(", ".join([str(c) for c in entry['choices']])))
if 'help' in entry:
arg.append(entry['help'])
if entry['default'] is not None and entry['default'] not in ['"==SUPPRESS=="', '==SUPPRESS==']:
if entry['default'] == '':
arg.append('Default: ""')
else:
arg.append('Default: {}'.format(entry['default']))
# Handle nested content, the term used in the dict has the comma removed for simplicity
desc = arg
term = ' '.join(entry['name'])
if term in localDefinitions:
classifier, s, subContent = localDefinitions[term]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
term = ', '.join(entry['name'])
n = nodes.option_list_item('',
nodes.option_group('', nodes.option_string(text=term)),
nodes.description('', *renderList(desc, markDownHelp, settings)))
items.append(n)
section += nodes.option_list('', *items)
nodes_list.append(section)
return nodes_list
def print_subcommands(data, nested_content, markDownHelp=False, settings=None):
"""
Each subcommand is a dictionary with the following keys:
['usage', 'action_groups', 'bare_usage', 'name', 'help']
In essence, this is all tossed in a new section with the title 'name'.
Apparently there can also be a 'description' entry.
"""
definitions = map_nested_definitions(nested_content)
items = []
if 'children' in data:
subCommands = nodes.section(ids=["Sub-commands:"])
subCommands += nodes.title('Sub-commands:', 'Sub-commands:')
for child in data['children']:
sec = nodes.section(ids=[child['name']])
sec += nodes.title(child['name'], child['name'])
if 'description' in child and child['description']:
desc = [child['description']]
elif child['help']:
desc = [child['help']]
else:
desc = ['Undocumented']
# Handle nested content
subContent = []
if child['name'] in definitions:
classifier, s, subContent = definitions[child['name']]
if classifier == '@replace':
desc = [s]
elif classifier == '@after':
desc.append(s)
elif classifier == '@before':
desc.insert(0, s)
for element in renderList(desc, markDownHelp):
sec += element
sec += nodes.literal_block(text=child['bare_usage'])
for x in print_action_groups(child, nested_content + subContent, markDownHelp,
settings=settings):
sec += x
for x in print_subcommands(child, nested_content + subContent, markDownHelp,
settings=settings):
sec += x
if 'epilog' in child and child['epilog']:
for element in renderList([child['epilog']], markDownHelp):
sec += element
subCommands += sec
items.append(subCommands)
return items
class ArgParseDirective(Directive):
has_content = True
option_spec = dict(module=unchanged, func=unchanged, ref=unchanged,
prog=unchanged, path=unchanged, nodefault=flag,
nodefaultconst=flag, filename=unchanged,
manpage=unchanged, nosubcommands=unchanged, passparser=flag,
noepilog=unchanged, nodescription=unchanged,
markdown=flag, markdownhelp=flag)
def _construct_manpage_specific_structure(self, parser_info):
"""
Construct a typical man page consisting of the following elements:
NAME (automatically generated, out of our control)
SYNOPSIS
DESCRIPTION
OPTIONS
FILES
SEE ALSO
BUGS
"""
items = []
# SYNOPSIS section
synopsis_section = nodes.section(
'',
nodes.title(text='Synopsis'),
nodes.literal_block(text=parser_info["bare_usage"]),
ids=['synopsis-section'])
items.append(synopsis_section)
# DESCRIPTION section
if 'nodescription' not in self.options:
description_section = nodes.section(
'',
nodes.title(text='Description'),
nodes.paragraph(text=parser_info.get(
'description', parser_info.get(
'help', "undocumented").capitalize())),
ids=['description-section'])
nested_parse_with_titles(
self.state, self.content, description_section)
items.append(description_section)
if parser_info.get('epilog') and 'noepilog' not in self.options:
# TODO: do whatever sphinx does to understand ReST inside
# docstrings magically imported from other places. The nested
# parse method invoked above seem to be able to do this but
# I haven't found a way to do it for arbitrary text
if description_section:
description_section += nodes.paragraph(
text=parser_info['epilog'])
else:
description_section = nodes.paragraph(
text=parser_info['epilog'])
items.append(description_section)
# OPTIONS section
options_section = nodes.section(
'',
nodes.title(text='Options'),
ids=['options-section'])
if 'args' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Positional arguments:')
options_section += self._format_positional_arguments(parser_info)
for action_group in parser_info['action_groups']:
if 'options' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text=action_group['title'])
options_section += self._format_optional_arguments(action_group)
# NOTE: we cannot generate NAME ourselves. It is generated by
# docutils.writers.manpage
# TODO: items.append(files)
# TODO: items.append(see also)
# TODO: items.append(bugs)
if len(options_section.children) > 1:
items.append(options_section)
if 'nosubcommands' not in self.options:
# SUBCOMMANDS section (non-standard)
subcommands_section = nodes.section(
'',
nodes.title(text='Sub-Commands'),
ids=['subcommands-section'])
if 'children' in parser_info:
subcommands_section += self._format_subcommands(parser_info)
if len(subcommands_section) > 1:
items.append(subcommands_section)
if os.getenv("INCLUDE_DEBUG_SECTION"):
import json
# DEBUG section (non-standard)
debug_section = nodes.section(
'',
nodes.title(text="Argparse + Sphinx Debugging"),
nodes.literal_block(text=json.dumps(parser_info, indent=' ')),
ids=['debug-section'])
items.append(debug_section)
return items
def _format_positional_arguments(self, parser_info):
    """Build a docutils option list for the parser's positional arguments.

    :param parser_info: dict produced by parse_parser(); must contain 'args'
    :returns: a docutils ``option_list`` node
    """
    assert 'args' in parser_info
    list_items = []
    for arg in parser_info['args']:
        description_parts = []
        # Prefer the argument's own help text; fall back to a placeholder
        # unless a choices list will be shown instead.
        if arg['help']:
            description_parts.append(nodes.paragraph(text=arg['help']))
        elif 'choices' not in arg:
            description_parts.append(nodes.paragraph(text='Undocumented'))
        if 'choices' in arg:
            choices_text = 'Possible choices: ' + ', '.join(arg['choices'])
            description_parts.append(nodes.paragraph(text=choices_text))
        option = nodes.option('', nodes.option_string(text=arg['metavar']))
        group = nodes.option_group('', option)
        description = nodes.description('', *description_parts)
        list_items.append(nodes.option_list_item('', group, description))
    return nodes.option_list('', *list_items)
def _format_optional_arguments(self, parser_info):
    """Build a docutils option list for an action group's optional arguments.

    :param parser_info: action-group dict containing an 'options' list
    :returns: a docutils ``option_list`` node
    """
    assert 'options' in parser_info
    list_items = []
    for opt in parser_info['options']:
        option_nodes = []
        description_parts = []
        # Show the default value next to each name unless it is suppressed.
        show_default = (opt['default'] is not None and
                        opt['default'] not in ['"==SUPPRESS=="', '==SUPPRESS=='])
        for name in opt['name']:
            declaration = [nodes.option_string(text=name)]
            if show_default:
                # NOTE(review): extending the list iterates the
                # option_argument node's children, which mirrors the
                # original ``+=`` semantics exactly — confirm whether the
                # argument node itself was meant to be appended instead.
                declaration.extend(nodes.option_argument(
                    '', text='=' + str(opt['default'])))
            option_nodes.append(nodes.option('', *declaration))
        if opt['help']:
            description_parts.append(nodes.paragraph(text=opt['help']))
        elif 'choices' not in opt:
            description_parts.append(nodes.paragraph(text='Undocumented'))
        if 'choices' in opt:
            description_parts.append(nodes.paragraph(
                text='Possible choices: ' + ', '.join(opt['choices'])))
        list_items.append(nodes.option_list_item(
            '', nodes.option_group('', *option_nodes),
            nodes.description('', *description_parts)))
    return nodes.option_list('', *list_items)
def _format_subcommands(self, parser_info):
    """Build a docutils definition list describing the parser's sub-commands.

    :param parser_info: dict produced by parse_parser(); must contain 'children'
    :returns: a docutils ``definition_list`` node
    """
    assert 'children' in parser_info
    entries = []
    for subcmd in parser_info['children']:
        # Each sub-command becomes a bold usage term plus a help definition.
        help_text = subcmd['help'] if subcmd['help'] else 'Undocumented'
        term = nodes.term('', '', nodes.strong(text=subcmd['bare_usage']))
        definition = nodes.definition('', nodes.paragraph(text=help_text))
        entries.append(nodes.definition_list_item('', term, definition))
    return nodes.definition_list('', *entries)
def _nested_parse_paragraph(self, text):
    """Parse *text* as ReST and return it wrapped in a paragraph node."""
    content = nodes.paragraph()
    # Split into a StringList so the docutils state machine can consume it.
    self.state.nested_parse(StringList(text.split("\n")), 0, content)
    return content
def run(self):
    """Directive entry point: locate the ArgumentParser and render it.

    The parser is found via ``:module:``/``:func:``, ``:ref:``, or
    ``:filename:``/``:func:``, introspected with parse_parser(), and
    rendered (description, usage, argument groups, sub-commands, epilog)
    as a list of docutils nodes.

    :returns: list of docutils nodes
    :raises: ``self.error`` when the parser cannot be located or imported
    """
    if 'module' in self.options and 'func' in self.options:
        module_name = self.options['module']
        attr_name = self.options['func']
    elif 'ref' in self.options:
        _parts = self.options['ref'].split('.')
        module_name = '.'.join(_parts[0:-1])
        attr_name = _parts[-1]
    elif 'filename' in self.options and 'func' in self.options:
        mod = {}
        try:
            f = open(self.options['filename'])
        except IOError:
            # try open with abspath
            f = open(os.path.abspath(self.options['filename']))
        try:
            # FIX: close the file handle instead of leaking it.
            code = compile(f.read(), self.options['filename'], 'exec')
        finally:
            f.close()
        exec(code, mod)
        attr_name = self.options['func']
        func = mod[attr_name]
    else:
        raise self.error(
            ':module: and :func: should be specified, or :ref:, or :filename: and :func:')
    # Skip this if we're dealing with a local file, since it obviously can't be imported
    if 'filename' not in self.options:
        try:
            mod = __import__(module_name, globals(), locals(), [attr_name])
        except Exception:
            # FIX: was a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt.
            raise self.error('Failed to import "%s" from "%s".\n%s' % (attr_name, module_name, sys.exc_info()[1]))
        if not hasattr(mod, attr_name):
            raise self.error((
                'Module "%s" has no attribute "%s"\n'
                'Incorrect argparse :module: or :func: values?'
            ) % (module_name, attr_name))
        func = getattr(mod, attr_name)
    # The target may be a parser instance, a factory returning one, or
    # (with :passparser:) a function that populates a parser we create.
    if isinstance(func, ArgumentParser):
        parser = func
    elif 'passparser' in self.options:
        parser = ArgumentParser()
        func(parser)
    else:
        parser = func()
    if 'path' not in self.options:
        self.options['path'] = ''
    path = str(self.options['path'])
    if 'prog' in self.options:
        parser.prog = self.options['prog']
    result = parse_parser(
        parser, skip_default_values='nodefault' in self.options, skip_default_const_values='nodefaultconst' in self.options)
    result = parser_navigate(result, path)
    if 'manpage' in self.options:
        return self._construct_manpage_specific_structure(result)
    # Handle nested content, where markdown needs to be preprocessed
    items = []
    nested_content = nodes.paragraph()
    if 'markdown' in self.options:
        from sphinxarg.markdown import parseMarkDownBlock
        items.extend(parseMarkDownBlock('\n'.join(self.content) + '\n'))
    else:
        self.state.nested_parse(
            self.content, self.content_offset, nested_content)
        nested_content = nested_content.children
    # add common content between
    for item in nested_content:
        if not isinstance(item, nodes.definition_list):
            items.append(item)
    markDownHelp = 'markdownhelp' in self.options
    if 'description' in result and 'nodescription' not in self.options:
        if markDownHelp:
            items.extend(renderList([result['description']], True))
        else:
            items.append(self._nested_parse_paragraph(result['description']))
    items.append(nodes.literal_block(text=result['usage']))
    items.extend(print_action_groups(result, nested_content, markDownHelp,
                                     settings=self.state.document.settings))
    if 'nosubcommands' not in self.options:
        items.extend(print_subcommands(result, nested_content, markDownHelp,
                                       settings=self.state.document.settings))
    if 'epilog' in result and 'noepilog' not in self.options:
        items.append(self._nested_parse_paragraph(result['epilog']))
    # Traverse the returned nodes, modifying the title IDs as necessary to avoid repeats
    ensureUniqueIDs(items)
    return items
def setup(app):
    """Sphinx extension entry point: register the ``argparse`` directive."""
    app.add_directive('argparse', ArgParseDirective)
|
ribozz/sphinx-argparse | sphinxarg/ext.py | ArgParseDirective._construct_manpage_specific_structure | python | def _construct_manpage_specific_structure(self, parser_info):
items = []
# SYNOPSIS section
synopsis_section = nodes.section(
'',
nodes.title(text='Synopsis'),
nodes.literal_block(text=parser_info["bare_usage"]),
ids=['synopsis-section'])
items.append(synopsis_section)
# DESCRIPTION section
if 'nodescription' not in self.options:
description_section = nodes.section(
'',
nodes.title(text='Description'),
nodes.paragraph(text=parser_info.get(
'description', parser_info.get(
'help', "undocumented").capitalize())),
ids=['description-section'])
nested_parse_with_titles(
self.state, self.content, description_section)
items.append(description_section)
if parser_info.get('epilog') and 'noepilog' not in self.options:
# TODO: do whatever sphinx does to understand ReST inside
# docstrings magically imported from other places. The nested
# parse method invoked above seem to be able to do this but
# I haven't found a way to do it for arbitrary text
if description_section:
description_section += nodes.paragraph(
text=parser_info['epilog'])
else:
description_section = nodes.paragraph(
text=parser_info['epilog'])
items.append(description_section)
# OPTIONS section
options_section = nodes.section(
'',
nodes.title(text='Options'),
ids=['options-section'])
if 'args' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Positional arguments:')
options_section += self._format_positional_arguments(parser_info)
for action_group in parser_info['action_groups']:
if 'options' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text=action_group['title'])
options_section += self._format_optional_arguments(action_group)
# NOTE: we cannot generate NAME ourselves. It is generated by
# docutils.writers.manpage
# TODO: items.append(files)
# TODO: items.append(see also)
# TODO: items.append(bugs)
if len(options_section.children) > 1:
items.append(options_section)
if 'nosubcommands' not in self.options:
# SUBCOMMANDS section (non-standard)
subcommands_section = nodes.section(
'',
nodes.title(text='Sub-Commands'),
ids=['subcommands-section'])
if 'children' in parser_info:
subcommands_section += self._format_subcommands(parser_info)
if len(subcommands_section) > 1:
items.append(subcommands_section)
if os.getenv("INCLUDE_DEBUG_SECTION"):
import json
# DEBUG section (non-standard)
debug_section = nodes.section(
'',
nodes.title(text="Argparse + Sphinx Debugging"),
nodes.literal_block(text=json.dumps(parser_info, indent=' ')),
ids=['debug-section'])
items.append(debug_section)
return items | Construct a typical man page consisting of the following elements:
NAME (automatically generated, out of our control)
SYNOPSIS
DESCRIPTION
OPTIONS
FILES
SEE ALSO
BUGS | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/ext.py#L258-L343 | null | class ArgParseDirective(Directive):
has_content = True
option_spec = dict(module=unchanged, func=unchanged, ref=unchanged,
prog=unchanged, path=unchanged, nodefault=flag,
nodefaultconst=flag, filename=unchanged,
manpage=unchanged, nosubcommands=unchanged, passparser=flag,
noepilog=unchanged, nodescription=unchanged,
markdown=flag, markdownhelp=flag)
def _format_positional_arguments(self, parser_info):
assert 'args' in parser_info
items = []
for arg in parser_info['args']:
arg_items = []
if arg['help']:
arg_items.append(nodes.paragraph(text=arg['help']))
elif 'choices' not in arg:
arg_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
arg_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(arg['choices'])))
items.append(
nodes.option_list_item(
'',
nodes.option_group(
'', nodes.option(
'', nodes.option_string(text=arg['metavar'])
)
),
nodes.description('', *arg_items)))
return nodes.option_list('', *items)
def _format_optional_arguments(self, parser_info):
assert 'options' in parser_info
items = []
for opt in parser_info['options']:
names = []
opt_items = []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] not in ['"==SUPPRESS=="', '==SUPPRESS==']:
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
if opt['help']:
opt_items.append(nodes.paragraph(text=opt['help']))
elif 'choices' not in opt:
opt_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
opt_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(opt['choices'])))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *opt_items)))
return nodes.option_list('', *items)
def _format_subcommands(self, parser_info):
assert 'children' in parser_info
items = []
for subcmd in parser_info['children']:
subcmd_items = []
if subcmd['help']:
subcmd_items.append(nodes.paragraph(text=subcmd['help']))
else:
subcmd_items.append(nodes.paragraph(text='Undocumented'))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(
text=subcmd['bare_usage'])),
nodes.definition('', *subcmd_items)))
return nodes.definition_list('', *items)
def _nested_parse_paragraph(self, text):
content = nodes.paragraph()
self.state.nested_parse(StringList(text.split("\n")), 0, content)
return content
def run(self):
if 'module' in self.options and 'func' in self.options:
module_name = self.options['module']
attr_name = self.options['func']
elif 'ref' in self.options:
_parts = self.options['ref'].split('.')
module_name = '.'.join(_parts[0:-1])
attr_name = _parts[-1]
elif 'filename' in self.options and 'func' in self.options:
mod = {}
try:
f = open(self.options['filename'])
except IOError:
# try open with abspath
f = open(os.path.abspath(self.options['filename']))
code = compile(f.read(), self.options['filename'], 'exec')
exec(code, mod)
attr_name = self.options['func']
func = mod[attr_name]
else:
raise self.error(
':module: and :func: should be specified, or :ref:, or :filename: and :func:')
# Skip this if we're dealing with a local file, since it obviously can't be imported
if 'filename' not in self.options:
try:
mod = __import__(module_name, globals(), locals(), [attr_name])
except:
raise self.error('Failed to import "%s" from "%s".\n%s' % (attr_name, module_name, sys.exc_info()[1]))
if not hasattr(mod, attr_name):
raise self.error((
'Module "%s" has no attribute "%s"\n'
'Incorrect argparse :module: or :func: values?'
) % (module_name, attr_name))
func = getattr(mod, attr_name)
if isinstance(func, ArgumentParser):
parser = func
elif 'passparser' in self.options:
parser = ArgumentParser()
func(parser)
else:
parser = func()
if 'path' not in self.options:
self.options['path'] = ''
path = str(self.options['path'])
if 'prog' in self.options:
parser.prog = self.options['prog']
result = parse_parser(
parser, skip_default_values='nodefault' in self.options, skip_default_const_values='nodefaultconst' in self.options)
result = parser_navigate(result, path)
if 'manpage' in self.options:
return self._construct_manpage_specific_structure(result)
# Handle nested content, where markdown needs to be preprocessed
items = []
nested_content = nodes.paragraph()
if 'markdown' in self.options:
from sphinxarg.markdown import parseMarkDownBlock
items.extend(parseMarkDownBlock('\n'.join(self.content) + '\n'))
else:
self.state.nested_parse(
self.content, self.content_offset, nested_content)
nested_content = nested_content.children
# add common content between
for item in nested_content:
if not isinstance(item, nodes.definition_list):
items.append(item)
markDownHelp = False
if 'markdownhelp' in self.options:
markDownHelp = True
if 'description' in result and 'nodescription' not in self.options:
if markDownHelp:
items.extend(renderList([result['description']], True))
else:
items.append(self._nested_parse_paragraph(result['description']))
items.append(nodes.literal_block(text=result['usage']))
items.extend(print_action_groups(result, nested_content, markDownHelp,
settings=self.state.document.settings))
if 'nosubcommands' not in self.options:
items.extend(print_subcommands(result, nested_content, markDownHelp,
settings=self.state.document.settings))
if 'epilog' in result and 'noepilog' not in self.options:
items.append(self._nested_parse_paragraph(result['epilog']))
# Traverse the returned nodes, modifying the title IDs as necessary to avoid repeats
ensureUniqueIDs(items)
return items
|
erikrose/peep | peep.py | activate | python | def activate(specifier):
try:
for distro in require(specifier):
distro.activate()
except (VersionConflict, DistributionNotFound):
raise RuntimeError('The installed version of pip is too old; peep '
'requires ' + specifier) | Make a compatible version of pip importable. Raise a RuntimeError if we
couldn't. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L65-L73 | null | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
    class NullProgressBar(object):
        """Fallback for when pip's progress-bar helpers can't be imported.

        Accepts any constructor arguments and returns the iterable
        unchanged from :meth:`iter`, so callers can use it exactly like
        pip's DownloadProgressBar/DownloadProgressSpinner.
        """
        def __init__(self, *args, **kwargs):
            pass

        def iter(self, ret, *args, **kwargs):
            # Pass the iterable through without drawing any progress UI.
            return ret
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
    """Raised when a delegated pip invocation exits with an error."""

    def __init__(self, error_code):
        # Preserve pip's shell status code so callers can propagate it.
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """Raised when a requirements file contains a line peep cannot handle."""
class DownloadError(Exception):
    """Raised when fetching a requirement's archive fails.

    Carries the failing link and a stringified reason.
    """

    def __init__(self, link, exc):
        self.link, self.reason = link, str(exc)

    def __str__(self):
        return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash object.

    This matches the encoding the Wheel format (PEP 427) uses — urlsafe
    base64 with the trailing '=' padding stripped — except here it's
    intended to be run across the downloaded archive before unpacking.
    """
    digest = sha.digest()
    encoded = urlsafe_b64encode(digest).decode('ascii')
    return encoded.rstrip('=')
def path_and_line(req):
    """Return ``(path, line_number)`` of the requirements-file entry that
    produced the given InstallRequirement.

    Parses the "-r <path> (line <n>)" format pip stores in ``comes_from``.
    """
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    path, line = match.groups()
    return path, int(line)
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line ``line_number``.
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.

        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    yield []
    # Pick out the hash list whose position matches the requirement's
    # 1-based line number as the installed pip counts lines.
    return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong."""
    status_code = pip.main(initial_args)

    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []

    # pip returns a non-zero shell status code on failure.
    if status_code:
        raise PipException(status_code)
def hash_of_file(path):
    """Return the peep hash of the file at *path*."""
    sha = sha256()
    with open(path, 'rb') as archive:
        # Feed the file through in 1 MB chunks to bound memory use;
        # iter()'s sentinel form stops at EOF (empty read).
        for chunk in iter(lambda: archive.read(2 ** 20), b''):
            sha.update(chunk)
    return encoded_hash(sha)
def is_git_sha(text):
    """Return whether *text* is plausibly a git commit sha.

    Accepts both the full 40-character sha and the 7-character
    abbreviation, requiring the text to parse as hexadecimal.
    """
    if len(text) not in (40, 7):
        return False
    try:
        int(text, 16)
    except ValueError:
        return False
    return True
def filename_from_url(url):
    """Return the last path segment of *url* — its presumed filename."""
    path = urlparse(url).path
    return path.rsplit('/', 1)[-1]
def requirement_args(argv, want_paths=False, want_other=False):
    """Yield a filtered view of command-line arguments.

    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, include each path following a ``-r`` or
        ``--requirement`` option.
    :arg want_other: If True, include the args that are neither a
        requirements-file path nor a ``-r``/``--requirement`` flag.
    """
    args = iter(argv)
    for arg in args:
        if arg in ('-r', '--requirement'):
            # The next arg (if any) is a requirements-file path, even if
            # it happens to look like a flag itself; a trailing bare -r
            # is simply ignored.
            path = next(args, None)
            if path is not None and want_paths:
                yield path
        elif want_other:
            yield arg
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Print a peep hash line for each file named on the command line.

    :arg argv: The commandline args, starting after the subcommand
    :returns: a shell status code (may also raise a PipException)
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if not paths:
        # No files given: show usage and signal a CLI error.
        parser.print_usage()
        return COMMAND_LINE_ERROR
    for path in paths:
        print('# sha256:', hash_of_file(path))
    return ITS_FINE_ITS_FINE
class EmptyOptions(object):
    """Stand-in for optparse options, for compatibility with pip<1.2.

    pip<1.2 had a bug in parse_requirements() in which the ``options``
    kwarg was required; this mock satisfies it with inert defaults.
    """
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
isolated_mode = False
def memoize(func):
    """Memoize a no-argument method so it runs at most once per instance.

    Results are stored per instance in ``self._cache``, keyed by the
    wrapped function's name.
    """
    @wraps(func)
    def memoizer(self):
        # Lazily create the per-instance cache on first use.
        if not hasattr(self, '_cache'):
            self._cache = {}
        cache = self._cache
        key = func.__name__
        if key not in cache:
            cache[key] = func(self)
        return cache[key]
    return memoizer
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top=level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)
    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        kw, attr = option if isinstance(option, tuple) else (option, option)
        # MARKER distinguishes "option missing on this pip version" from a
        # legitimate falsy option value.
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value
    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])
    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)
    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
    """Download a requirement, compare its hashes, and return a subclass
    of DownloadedReq depending on its state.

    :arg req: The InstallRequirement I am based on
    :arg argv: The args, starting after the subcommand
    :arg finder: The PackageFinder used to resolve the requirement
    """
    self._req = req
    self._argv = argv
    self._finder = finder

    # We use a separate temp dir for each requirement so requirements
    # (from different indices) that happen to have the same archive names
    # don't overwrite each other, leading to a security hole in which the
    # latter is a hash mismatch, the former has already passed the
    # comparison, and the latter gets installed.
    self._temp_path = mkdtemp(prefix='peep-')
    # Think of DownloadedReq as a one-shot state machine. It's an abstract
    # class that ratchets forward to being one of its own subclasses,
    # depending on its package status. Then it doesn't move again.
    self.__class__ = self._class()
def dispose(self):
    """Remove my temp dir and everything I downloaded into it.

    Renders me useless; do not call further methods on me after calling
    dispose().
    """
    rmtree(self._temp_path)
def _version(self):
    """Deduce the version number of the downloaded package from its
    filename, handling both plain archives and wheels."""
    # TODO: Can we delete this method and just print the line from the
    # reqs file verbatim instead?
    def version_of_archive(filename, package_name):
        # Since we know the project_name, we can strip that off the left, strip
        # any archive extensions off the right, and take the rest as the
        # version.
        for ext in ARCHIVE_EXTENSIONS:
            if filename.endswith(ext):
                filename = filename[:-len(ext)]
                break
        # Handle github sha tarball downloads.
        if is_git_sha(filename):
            filename = package_name + '-' + filename
        if not filename.lower().replace('_', '-').startswith(package_name.lower()):
            # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
            give_up(filename, package_name)
        return filename[len(package_name) + 1:]  # Strip off '-' before version.

    def version_of_wheel(filename, package_name):
        # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
        # name-convention) we know the format bits are '-' separated.
        whl_package_name, version, _rest = filename.split('-', 2)
        # Do the alteration to package_name from PEP 427:
        # FIX: re.UNICODE was previously passed as re.sub's 4th positional
        # arg, which is *count*, not *flags*; pass it as flags= explicitly.
        our_package_name = re.sub(r'[^\w\d.]+', '_', package_name,
                                  flags=re.UNICODE)
        if whl_package_name != our_package_name:
            give_up(filename, whl_package_name)
        return version

    def give_up(filename, package_name):
        # Raising (rather than returning a sentinel) keeps the callers simple.
        raise RuntimeError("The archive '%s' didn't start with the package name "
                           "'%s', so I couldn't figure out the version number. "
                           "My bad; improve me." %
                           (filename, package_name))

    get_version = (version_of_wheel
                   if self._downloaded_filename().endswith('.whl')
                   else version_of_archive)
    return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
    """Return whether this requirement can never be seen as satisfied.

    A github sha tarball has a commit sha (not a version number) in its
    URL, so no installed version can ever be matched against it.
    """
    url = self._url()
    if not url:
        return False
    filename = filename_from_url(url)
    if not filename.endswith(ARCHIVE_EXTENSIONS):
        return False
    stem, ext = splitext(filename)
    return is_git_sha(stem)
@memoize  # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
    """Return the known-good hashes for this package, read from the
    comment lines above its requirements-file entry."""
    path, line = path_and_line(self._req)
    return hashes_above(path, line)
def _download(self, link):
    """Download a file, and return its name within my temp dir.

    This does no verification of HTTPS certs, but our checking hashes
    makes that largely unimportant. It would be nice to be able to use the
    requests lib, which can verify certs, but it is guaranteed to be
    available only in pip >= 1.5.

    This also drops support for proxies and basic auth, though those could
    be added back in.
    """
    # Based on pip 1.4.1's URLOpener but with cert verification removed
    def opener(is_https):
        if is_https:
            opener = build_opener(HTTPSHandler())
            # Strip out HTTPHandler to prevent MITM spoof:
            for handler in opener.handlers:
                if isinstance(handler, HTTPHandler):
                    opener.handlers.remove(handler)
        else:
            opener = build_opener()
        return opener

    # Descended from unpack_http_url() in pip 1.4.1
    def best_filename(link, response):
        """Return the most informative possible filename for a download,
        ideally with a proper extension.
        """
        content_type = response.info().get('content-type', '')
        filename = link.filename  # fallback
        # Have a look at the Content-Disposition header for a better guess:
        content_disposition = response.info().get('content-disposition')
        if content_disposition:
            type, params = cgi.parse_header(content_disposition)
            # We use ``or`` here because we don't want to use an "empty" value
            # from the filename param:
            filename = params.get('filename') or filename
        ext = splitext(filename)[1]
        if not ext:
            # No extension on the name: try the MIME type, then the
            # (possibly redirected) final URL.
            ext = mimetypes.guess_extension(content_type)
            if ext:
                filename += ext
        if not ext and link.url != response.geturl():
            ext = splitext(response.geturl())[1]
            if ext:
                filename += ext
        return filename

    # Descended from _download_url() in pip 1.4.1
    def pipe_to_file(response, path, size=0):
        """Pull the data off an HTTP response, shove it in a new file, and
        show progress.

        :arg response: A file-like object to read from
        :arg path: The path of the new file
        :arg size: The expected size, in bytes, of the download. 0 for
            unknown or to suppress progress indication (as for cached
            downloads)
        """
        def response_chunks(chunk_size):
            while True:
                chunk = response.read(chunk_size)
                if not chunk:
                    break
                yield chunk
        print('Downloading %s%s...' % (
            self._req.req,
            (' (%sK)' % (size / 1000)) if size > 1000 else ''))
        progress_indicator = (DownloadProgressBar(max=size).iter if size
                              else DownloadProgressSpinner().iter)
        with open(path, 'wb') as file:
            for chunk in progress_indicator(response_chunks(4096), 4096):
                file.write(chunk)

    # The fragment (e.g. "#md5=...") is pip metadata, not part of the URL.
    url = link.url.split('#', 1)[0]
    try:
        response = opener(urlparse(url).scheme != 'http').open(url)
    except (HTTPError, IOError) as exc:
        raise DownloadError(link, exc)
    filename = best_filename(link, response)
    try:
        size = int(response.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        # Missing or malformed header: suppress the progress total.
        size = 0
    pipe_to_file(response, join(self._temp_path, filename), size=size)
    return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize  # Avoid re-downloading.
def _downloaded_filename(self):
    """Download the package's archive if necessary, and return its
    filename.

    --no-deps is implied, as we have reimplemented the bits that would
    ordinarily do dependency resolution.
    """
    # Peep doesn't support requirements that don't come down as a single
    # file, because it can't hash them. Thus, it doesn't support editable
    # requirements, because pip itself doesn't support editable
    # requirements except for "local projects or a VCS url". Nor does it
    # support VCS requirements yet, because we haven't yet come up with a
    # portable, deterministic way to hash them. In summary, all we support
    # is == requirements and tarballs/zips/etc.

    # TODO: Stop on reqs that are editable or aren't ==.

    # If the requirement isn't already specified as a URL, get a URL
    # from an index:
    link = self._link() or self._finder.find_requirement(self._req, upgrade=False)

    # A resolved Link may use HTTP(S), a local file path, or something
    # unsupported (e.g. a VCS scheme).
    if link:
        lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
        if lower_scheme == 'http' or lower_scheme == 'https':
            file_path = self._download(link)
            return basename(file_path)
        elif lower_scheme == 'file':
            # The following is inspired by pip's unpack_file_url():
            link_path = url_to_path(link.url_without_fragment)
            if isdir(link_path):
                raise UnsupportedRequirementError(
                    "%s: %s is a directory. So that it can compute "
                    "a hash, peep supports only filesystem paths which "
                    "point to files" %
                    (self._req, link.url_without_fragment))
            else:
                copy(link_path, self._temp_path)
                return basename(link_path)
        else:
            raise UnsupportedRequirementError(
                "%s: The download link, %s, would not result in a file "
                "that can be hashed. Peep supports only == requirements, "
                "file:// URLs pointing to files (not folders), and "
                "http:// and https:// URLs pointing to tarballs, zips, "
                "etc." % (self._req, link.url))
    else:
        raise UnsupportedRequirementError(
            "%s: couldn't determine where to download this requirement from."
            % (self._req,))
def install(self):
    """Install the package I represent, without dependencies.

    Obey typical pip-install options passed in on the command line.
    """
    # Forward all CLI args except the -r/--requirement pairs themselves:
    other_args = list(requirement_args(self._argv, want_other=True))
    archive_path = join(self._temp_path, self._downloaded_filename())
    # -U so it installs whether pip deems the requirement "satisfied" or
    # not. This is necessary for GitHub-sourced zips, which change without
    # their version numbers changing.
    run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
    """Download the package's archive if necessary, and return its hash."""
    # Hash the archive exactly as downloaded, before any unpacking:
    return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
    """Return the inner Requirement's "unsafe name".

    Raise ValueError if there is no name.
    """
    inner = self._req.req
    # Newer pip exposes 'project_name'; older exposes 'name', which needs
    # the pkg_resources safe_name() normalization applied.
    preferred = getattr(inner, 'project_name', '')
    if preferred:
        return preferred
    fallback = getattr(inner, 'name', '')
    if fallback:
        return safe_name(fallback)
    raise ValueError('Requirement has no project_name.')
def _name(self):
    # The requirement's name as parsed by pip.
    return self._req.name
def _link(self):
    """Return the requirement's Link, or None if there is no URL.

    Falls back to the deprecated ``url`` attribute for pip < 6.1.0.
    """
    req = self._req
    if hasattr(req, 'link'):
        return req.link
    # Pre-6.1.0 pip: only the now-deprecated 'url' attribute exists.
    if req.url:
        return Link(req.url)
    return None
def _url(self):
    """Return the requirement's URL as a string, or None if it has none."""
    found = self._link()
    if not found:
        return None
    return found.url
@memoize  # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
    """Return whether the requirement is already installed."""
    self._req.check_if_exists()
    # A requirement that can never be proven satisfied (e.g. a git-sha
    # tarball, per _is_always_unsatisfied()) is treated as unsatisfied even
    # if pip thinks something matching is installed.
    return (self._req.satisfied_by and
            not self._is_always_unsatisfied())
def _class(self):
    """Return the class I should be, spanning a continuum of goodness."""
    try:
        self._project_name()
    except ValueError:
        # No usable package name: we can't even report on this one properly.
        return MalformedReq
    if self._is_satisfied():
        return SatisfiedReq
    if not self._expected_hashes():
        return MissingReq
    if self._actual_hash() not in self._expected_hashes():
        return MismatchedReq
    return InstallableReq
@classmethod
def foot(cls):
    """Return the text to be printed once, after all of the errors from
    classes of my type are printed.
    """
    # Default: no trailer. Subclasses (e.g. MismatchedReq) may override.
    return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""

    @classmethod
    def head(cls):
        # Printed once, before the errors of all MalformedReqs.
        return 'The following requirements could not be processed:\n'

    def error(self):
        return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""

    @classmethod
    def head(cls):
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        if self._url():
            # _url() always contains an #egg= part, or this would be a
            # MalformedRequest.
            line = self._url()
        else:
            line = '%s==%s' % (self._name(), self._version())
        # Emit a ready-to-paste hash comment plus the requirement line:
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""

    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")

    def error(self):
        preamble = ' %s: expected' % self._project_name()
        if len(self._expected_hashes()) > 1:
            preamble += ' one of'
        # Pad continuation lines so each expected hash lines up under the first:
        padding = '\n' + ' ' * (len(preamble) + 1)
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(self._expected_hashes()),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())

    @classmethod
    def foot(cls):
        # Blank line to set this section off from whatever follows.
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""

    @classmethod
    def head(cls):
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")

    def error(self):
        return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed"""
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported. Each class's head()/error()/foot() supply the user-facing text:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Return a map of key -> list of things.

    The result is a defaultdict, so looking up an absent key yields [] —
    callers rely on that.
    """
    groups = defaultdict(list)
    for item in things:
        groups[key(item)].append(item)
    return groups
def first_every_last(iterable, first, every, last):
    """Execute something before the first item of iter, something else for each
    item, and a third thing after the last.

    If there are no items in the iterable, don't execute anything.
    """
    iterator = iter(iterable)
    try:
        item = next(iterator)
    except StopIteration:
        return  # Empty iterable: run nothing at all.
    first(item)
    every(item)
    for item in iterator:
        every(item)
    last(item)
def _parse_requirements(path, finder):
    """Parse a requirements file into InstallRequirements, papering over
    signature differences across pip versions."""
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    finder = package_finder(argv)
    # NOTE: constructing a DownloadedReq downloads and classifies the req.
    return [DownloadedReq(req, argv, finder) for req in
            _parse_requirements(path, finder)]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    output = []
    out = output.append  # Buffer messages; they're printed once, at the end.
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR

        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        buckets = bucket(reqs, lambda r: r.__class__)

        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')

        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)

        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            for req in buckets[InstallableReq]:
                req.install()

            first_every_last(buckets[SatisfiedReq], *printers)

            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Clean up each req's temp dir, then emit everything we buffered:
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatble with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # Re-encode each urlsafe-base64 peep hash as the hex digest pip 8's
        # --hash option expects (the '=' restores stripped b64 padding):
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            # Starting a new source file: emit a header comment.
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path

        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print(' --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    handlers = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        if len(argv) >= 2 and argv[1] in handlers:
            subcommand = handlers[argv[1]]
            return subcommand(argv[2:])
        # Fall through to top-level pip main() for everything else:
        return pip.main()
    except PipException as exc:
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a bug-report template for an unhandled exception, including
    version particulars and the formatted traceback."""
    report = [
        'Oh no! Peep had a problem while trying to do stuff. Please write up a bug report',
        'with the specifics so we can fix it:',
        '',
        'https://github.com/erikrose/peep/issues/new',
        '',
        'Here are some particulars you can copy and paste into the bug report:',
        '',
        '---',
        'peep: %r' % (__version__,),
        'python: %r' % (sys.version,),
        'pip: %r' % (getattr(pip, '__version__', 'no __version__ attr'),),
        'Command line:  %r' % (sys.argv,),
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)),
        '---',
    ]
    print('\n'.join(report))
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Last-ditch handler: print a bug-report template instead of a bare
        # traceback, and exit with a distinctive status code.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
def path_and_line(req):
    """Return the path and line number of the file from which an
    InstallRequirement came.

    pip formats ``comes_from`` as ``-r <path> (line <n>)``; we parse it back
    apart so hashes_above() can find the hash comments for this requirement.
    """
    path, line = (re.match(r'-r (.*) \(line (\d+)\)$',
                           req.comes_from).groups())
    return path, int(line)
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable. Raise a RuntimeError if we
    couldn't."""
    try:
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        # Either the installed pip is too old or none is installed at all.
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Ensure a sufficiently new pip is importable before the imports below.
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2

try:
    from pip.index import FormatControl  # noqa
    FORMAT_CONTROL_ARG = 'format_control'

    # The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
    PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
    PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
    FORMAT_CONTROL_ARG = 'use_wheel'  # pre-7
    PIP_COUNTS_COMMENTS = True

# Shell exit statuses:
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3

ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')

# Sentinel meaning "this option doesn't exist on this pip version";
# used by package_finder():
MARKER = object()
class PipException(Exception):
    """When I delegated to pip, it exited with an error."""

    def __init__(self, error_code):
        # The nonzero shell status pip returned; surfaced by main().
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """An unsupported line was encountered in a requirements file.

    Raised when a requirement can't be downloaded to a single hashable file.
    """
class DownloadError(Exception):
    """Raised when fetching a requirement's archive fails."""

    def __init__(self, link, exc):
        # Remember what we were fetching and a human-readable reason.
        self.link = link
        self.reason = str(exc)

    def __str__(self):
        return 'Downloading {0} failed: {1}'.format(self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash.

    If you pass a sha256, this results in the hash algorithm that the Wheel
    format (PEP 427) uses, except here it's intended to be run across the
    downloaded archive before unpacking.
    """
    encoded = urlsafe_b64encode(sha.digest()).decode('ascii')
    # Strip the '=' padding, as the Wheel format does.
    return encoded.rstrip('=')
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line ``line_number``.
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.

        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    yield []

    # The list whose position equals the requirement's line number is the run
    # of hash comments immediately above that requirement:
    return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong."""
    status = pip.main(initial_args)

    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []

    if status:
        raise PipException(status)
def hash_of_file(path):
    """Return the peep hash of the file at ``path``, reading it in chunks."""
    digest = sha256()
    with open(path, 'rb') as archive:
        # 1 MB at a time, until read() signals EOF with an empty chunk:
        for chunk in iter(lambda: archive.read(2 ** 20), b''):
            digest.update(chunk)
    return encoded_hash(digest)
def is_git_sha(text):
    """Return whether ``text`` is probably a git sha.

    Accepts both the full 40-character sha and the common 7-character
    abbreviation, and requires every character to be a hex digit.
    """
    # int(text, 16) was too lenient here: it also accepted strings like
    # '0x12345', '+abc123', and '1_23456', which are not git shas.
    return len(text) in (7, 40) and re.match(r'[0-9a-fA-F]+\Z', text) is not None
def filename_from_url(url):
    """Return the last path segment of *url* ('' if the path ends in '/')."""
    return urlparse(url).path.rsplit('/', 1)[-1]
def requirement_args(argv, want_paths=False, want_other=False):
    """Return an iterable of filtered arguments.

    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, the returned iterable includes the paths to any
        requirements files following a ``-r`` or ``--requirement`` option.
    :arg want_other: If True, the returned iterable includes the args that are
        not a requirement-file path or a ``-r`` or ``--requirement`` flag.
    """
    # Allow for requirements files named "-r", don't freak out if there's a
    # trailing "-r", etc.
    expecting_path = False
    for token in argv:
        if expecting_path:
            expecting_path = False
            if want_paths:
                yield token
        elif token in ('-r', '--requirement'):
            expecting_path = True
        elif want_other:
            yield token
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        for path in paths:
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        # No files given: show usage and signal a command-line error.
        parser.print_usage()
        return COMMAND_LINE_ERROR
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2

    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attributes parse_requirements() may read; all deliberately inert:
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Memoize a method that should return the same result every time on a
    given instance.

    The result is stored in a per-instance ``_cache`` dict, keyed by the
    method's name.
    """
    @wraps(func)
    def memoizer(self):
        if not hasattr(self, '_cache'):
            self._cache = {}
        cache = self._cache
        key = func.__name__
        if key not in cache:
            cache[key] = func(self)
        return cache[key]
    return memoizer
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top=level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)

    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        kw, attr = option if isinstance(option, tuple) else (option, option)
        value = getattr(options, attr, MARKER)
        # MARKER distinguishes "option absent on this pip version" from a
        # legitimate falsy value:
        if value is not MARKER:
            kwargs[kw] = value

    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])

    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)

    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash."""
        # _downloaded_filename() is itself memoized, so the download happens
        # at most once per instance.
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))
    def _project_name(self):
        """Return the inner Requirement's "unsafe name".
        Raise ValueError if there is no name.
        """
        # The attribute name varies across pip versions, so probe both:
        # 'project_name' first, then 'name' (normalized with safe_name()).
        name = getattr(self._req.req, 'project_name', '')
        if name:
            return name
        name = getattr(self._req.req, 'name', '')
        if name:
            return safe_name(name)
        raise ValueError('Requirement has no project_name.')
    def _name(self):
        # Thin delegation to the underlying InstallRequirement's name.
        return self._req.name
    def _link(self):
        """Return the requirement's Link, or None if it has no URL."""
        try:
            return self._req.link
        except AttributeError:
            # The link attribute isn't available prior to pip 6.1.0, so fall
            # back to the now deprecated 'url' attribute.
            return Link(self._req.url) if self._req.url else None
    def _url(self):
        """Return the requirement's URL as a string, or None if it has none."""
        link = self._link()
        return link.url if link else None
    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        """Return whether the active environment already meets this requirement."""
        self._req.check_if_exists()
        # Even an installed package counts as unsatisfied when the version
        # can't be read from its filename (e.g. GitHub sha tarballs):
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())
    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        # Checks run from worst outcome to best; the first that applies wins.
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        if self._is_satisfied():
            return SatisfiedReq
        if not self._expected_hashes():
            return MissingReq
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        return InstallableReq
    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.
        """
        # Default: nothing. Subclasses (e.g. MismatchedReq) override this.
        return ''
class MalformedReq(DownloadedReq):
    """A requirement from which no package name could be extracted"""

    @classmethod
    def head(cls):
        # Printed once, before the first MalformedReq error.
        return 'The following requirements could not be processed:\n'

    def error(self):
        url = self._url()
        return '* Unable to determine package name from URL %s; add #egg=' % url
class MissingReq(DownloadedReq):
    """A requirement the requirements file listed no hashes for"""

    @classmethod
    def head(cls):
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        # Prefer the URL (which always carries an #egg= part here, or this
        # would be a MalformedReq); otherwise fall back to name==version.
        line = self._url() or '%s==%s' % (self._name(), self._version())
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement whose downloaded archive matched none of its hashes"""

    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")

    def error(self):
        expected = self._expected_hashes()
        preamble = ' %s: expected' % self._project_name()
        if len(expected) > 1:
            preamble += ' one of'
        # Indent continuation hashes so they line up under the first one:
        padding = '\n' + ' ' * (len(preamble) + 1)
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(expected),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())

    @classmethod
    def foot(cls):
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement already met by the environment, so nothing was installed"""

    @classmethod
    def head(cls):
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")

    def error(self):
        return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed"""
    # Marker subclass: inherits all behavior. peep_install() calls .install()
    # on each of these once no ERROR_CLASSES buckets are populated.
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
# (peep_install() consults this list both to decide whether to abort and to
# order the error report.)
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Group *things* into a map of key(thing) -> list of things.

    Missing keys read as empty lists, since the result is a defaultdict.
    """
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Run callbacks over *iterable*: ``first`` before the first item,
    ``every`` for each item, and ``last`` after the final one.

    If the iterable is empty, no callback runs at all.
    """
    item = None
    seen_any = False
    for item in iterable:
        if not seen_any:
            first(item)
            seen_any = True
        every(item)
    if seen_any:
        # ``item`` still holds the final element here.
        last(item)
def _parse_requirements(path, finder):
    """Parse the requirements file at *path*, returning a list of
    InstallRequirements found by *finder*.

    Papers over the ``session`` kwarg that became mandatory in pip 6.0.
    """
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.
    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    # One finder, shared by every requirement in this file:
    finder = package_finder(argv)
    parsed = _parse_requirements(path, finder)
    return [DownloadedReq(req, argv, finder) for req in parsed]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.
    :arg argv: The commandline args, starting after the subcommand
    """
    # User-facing text is buffered in ``output`` and printed once, in the
    # finally block, so it appears after pip's own chatter.
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR
        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # Group requirements by their ratcheted DownloadedReq subclass:
        buckets = bucket(reqs, lambda r: r.__class__)
        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)
        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            # All hashes checked out; install, then report what was skipped.
            for req in buckets[InstallableReq]:
                req.install()
            first_every_last(buckets[SatisfiedReq], *printers)
            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always clean up temp dirs, and flush the buffered report.
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.
    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    # NOTE(review): ``argv`` below is the module-level ``sys.argv`` import
    # (the full command line, subcommand included), not ``paths`` — confirm
    # package_finder() is meant to see it.
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # Peep stores hashes unpadded; restore the '=' so b64decode accepts
        # them, then re-encode as the hex pip 8 expects.
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path
        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print('    --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        subcommand = argv[1] if len(argv) >= 2 else None
        if subcommand in commands:
            return commands[subcommand](argv[2:])
        # Fall through to top-level pip main() for everything else:
        return pip.main()
    except PipException as exc:
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a bug-report banner plus the particulars of an unhandled crash."""
    for line in (
            'Oh no! Peep had a problem while trying to do stuff. Please write up a bug report',
            'with the specifics so we can fix it:',
            '',
            'https://github.com/erikrose/peep/issues/new',
            '',
            'Here are some particulars you can copy and paste into the bug report:',
            ''):
        print(line)
    print('---')
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Anything main() didn't turn into a status code is a peep bug:
        # show the bug-report banner instead of a bare traceback.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | hashes_above | python | def hashes_above(path, line_number):
def hash_lists(path):
"""Yield lists of hashes appearing between non-comment lines.
The lists will be in order of appearance and, for each non-empty
list, their place in the results will coincide with that of the
line number of the corresponding result from `parse_requirements`
(which changed in pip 7.0 to not count comments).
"""
hashes = []
with open(path) as file:
for lineno, line in enumerate(file, 1):
match = HASH_COMMENT_RE.match(line)
if match: # Accumulate this hash.
hashes.append(match.groupdict()['hash'])
if not IGNORED_LINE_RE.match(line):
yield hashes # Report hashes seen so far.
hashes = []
elif PIP_COUNTS_COMMENTS:
# Comment: count as normal req but have no hashes.
yield []
return next(islice(hash_lists(path), line_number - 1, None)) | Yield hashes from contiguous comment lines before line ``line_number``. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L174-L200 | [
"def hash_lists(path):\n \"\"\"Yield lists of hashes appearing between non-comment lines.\n\n The lists will be in order of appearance and, for each non-empty\n list, their place in the results will coincide with that of the\n line number of the corresponding result from `parse_requirements`\n (which changed in pip 7.0 to not count comments).\n\n \"\"\"\n hashes = []\n with open(path) as file:\n for lineno, line in enumerate(file, 1):\n match = HASH_COMMENT_RE.match(line)\n if match: # Accumulate this hash.\n hashes.append(match.groupdict()['hash'])\n if not IGNORED_LINE_RE.match(line):\n yield hashes # Report hashes seen so far.\n hashes = []\n elif PIP_COUNTS_COMMENTS:\n # Comment: count as normal req but have no hashes.\n yield []\n"
] | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a pip matching *specifier* importable.

    Raise RuntimeError if no suitable pip is installed.
    """
    try:
        distributions = require(specifier)
        for dist in distributions:
            dist.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
    """Raised when a delegated pip run exits with a nonzero status."""

    def __init__(self, error_code):
        # Stash pip's exit status so main() can return it to the shell.
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """An unsupported line was encountered in a requirements file."""
    # Raised by _downloaded_filename() for requirements peep can't hash:
    # directories, and links that aren't http(s)/file URLs.
class DownloadError(Exception):
    """Raised when fetching a requirement's archive fails."""

    def __init__(self, link, exc):
        # Remember the failing link and a rendered reason for the message.
        self.link = link
        self.reason = str(exc)

    def __str__(self):
        return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash object.

    This is the Wheel (PEP 427) encoding — urlsafe base64 with the '='
    padding stripped — but applied to the archive before unpacking.
    """
    digest = sha.digest()
    encoded = urlsafe_b64encode(digest).decode('ascii')
    return encoded.rstrip('=')
def path_and_line(req):
    """Return the (path, line number) of the requirements file an
    InstallRequirement came from, parsed out of its ``comes_from`` text.
    """
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    path, line = match.groups()
    return path, int(line)
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong.
    :arg initial_args: Full pip argument list, e.g. ['install', ...]
    """
    status_code = pip.main(initial_args)
    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []
    if status_code:
        raise PipException(status_code)
def hash_of_file(path):
    """Return the peep hash of the file at *path*."""
    sha = sha256()
    with open(path, 'rb') as archive:
        # Read in 1 MiB chunks so huge archives needn't fit in memory.
        for block in iter(lambda: archive.read(2 ** 20), b''):
            sha.update(block)
    return encoded_hash(sha)
def is_git_sha(text):
    """Return whether *text* looks like a git commit sha: 40 hex chars in
    full, or the 7-char abbreviation."""
    if len(text) not in (7, 40):
        return False
    try:
        int(text, 16)
    except ValueError:
        return False
    return True
def filename_from_url(url):
    """Return the final path segment of *url* — its filename (possibly '')."""
    return urlparse(url).path.rpartition('/')[2]
def requirement_args(argv, want_paths=False, want_other=False):
"""Return an iterable of filtered arguments.
:arg argv: Arguments, starting after the subcommand
:arg want_paths: If True, the returned iterable includes the paths to any
requirements files following a ``-r`` or ``--requirement`` option.
:arg want_other: If True, the returned iterable includes the args that are
not a requirement-file path or a ``-r`` or ``--requirement`` flag.
"""
was_r = False
for arg in argv:
# Allow for requirements files named "-r", don't freak out if there's a
# trailing "-r", etc.
if was_r:
if want_paths:
yield arg
was_r = False
elif arg in ['-r', '--requirement']:
was_r = True
else:
if want_other:
yield arg
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.
    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        # One ready-to-paste "# sha256: <hash>" line per file:
        for path in paths:
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        parser.print_usage()
        return COMMAND_LINE_ERROR
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2
    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attributes parse_requirements() may read; harmless defaults:
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Memoize a no-argument method so it runs at most once per instance.

    Results are cached on the instance in ``self._cache``, keyed by the
    method's name, so independent methods share one cache dict.
    """
    @wraps(func)
    def memoizer(self):
        try:
            cache = self._cache
        except AttributeError:
            cache = self._cache = {}
        if func.__name__ not in cache:
            cache[func.__name__] = func(self)
        return cache[func.__name__]
    return memoizer
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.
    :arg argv: Everything after the subcommand
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top-level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)
    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        kw, attr = option if isinstance(option, tuple) else (option, option)
        # MARKER distinguishes "attr absent on this pip" from a falsy value:
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value
    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])
    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)
    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
    """A wrapper around InstallRequirement which offers additional information
    based on downloading and examining a corresponding package archive
    These are conceptually immutable, so we can get away with memoizing
    expensive things.
    """
    def __init__(self, req, argv, finder):
        """Download a requirement, compare its hashes, and return a subclass
        of DownloadedReq depending on its state.
        :arg req: The InstallRequirement I am based on
        :arg argv: The args, starting after the subcommand
        :arg finder: The PackageFinder used to resolve index requirements
        """
        self._req = req
        self._argv = argv
        self._finder = finder
        # We use a separate temp dir for each requirement so requirements
        # (from different indices) that happen to have the same archive names
        # don't overwrite each other, leading to a security hole in which the
        # latter is a hash mismatch, the former has already passed the
        # comparison, and the latter gets installed.
        self._temp_path = mkdtemp(prefix='peep-')
        # Think of DownloadedReq as a one-shot state machine. It's an abstract
        # class that ratchets forward to being one of its own subclasses,
        # depending on its package status. Then it doesn't move again.
        self.__class__ = self._class()
    def dispose(self):
        """Delete temp files and dirs I've made. Render myself useless.
        Do not call further methods on me after calling dispose().
        """
        rmtree(self._temp_path)
    def _version(self):
        """Deduce the version number of the downloaded package from its filename."""
        # TODO: Can we delete this method and just print the line from the
        # reqs file verbatim instead?
        def version_of_archive(filename, package_name):
            # Since we know the project_name, we can strip that off the left, strip
            # any archive extensions off the right, and take the rest as the
            # version.
            for ext in ARCHIVE_EXTENSIONS:
                if filename.endswith(ext):
                    filename = filename[:-len(ext)]
                    break
            # Handle github sha tarball downloads.
            if is_git_sha(filename):
                filename = package_name + '-' + filename
            if not filename.lower().replace('_', '-').startswith(package_name.lower()):
                # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
                give_up(filename, package_name)
            return filename[len(package_name) + 1:]  # Strip off '-' before version.
        def version_of_wheel(filename, package_name):
            # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
            # name-convention) we know the format bits are '-' separated.
            whl_package_name, version, _rest = filename.split('-', 2)
            # Do the alteration to package_name from PEP 427:
            # NOTE(review): re.UNICODE lands in re.sub's positional *count*
            # argument here, not *flags* — should probably be flags=re.UNICODE.
            our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
            if whl_package_name != our_package_name:
                give_up(filename, whl_package_name)
            return version
        def give_up(filename, package_name):
            raise RuntimeError("The archive '%s' didn't start with the package name "
                               "'%s', so I couldn't figure out the version number. "
                               "My bad; improve me." %
                               (filename, package_name))
        get_version = (version_of_wheel
                       if self._downloaded_filename().endswith('.whl')
                       else version_of_archive)
        return get_version(self._downloaded_filename(), self._project_name())
    def _is_always_unsatisfied(self):
        """Returns whether this requirement is always unsatisfied
        This would happen in cases where we can't determine the version
        from the filename.
        """
        # If this is a github sha tarball, then it is always unsatisfied
        # because the url has a commit sha in it and not the version
        # number.
        url = self._url()
        if url:
            filename = filename_from_url(url)
            if filename.endswith(ARCHIVE_EXTENSIONS):
                filename, ext = splitext(filename)
                if is_git_sha(filename):
                    return True
        return False
    @memoize  # Avoid hitting the file[cache] over and over.
    def _expected_hashes(self):
        """Return a list of known-good hashes for this package."""
        # Hashes come from the "# sha256: ..." comment lines directly above
        # the requirement's line in its requirements file.
        return hashes_above(*path_and_line(self._req))
    def _download(self, link):
        """Download a file, and return its name within my temp dir.
        This does no verification of HTTPS certs, but our checking hashes
        makes that largely unimportant. It would be nice to be able to use the
        requests lib, which can verify certs, but it is guaranteed to be
        available only in pip >= 1.5.
        This also drops support for proxies and basic auth, though those could
        be added back in.
        """
        # Based on pip 1.4.1's URLOpener but with cert verification removed
        def opener(is_https):
            if is_https:
                opener = build_opener(HTTPSHandler())
                # Strip out HTTPHandler to prevent MITM spoof:
                for handler in opener.handlers:
                    if isinstance(handler, HTTPHandler):
                        opener.handlers.remove(handler)
            else:
                opener = build_opener()
            return opener
        # Descended from unpack_http_url() in pip 1.4.1
        def best_filename(link, response):
            """Return the most informative possible filename for a download,
            ideally with a proper extension.
            """
            content_type = response.info().get('content-type', '')
            filename = link.filename  # fallback
            # Have a look at the Content-Disposition header for a better guess:
            content_disposition = response.info().get('content-disposition')
            if content_disposition:
                type, params = cgi.parse_header(content_disposition)
                # We use ``or`` here because we don't want to use an "empty" value
                # from the filename param:
                filename = params.get('filename') or filename
            ext = splitext(filename)[1]
            if not ext:
                ext = mimetypes.guess_extension(content_type)
                if ext:
                    filename += ext
            if not ext and link.url != response.geturl():
                # The URL we were redirected to may carry an extension:
                ext = splitext(response.geturl())[1]
                if ext:
                    filename += ext
            return filename
        # Descended from _download_url() in pip 1.4.1
        def pipe_to_file(response, path, size=0):
            """Pull the data off an HTTP response, shove it in a new file, and
            show progress.
            :arg response: A file-like object to read from
            :arg path: The path of the new file
            :arg size: The expected size, in bytes, of the download. 0 for
                unknown or to suppress progress indication (as for cached
                downloads)
            """
            def response_chunks(chunk_size):
                while True:
                    chunk = response.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
            print('Downloading %s%s...' % (
                self._req.req,
                (' (%sK)' % (size / 1000)) if size > 1000 else ''))
            progress_indicator = (DownloadProgressBar(max=size).iter if size
                                  else DownloadProgressSpinner().iter)
            with open(path, 'wb') as file:
                for chunk in progress_indicator(response_chunks(4096), 4096):
                    file.write(chunk)
        # Strip any #fragment before fetching:
        url = link.url.split('#', 1)[0]
        try:
            response = opener(urlparse(url).scheme != 'http').open(url)
        except (HTTPError, IOError) as exc:
            raise DownloadError(link, exc)
        filename = best_filename(link, response)
        try:
            size = int(response.headers['content-length'])
        except (ValueError, KeyError, TypeError):
            size = 0
        pipe_to_file(response, join(self._temp_path, filename), size=size)
        return filename
    # Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
    @memoize  # Avoid re-downloading.
    def _downloaded_filename(self):
        """Download the package's archive if necessary, and return its
        filename.
        --no-deps is implied, as we have reimplemented the bits that would
        ordinarily do dependency resolution.
        """
        # Peep doesn't support requirements that don't come down as a single
        # file, because it can't hash them. Thus, it doesn't support editable
        # requirements, because pip itself doesn't support editable
        # requirements except for "local projects or a VCS url". Nor does it
        # support VCS requirements yet, because we haven't yet come up with a
        # portable, deterministic way to hash them. In summary, all we support
        # is == requirements and tarballs/zips/etc.
        # TODO: Stop on reqs that are editable or aren't ==.
        # If the requirement isn't already specified as a URL, get a URL
        # from an index:
        link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
        if link:
            lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
            if lower_scheme == 'http' or lower_scheme == 'https':
                file_path = self._download(link)
                return basename(file_path)
            elif lower_scheme == 'file':
                # The following is inspired by pip's unpack_file_url():
                link_path = url_to_path(link.url_without_fragment)
                if isdir(link_path):
                    raise UnsupportedRequirementError(
                        "%s: %s is a directory. So that it can compute "
                        "a hash, peep supports only filesystem paths which "
                        "point to files" %
                        (self._req, link.url_without_fragment))
                else:
                    copy(link_path, self._temp_path)
                    return basename(link_path)
            else:
                raise UnsupportedRequirementError(
                    "%s: The download link, %s, would not result in a file "
                    "that can be hashed. Peep supports only == requirements, "
                    "file:// URLs pointing to files (not folders), and "
                    "http:// and https:// URLs pointing to tarballs, zips, "
                    "etc." % (self._req, link.url))
        else:
            raise UnsupportedRequirementError(
                "%s: couldn't determine where to download this requirement from."
                % (self._req,))
    def install(self):
        """Install the package I represent, without dependencies.
        Obey typical pip-install options passed in on the command line.
        """
        # Everything from the command line except the -r/--requirement pairs:
        other_args = list(requirement_args(self._argv, want_other=True))
        archive_path = join(self._temp_path, self._downloaded_filename())
        # -U so it installs whether pip deems the requirement "satisfied" or
        # not. This is necessary for GitHub-sourced zips, which change without
        # their version numbers changing.
        run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash."""
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))
    def _project_name(self):
        """Return the inner Requirement's "unsafe name".
        Raise ValueError if there is no name.
        """
        # The attribute name varies across pip versions; probe both.
        name = getattr(self._req.req, 'project_name', '')
        if name:
            return name
        name = getattr(self._req.req, 'name', '')
        if name:
            return safe_name(name)
        raise ValueError('Requirement has no project_name.')
    def _name(self):
        # Thin delegation to the underlying InstallRequirement's name.
        return self._req.name
    def _link(self):
        """Return the requirement's Link, or None if it has no URL."""
        try:
            return self._req.link
        except AttributeError:
            # The link attribute isn't available prior to pip 6.1.0, so fall
            # back to the now deprecated 'url' attribute.
            return Link(self._req.url) if self._req.url else None
    def _url(self):
        """Return the requirement's URL as a string, or None if it has none."""
        link = self._link()
        return link.url if link else None
    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        """Return whether the active environment already meets this requirement."""
        self._req.check_if_exists()
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())
    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        # Checks run from worst outcome to best; the first that applies wins.
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        if self._is_satisfied():
            return SatisfiedReq
        if not self._expected_hashes():
            return MissingReq
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        return InstallableReq
    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.
        """
        # Default: nothing. Subclasses (e.g. MismatchedReq) override this.
        return ''
class MalformedReq(DownloadedReq):
"""A requirement whose package name could not be determined"""
@classmethod
def head(cls):
return 'The following requirements could not be processed:\n'
def error(self):
return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""

    @classmethod
    def head(cls):
        # Printed once before the first MissingReq error.
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        # Emit a ready-to-paste "# sha256: ..." comment followed by the
        # requirement spec it belongs to.
        if self._url():
            # _url() always contains an #egg= part, or this would be a
            # MalformedRequest.
            line = self._url()
        else:
            line = '%s==%s' % (self._name(), self._version())
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""

    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")

    def error(self):
        # Align the expected hashes under one another, and the actual hash
        # under the word "expected" (hence the len(preamble) arithmetic).
        preamble = '    %s: expected' % self._project_name()
        if len(self._expected_hashes()) > 1:
            preamble += ' one of'
        padding = '\n' + ' ' * (len(preamble) + 1)
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(self._expected_hashes()),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())

    @classmethod
    def foot(cls):
        # Trailing blank line so the mismatch block stands apart.
        return '\n'
class SatisfiedReq(DownloadedReq):
"""A requirement which turned out to be already installed"""
@classmethod
def head(cls):
return ("These packages were already installed, so we didn't need to download or build\n"
"them again. If you installed them with peep in the first place, you should be\n"
"safe. If not, uninstall them, then re-attempt your install with peep.\n")
def error(self):
return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
"""A requirement whose hash matched and can be safely installed"""
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Group *things* into a dict mapping ``key(thing)`` -> list of things.

    :arg things: Any iterable
    :arg key: A callable applied to each item to pick its bucket
    :returns: A ``defaultdict(list)``; insertion order within each bucket
        follows iteration order of *things*.
    """
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Run *first* on the first item of *iterable*, *every* on each item, and
    *last* on the final item.

    If the iterable yields nothing, none of the callbacks run.
    """
    seen_any = False
    item = None
    for item in iterable:
        if not seen_any:
            seen_any = True
            first(item)
        every(item)
    # ``item`` still holds the final element here, so hand it to *last*.
    if seen_any:
        last(item)
def _parse_requirements(path, finder):
try:
# list() so the generator that is parse_requirements() actually runs
# far enough to report a TypeError
return list(parse_requirements(
path, options=EmptyOptions(), finder=finder))
except TypeError:
# session is a required kwarg as of pip 6.0 and will raise
# a TypeError if missing. It needs to be a PipSession instance,
# but in older versions we can't import it from pip.download
# (nor do we need it at all) so we only import it in this except block
from pip.download import PipSession
return list(parse_requirements(
path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
"""Return a list of DownloadedReqs representing the requirements parsed
out of a given requirements file.
:arg path: The path to the requirements file
:arg argv: The commandline args, starting after the subcommand
"""
finder = package_finder(argv)
return [DownloadedReq(req, argv, finder) for req in
_parse_requirements(path, finder)]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    # Buffer our own messages so pip's output (emitted while downloading)
    # doesn't interleave with them; everything is printed in the finally.
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR

        # We're a "peep install" command, and we have some requirement paths.
        # Downloading each req ratchets it into one of the DownloadedReq
        # subclasses (Malformed/Missing/Mismatched/Satisfied/Installable).
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        buckets = bucket(reqs, lambda r: r.__class__)

        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')

        # head() once per class, error() per req, foot() once per class:
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)

        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            # All hashes verified; install only the verified archives.
            for req in buckets[InstallableReq]:
                req.install()

            first_every_last(buckets[SatisfiedReq], *printers)

            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always clean up each req's temp dir, then flush buffered messages.
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.

    :arg paths: Paths of the requirements files to port
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    # NOTE(review): ``argv`` below is the module-level ``sys.argv`` (via
    # ``from sys import argv``), not a parameter of this function, so the
    # finder sees the raw command line including the program name. Looks
    # unintended -- confirm before relying on index options here.
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # Peep hashes are unpadded urlsafe-base64; re-pad and hex-encode
        # them for pip's ``--hash=sha256:<hex>`` syntax.
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            # New source file: print a "# from <path>" banner once.
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path

        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print('    --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    # Subcommands peep owns; anything else falls through to stock pip.
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        if len(argv) >= 2 and argv[1] in commands:
            # Strip the program name and subcommand before dispatching.
            return commands[argv[1]](argv[2:])
        else:
            # Fall through to top-level pip main() for everything else:
            return pip.main()
    except PipException as exc:
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a ready-to-paste bug-report template for an unhandled exception.

    Invoked as a last resort from the ``__main__`` guard; the arguments are
    the triple from ``sys.exc_info()``.
    """
    print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
    print('with the specifics so we can fix it:')
    print()
    print('https://github.com/erikrose/peep/issues/new')
    print()
    print('Here are some particulars you can copy and paste into the bug report:')
    print()
    print('---')
    # Version info that makes the report actionable:
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Last-ditch handler: print the bug-report template instead of a
        # bare traceback, then exit with a distinct status code.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | run_pip | python

def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong."""
    status_code = pip.main(initial_args)

    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []

    if status_code:
        raise PipException(status_code)

train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L203-L214
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
"""Make a compatible version of pip importable. Raise a RuntimeError if we
couldn't."""
try:
for distro in require(specifier):
distro.activate()
except (VersionConflict, DistributionNotFound):
raise RuntimeError('The installed version of pip is too old; peep '
'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
"""When I delegated to pip, it exited with an error."""
def __init__(self, error_code):
self.error_code = error_code
class UnsupportedRequirementError(Exception):
"""An unsupported line was encountered in a requirements file."""
class DownloadError(Exception):
def __init__(self, link, exc):
self.link = link
self.reason = str(exc)
def __str__(self):
return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
"""Return a short, 7-bit-safe representation of a hash.
If you pass a sha256, this results in the hash algorithm that the Wheel
format (PEP 427) uses, except here it's intended to be run across the
downloaded archive before unpacking.
"""
return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')
def path_and_line(req):
    """Return the (path, line number) of the requirements file an
    InstallRequirement came from.

    Parses pip's ``comes_from`` string, e.g. ``-r reqs.txt (line 4)``.
    """
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    path, lineno = match.groups()
    return path, int(lineno)
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line ``line_number``.

    :arg path: Path of the requirements file
    :arg line_number: 1-based line number (as reported by pip's
        ``parse_requirements``) of the requirement whose hashes to collect
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.

        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []  # sha256 values accumulated since the last req line
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    # NOTE(review): hash-comment lines also satisfy
                    # IGNORED_LINE_RE, so under pip>=8 they appear to reach
                    # this branch too -- verify the intended alignment.
                    yield []
    # The Nth yielded list lines up with the Nth requirement.
    return next(islice(hash_lists(path), line_number - 1, None))
def hash_of_file(path):
    """Return the peep hash of the file at *path*.

    Reads in 1 MiB chunks so arbitrarily large archives hash in constant
    memory.
    """
    sha = sha256()
    with open(path, 'rb') as archive:
        # iter() with a b'' sentinel stops exactly when read() is exhausted.
        for chunk in iter(lambda: archive.read(2 ** 20), b''):
            sha.update(chunk)
    return encoded_hash(sha)
def is_git_sha(text):
    """Return whether *text* is probably a git sha.

    Accepts either the full 40-character sha or the 7-character
    abbreviation, and requires the text to parse as hexadecimal.
    """
    if len(text) not in (40, 7):
        return False
    try:
        int(text, 16)
    except ValueError:
        return False
    return True
def filename_from_url(url):
    """Return the last path segment of *url* -- its filename.

    Query strings and fragments are excluded because ``urlparse`` splits
    them off the path.
    """
    path = urlparse(url).path
    return path.rsplit('/', 1)[-1]
def requirement_args(argv, want_paths=False, want_other=False):
    """Yield a filtered view of commandline arguments.

    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, include the paths following any ``-r`` or
        ``--requirement`` option.
    :arg want_other: If True, include the args that are neither a
        requirements-file path nor a ``-r``/``--requirement`` flag.
    """
    expecting_path = False
    for token in argv:
        if expecting_path:
            # This token is the filename after -r/--requirement, even if it
            # happens to look like a flag (e.g. a file literally named "-r").
            if want_paths:
                yield token
            expecting_path = False
        elif token in ('-r', '--requirement'):
            expecting_path = True
        elif want_other:
            yield token
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        # One ready-to-paste "# sha256: ..." line per file:
        for path in paths:
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        parser.print_usage()
        return COMMAND_LINE_ERROR
class EmptyOptions(object):
"""Fake optparse options for compatibility with pip<1.2
pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
was required. We work around that by passing it a mock object.
"""
default_vcs = None
skip_requirements_regex = None
isolated_mode = False
def memoize(func):
    """Memoize a nullary method whose result is constant per instance.

    The cache lives in a ``_cache`` dict on the instance, keyed by method
    name, so distinct instances and distinct decorated methods don't
    collide.
    """
    @wraps(func)
    def memoizer(self):
        try:
            cache = self._cache
        except AttributeError:
            cache = self._cache = {}
        name = func.__name__
        if name not in cache:
            cache[name] = func(self)
        return cache[name]
    return memoizer
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top=level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)

    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names. Single strings are both kwarg and attr name;
    # tuples map (kwarg, attr):
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        kw, attr = option if isinstance(option, tuple) else (option, option)
        # MARKER sentinel distinguishes "absent on this pip version" from
        # any real value, including None/False:
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value

    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])

    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)

    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
class MalformedReq(DownloadedReq):
"""A requirement whose package name could not be determined"""
@classmethod
def head(cls):
return 'The following requirements could not be processed:\n'
def error(self):
return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
"""A requirement for which no hashes were specified in the requirements file"""
@classmethod
def head(cls):
return ('The following packages had no hashes specified in the requirements file, which\n'
'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
'add these "sha256" lines like so:\n\n')
def error(self):
if self._url():
# _url() always contains an #egg= part, or this would be a
# MalformedRequest.
line = self._url()
else:
line = '%s==%s' % (self._name(), self._version())
return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
"""A requirement for which the downloaded file didn't match any of my hashes."""
@classmethod
def head(cls):
return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
"FILE. If you have updated the package versions, update the hashes. If not,\n"
"freak out, because someone has tampered with the packages.\n\n")
def error(self):
preamble = ' %s: expected' % self._project_name()
if len(self._expected_hashes()) > 1:
preamble += ' one of'
padding = '\n' + ' ' * (len(preamble) + 1)
return '%s %s\n%s got %s' % (preamble,
padding.join(self._expected_hashes()),
' ' * (len(preamble) - 4),
self._actual_hash())
@classmethod
def foot(cls):
return '\n'
class SatisfiedReq(DownloadedReq):
"""A requirement which turned out to be already installed"""
@classmethod
def head(cls):
return ("These packages were already installed, so we didn't need to download or build\n"
"them again. If you installed them with peep in the first place, you should be\n"
"safe. If not, uninstall them, then re-attempt your install with peep.\n")
def error(self):
return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
"""A requirement whose hash matched and can be safely installed"""
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Return a dict mapping key(thing) -> list of the things with that key."""
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Execute something before the first item of iter, something else for each
    item, and a third thing after the last.
    If there are no items in the iterable, don't execute anything.
    """
    missing = object()  # sentinel: distinguishes "no items" from any value
    item = missing
    for position, item in enumerate(iterable):
        if position == 0:
            first(item)
        every(item)
    if item is not missing:
        last(item)
def _parse_requirements(path, finder):
    """Return the list of InstallRequirements parsed from the file at *path*.

    Bridges the pip 6.0 API change that made ``session`` a required kwarg of
    parse_requirements().
    """
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.
    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    finder = package_finder(argv)
    parsed = _parse_requirements(path, finder)
    return [DownloadedReq(requirement, argv, finder) for requirement in parsed]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.
    :arg argv: The commandline args, starting after the subcommand
    """
    # Report text is buffered and printed once in the finally block, so it
    # lands after any output pip produces while downloading/installing.
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR
        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # Each req has already classified itself as one of the DownloadedReq
        # subclasses; group them by that class.
        buckets = bucket(reqs, lambda r: r.__class__)
        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)
        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            # No hash mismatches or missing hashes: safe to install.
            for req in buckets[InstallableReq]:
                req.install()
            first_every_last(buckets[SatisfiedReq], *printers)
            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always delete the per-requirement temp dirs and emit the report.
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.
    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None  # requirements-file path of the previous req, for grouping
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        # NOTE(review): ``argv`` here is the module-level ``from sys import
        # argv`` (the full command line), not a parameter -- confirm intended.
        req_path, req_line = path_and_line(req)
        # Decode each urlsafe-base64 peep hash back to the hex digest pip 8
        # expects; the '=' re-adds the padding encoded_hash() stripped.
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            # New source file: print a separator comment.
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path
        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print(' --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    subcommands = {'hash': peep_hash,
                   'install': peep_install,
                   'port': peep_port}
    try:
        if len(argv) < 2 or argv[1] not in subcommands:
            # Fall through to top-level pip main() for everything else:
            return pip.main()
        return subcommands[argv[1]](argv[2:])
    except PipException as exc:
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a copy-pasteable bug-report template for an unexpected exception.

    Called from the ``__main__`` guard with the result of ``sys.exc_info()``.
    """
    print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
    print('with the specifics so we can fix it:')
    print()
    print('https://github.com/erikrose/peep/issues/new')
    print()
    print('Here are some particulars you can copy and paste into the bug report:')
    print()
    print('---')
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    # getattr: very old pips may lack __version__.
    print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Unexpected failure: print a fill-in-able bug-report template rather
        # than a bare traceback, and exit with a distinctive status code.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | hash_of_file | python | def hash_of_file(path):
with open(path, 'rb') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha) | Return the hash of a downloaded file. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L217-L226 | null | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable. Raise a RuntimeError if we
    couldn't."""
    try:
        # require() resolves *specifier* against installed distributions;
        # activate() makes each matching distribution importable.
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
    """When I delegated to pip, it exited with an error."""
    def __init__(self, error_code):
        # Shell status code pip exited with; propagated by main().
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """An unsupported line was encountered in a requirements file."""
    # Raised by DownloadedReq._downloaded_filename() for requirements that
    # can't be downloaded as a single hashable file (e.g. directories, or
    # links that aren't ==/file/http/https).
class DownloadError(Exception):
    """Raised when fetching a requirement's archive fails."""
    def __init__(self, link, exc):
        self.link = link        # the pip Link we tried to download
        self.reason = str(exc)  # human-readable cause from the original error
    def __str__(self):
        return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash.
    If you pass a sha256, this results in the hash algorithm that the Wheel
    format (PEP 427) uses, except here it's intended to be run across the
    downloaded archive before unpacking.
    """
    b64 = urlsafe_b64encode(sha.digest()).decode('ascii')
    # Drop the base64 padding; peep_port() re-adds it when converting back.
    return b64.rstrip('=')
def path_and_line(req):
    """Return the path and line number of the file from which an
    InstallRequirement came.
    """
    # comes_from looks like "-r <path> (line <n>)".
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    path, line_number = match.groups()
    return path, int(line_number)
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line ``line_number``.
    """
    # NOTE(review): despite the docstring's "yield", this function *returns*
    # the list of hashes for the requirement at ``line_number``.
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.
        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    yield []
    # Pick the hash list whose position matches the requirement's 1-based
    # line number as reported by parse_requirements.
    return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong."""
    status_code = pip.main(initial_args)
    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []
    if status_code:
        # Nonzero shell status from pip: surface it as an exception.
        raise PipException(status_code)
def is_git_sha(text):
    """Return whether this is probably a git sha"""
    # Accept either the full 40-char sha or the 7-char abbreviation.
    if len(text) not in (7, 40):
        return False
    try:
        int(text, 16)
    except ValueError:
        return False
    return True
def filename_from_url(url):
    """Return the final path segment of *url* (may be '' for trailing '/')."""
    path = urlparse(url).path
    return path.rsplit('/', 1)[-1]
def requirement_args(argv, want_paths=False, want_other=False):
    """Return an iterable of filtered arguments.
    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, the returned iterable includes the paths to any
        requirements files following a ``-r`` or ``--requirement`` option.
    :arg want_other: If True, the returned iterable includes the args that are
        not a requirement-file path or a ``-r`` or ``--requirement`` flag.
    """
    # Allow for requirements files named "-r", and tolerate a trailing "-r"
    # with no path after it.
    expecting_path = False
    for token in argv:
        if expecting_path:
            expecting_path = False
            if want_paths:
                yield token
        elif token in ('-r', '--requirement'):
            expecting_path = True
        elif want_other:
            yield token
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.
    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        for path in paths:
            # One "# sha256: <hash>" line per file, ready to paste above a
            # requirement in a requirements file.
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        parser.print_usage()
        return COMMAND_LINE_ERROR
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2
    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attributes various pip versions read off the options object:
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Memoize a method that should return the same result every time on a
    given instance.
    """
    @wraps(func)
    def wrapper(self):
        # Lazily create the per-instance cache, keyed by method name so
        # several memoized methods can share it.
        if not hasattr(self, '_cache'):
            self._cache = {}
        cache = self._cache
        name = func.__name__
        if name not in cache:
            cache[name] = func(self)
        return cache[name]
    return wrapper
def package_finder(argv):
"""Return a PackageFinder respecting command-line options.
:arg argv: Everything after the subcommand
"""
# We instantiate an InstallCommand and then use some of its private
# machinery--its arg parser--for our own purposes, like a virus. This
# approach is portable across many pip versions, where more fine-grained
# ones are not. Ignoring options that don't exist on the parser (for
# instance, --use-wheel) gives us a straightforward method of backward
# compatibility.
try:
command = InstallCommand()
except TypeError:
# This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
# given)" error. In that version, InstallCommand takes a top=level
# parser passed in from outside.
from pip.baseparser import create_main_parser
command = InstallCommand(create_main_parser())
# The downside is that it essentially ruins the InstallCommand class for
# further use. Calling out to pip.main() within the same interpreter, for
# example, would result in arguments parsed this time turning up there.
# Thus, we deepcopy the arg parser so we don't trash its singletons. Of
# course, deepcopy doesn't work on these objects, because they contain
# uncopyable regex patterns, so we pickle and unpickle instead. Fun!
options, _ = loads(dumps(command.parser)).parse_args(argv)
# Carry over PackageFinder kwargs that have [about] the same names as
# options attr names:
possible_options = [
'find_links',
FORMAT_CONTROL_ARG,
('allow_all_prereleases', 'pre'),
'process_dependency_links'
]
kwargs = {}
for option in possible_options:
kw, attr = option if isinstance(option, tuple) else (option, option)
value = getattr(options, attr, MARKER)
if value is not MARKER:
kwargs[kw] = value
# Figure out index_urls:
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
index_urls = []
index_urls += getattr(options, 'mirrors', [])
# If pip is new enough to have a PipSession, initialize one, since
# PackageFinder requires it:
if hasattr(command, '_build_session'):
kwargs['session'] = command._build_session(options)
return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
class MalformedReq(DownloadedReq):
"""A requirement whose package name could not be determined"""
@classmethod
def head(cls):
return 'The following requirements could not be processed:\n'
def error(self):
return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
"""A requirement for which no hashes were specified in the requirements file"""
@classmethod
def head(cls):
return ('The following packages had no hashes specified in the requirements file, which\n'
'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
'add these "sha256" lines like so:\n\n')
def error(self):
if self._url():
# _url() always contains an #egg= part, or this would be a
# MalformedRequest.
line = self._url()
else:
line = '%s==%s' % (self._name(), self._version())
return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
"""A requirement for which the downloaded file didn't match any of my hashes."""
@classmethod
def head(cls):
return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
"FILE. If you have updated the package versions, update the hashes. If not,\n"
"freak out, because someone has tampered with the packages.\n\n")
def error(self):
preamble = ' %s: expected' % self._project_name()
if len(self._expected_hashes()) > 1:
preamble += ' one of'
padding = '\n' + ' ' * (len(preamble) + 1)
return '%s %s\n%s got %s' % (preamble,
padding.join(self._expected_hashes()),
' ' * (len(preamble) - 4),
self._actual_hash())
@classmethod
def foot(cls):
return '\n'
class SatisfiedReq(DownloadedReq):
"""A requirement which turned out to be already installed"""
@classmethod
def head(cls):
return ("These packages were already installed, so we didn't need to download or build\n"
"them again. If you installed them with peep in the first place, you should be\n"
"safe. If not, uninstall them, then re-attempt your install with peep.\n")
def error(self):
return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
"""A requirement whose hash matched and can be safely installed"""
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
"""Return a map of key -> list of things."""
ret = defaultdict(list)
for thing in things:
ret[key(thing)].append(thing)
return ret
def first_every_last(iterable, first, every, last):
"""Execute something before the first item of iter, something else for each
item, and a third thing after the last.
If there are no items in the iterable, don't execute anything.
"""
did_first = False
for item in iterable:
if not did_first:
did_first = True
first(item)
every(item)
if did_first:
last(item)
def _parse_requirements(path, finder):
try:
# list() so the generator that is parse_requirements() actually runs
# far enough to report a TypeError
return list(parse_requirements(
path, options=EmptyOptions(), finder=finder))
except TypeError:
# session is a required kwarg as of pip 6.0 and will raise
# a TypeError if missing. It needs to be a PipSession instance,
# but in older versions we can't import it from pip.download
# (nor do we need it at all) so we only import it in this except block
from pip.download import PipSession
return list(parse_requirements(
path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
"""Return a list of DownloadedReqs representing the requirements parsed
out of a given requirements file.
:arg path: The path to the requirements file
:arg argv: The commandline args, starting after the subcommand
"""
finder = package_finder(argv)
return [DownloadedReq(req, argv, finder) for req in
_parse_requirements(path, finder)]
def peep_install(argv):
"""Perform the ``peep install`` subcommand, returning a shell status code
or raising a PipException.
:arg argv: The commandline args, starting after the subcommand
"""
output = []
out = output.append
reqs = []
try:
req_paths = list(requirement_args(argv, want_paths=True))
if not req_paths:
out("You have to specify one or more requirements files with the -r option, because\n"
"otherwise there's nowhere for peep to look up the hashes.\n")
return COMMAND_LINE_ERROR
# We're a "peep install" command, and we have some requirement paths.
reqs = list(chain.from_iterable(
downloaded_reqs_from_path(path, argv)
for path in req_paths))
buckets = bucket(reqs, lambda r: r.__class__)
# Skip a line after pip's "Cleaning up..." so the important stuff
# stands out:
if any(buckets[b] for b in ERROR_CLASSES):
out('\n')
printers = (lambda r: out(r.head()),
lambda r: out(r.error() + '\n'),
lambda r: out(r.foot()))
for c in ERROR_CLASSES:
first_every_last(buckets[c], *printers)
if any(buckets[b] for b in ERROR_CLASSES):
out('-------------------------------\n'
'Not proceeding to installation.\n')
return SOMETHING_WENT_WRONG
else:
for req in buckets[InstallableReq]:
req.install()
first_every_last(buckets[SatisfiedReq], *printers)
return ITS_FINE_ITS_FINE
except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
out(str(exc))
return SOMETHING_WENT_WRONG
finally:
for req in reqs:
req.dispose()
print(''.join(output))
def peep_port(paths):
"""Convert a peep requirements file to one compatble with pip-8 hashing.
Loses comments and tromps on URLs, so the result will need a little manual
massaging, but the hard part--the hash conversion--is done for you.
"""
if not paths:
print('Please specify one or more requirements files so I have '
'something to port.\n')
return COMMAND_LINE_ERROR
comes_from = None
for req in chain.from_iterable(
_parse_requirements(path, package_finder(argv)) for path in paths):
req_path, req_line = path_and_line(req)
hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
for hash in hashes_above(req_path, req_line)]
if req_path != comes_from:
print()
print('# from %s' % req_path)
print()
comes_from = req_path
if not hashes:
print(req.req)
else:
print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
for hash in hashes:
print(' \\')
print(' --hash=sha256:%s' % hash, end='')
print()
def main():
"""Be the top-level entrypoint. Return a shell status code."""
commands = {'hash': peep_hash,
'install': peep_install,
'port': peep_port}
try:
if len(argv) >= 2 and argv[1] in commands:
return commands[argv[1]](argv[2:])
else:
# Fall through to top-level pip main() for everything else:
return pip.main()
except PipException as exc:
return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
print('with the specifics so we can fix it:')
print()
print('https://github.com/erikrose/peep/issues/new')
print()
print('Here are some particulars you can copy and paste into the bug report:')
print()
print('---')
print('peep:', repr(__version__))
print('python:', repr(sys.version))
print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
print('Command line: ', repr(sys.argv))
print(
''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
print('---')
if __name__ == '__main__':
try:
exit(main())
except Exception:
exception_handler(*sys.exc_info())
exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | requirement_args | python | def requirement_args(argv, want_paths=False, want_other=False):
was_r = False
for arg in argv:
# Allow for requirements files named "-r", don't freak out if there's a
# trailing "-r", etc.
if was_r:
if want_paths:
yield arg
was_r = False
elif arg in ['-r', '--requirement']:
was_r = True
else:
if want_other:
yield arg | Return an iterable of filtered arguments.
:arg argv: Arguments, starting after the subcommand
:arg want_paths: If True, the returned iterable includes the paths to any
requirements files following a ``-r`` or ``--requirement`` option.
:arg want_other: If True, the returned iterable includes the args that are
not a requirement-file path or a ``-r`` or ``--requirement`` flag. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L247-L269 | null | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable. Raise a RuntimeError if we
    couldn't.

    :arg specifier: A setuptools requirement string, e.g. ``'pip>=0.6.2'``
    """
    try:
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
    """When I delegated to pip, it exited with an error."""
    def __init__(self, error_code):
        # The shell status code pip exited with; main() returns it verbatim.
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """An unsupported line was encountered in a requirements file."""
    # Raised for requirements peep can't hash as a single file: directories,
    # non-file/http(s) URL schemes, or requirements whose download location
    # can't be determined.
class DownloadError(Exception):
    """Raised when fetching a requirement's archive from its link fails."""

    def __init__(self, link, exc):
        # Keep the link and a string rendering of the underlying error so the
        # message survives even after the original exception is gone.
        self.link = link
        self.reason = str(exc)

    def __str__(self):
        return 'Downloading {0} failed: {1}'.format(self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash.

    Passing a sha256 yields the digest encoding the Wheel format (PEP 427)
    uses, except that peep runs it over the downloaded archive before
    unpacking.
    """
    # urlsafe base64, with the trailing '=' padding dropped per PEP 427.
    encoded = urlsafe_b64encode(sha.digest())
    return encoded.decode('ascii').rstrip('=')
def path_and_line(req):
    """Return the path and line number of the file from which an
    InstallRequirement came.

    Relies on pip formatting ``comes_from`` as ``-r <path> (line <n>)``.
    """
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    path, line = match.groups()
    return path, int(line)
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line ``line_number``.

    Despite the summary's wording, this *returns* (not yields) the list of
    sha256 hashes from the ``# sha256: ...`` comment block immediately above
    the requirement on (1-based) line ``line_number`` of the file at ``path``.
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.
        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    # NOTE(review): IGNORED_LINE_RE also matches blank lines,
                    # so those yield an empty list here too -- confirm pip 8
                    # counts blank lines the same way.
                    yield []
    # Pick out the list whose position matches the requirement's line number.
    return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong.

    :arg initial_args: Full pip argument list, e.g. ``['install', ...]``
    """
    status_code = pip.main(initial_args)
    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []
    if status_code:
        raise PipException(status_code)
def hash_of_file(path):
    """Return the peep hash of the file at ``path``.

    Reads in 1 MiB chunks so large archives never need to fit in memory.
    """
    sha = sha256()
    with open(path, 'rb') as archive:
        while True:
            chunk = archive.read(2 ** 20)
            if not chunk:
                break
            sha.update(chunk)
    return encoded_hash(sha)
def is_git_sha(text):
    """Return whether ``text`` is probably a git commit sha."""
    # Both the full 40-char sha and the 7-char abbreviation count.
    if len(text) not in (7, 40):
        return False
    try:
        int(text, 16)
    except ValueError:
        return False
    return True
def filename_from_url(url):
    """Return the final path segment of ``url``: its filename, if any."""
    return urlparse(url).path.rsplit('/', 1)[-1]
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if not paths:
        # No files given: show usage and report a command-line error.
        parser.print_usage()
        return COMMAND_LINE_ERROR
    for path in paths:
        print('# sha256:', hash_of_file(path))
    return ITS_FINE_ITS_FINE
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2
    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attributes parse_requirements() may read; all deliberately inert.
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Memoize a nullary method so it runs at most once per instance.

    Results live in a per-instance ``_cache`` dict keyed by method name, so
    several memoized methods on one object don't collide.
    """
    key = func.__name__

    @wraps(func)
    def memoizer(self):
        if not hasattr(self, '_cache'):
            self._cache = {}
        cache = self._cache
        if key not in cache:
            cache[key] = func(self)
        return cache[key]
    return memoizer
def package_finder(argv):
"""Return a PackageFinder respecting command-line options.
:arg argv: Everything after the subcommand
"""
# We instantiate an InstallCommand and then use some of its private
# machinery--its arg parser--for our own purposes, like a virus. This
# approach is portable across many pip versions, where more fine-grained
# ones are not. Ignoring options that don't exist on the parser (for
# instance, --use-wheel) gives us a straightforward method of backward
# compatibility.
try:
command = InstallCommand()
except TypeError:
# This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
# given)" error. In that version, InstallCommand takes a top=level
# parser passed in from outside.
from pip.baseparser import create_main_parser
command = InstallCommand(create_main_parser())
# The downside is that it essentially ruins the InstallCommand class for
# further use. Calling out to pip.main() within the same interpreter, for
# example, would result in arguments parsed this time turning up there.
# Thus, we deepcopy the arg parser so we don't trash its singletons. Of
# course, deepcopy doesn't work on these objects, because they contain
# uncopyable regex patterns, so we pickle and unpickle instead. Fun!
options, _ = loads(dumps(command.parser)).parse_args(argv)
# Carry over PackageFinder kwargs that have [about] the same names as
# options attr names:
possible_options = [
'find_links',
FORMAT_CONTROL_ARG,
('allow_all_prereleases', 'pre'),
'process_dependency_links'
]
kwargs = {}
for option in possible_options:
kw, attr = option if isinstance(option, tuple) else (option, option)
value = getattr(options, attr, MARKER)
if value is not MARKER:
kwargs[kw] = value
# Figure out index_urls:
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
index_urls = []
index_urls += getattr(options, 'mirrors', [])
# If pip is new enough to have a PipSession, initialize one, since
# PackageFinder requires it:
if hasattr(command, '_build_session'):
kwargs['session'] = command._build_session(options)
return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
class MalformedReq(DownloadedReq):
"""A requirement whose package name could not be determined"""
@classmethod
def head(cls):
return 'The following requirements could not be processed:\n'
def error(self):
return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
"""A requirement for which no hashes were specified in the requirements file"""
@classmethod
def head(cls):
return ('The following packages had no hashes specified in the requirements file, which\n'
'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
'add these "sha256" lines like so:\n\n')
def error(self):
if self._url():
# _url() always contains an #egg= part, or this would be a
# MalformedRequest.
line = self._url()
else:
line = '%s==%s' % (self._name(), self._version())
return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
"""A requirement for which the downloaded file didn't match any of my hashes."""
@classmethod
def head(cls):
return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
"FILE. If you have updated the package versions, update the hashes. If not,\n"
"freak out, because someone has tampered with the packages.\n\n")
def error(self):
preamble = ' %s: expected' % self._project_name()
if len(self._expected_hashes()) > 1:
preamble += ' one of'
padding = '\n' + ' ' * (len(preamble) + 1)
return '%s %s\n%s got %s' % (preamble,
padding.join(self._expected_hashes()),
' ' * (len(preamble) - 4),
self._actual_hash())
@classmethod
def foot(cls):
return '\n'
class SatisfiedReq(DownloadedReq):
"""A requirement which turned out to be already installed"""
@classmethod
def head(cls):
return ("These packages were already installed, so we didn't need to download or build\n"
"them again. If you installed them with peep in the first place, you should be\n"
"safe. If not, uninstall them, then re-attempt your install with peep.\n")
def error(self):
return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
"""A requirement whose hash matched and can be safely installed"""
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Group ``things`` into a map of ``key(thing)`` -> list of things.

    The result is a ``defaultdict(list)``: looking up an absent key yields an
    empty list, which callers rely on.
    """
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Call ``first`` before the first item, ``every`` for each item, and
    ``last`` after the final one.

    If the iterable is empty, no callback runs. ``first`` receives the first
    item; ``last`` receives the final one.
    """
    item = marker = object()
    for index, item in enumerate(iterable):
        if index == 0:
            first(item)
        every(item)
    if item is not marker:
        last(item)
def _parse_requirements(path, finder):
    """Return the InstallRequirements parsed from the requirements file at
    ``path``, using ``finder`` to locate packages.

    Bridges old and new pip versions, which disagree on the ``session`` kwarg.
    """
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    # Each call builds its own PackageFinder from the command-line options.
    finder = package_finder(argv)
    return [DownloadedReq(req, argv, finder) for req in
            _parse_requirements(path, finder)]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    output = []
    out = output.append  # Buffer messages; flushed once in the finally block.
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR
        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # Group requirements by state (satisfied, installable, or one of the
        # ERROR_CLASSES). bucket() returns a defaultdict, so classes with no
        # members safely yield empty lists below.
        buckets = bucket(reqs, lambda r: r.__class__)
        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)
        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            # Every hash matched: install, then report what was already there.
            for req in buckets[InstallableReq]:
                req.install()
            first_every_last(buckets[SatisfiedReq], *printers)
            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always clean up temp dirs, then flush the buffered report.
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatble with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.

    :arg paths: Paths of requirements files to port
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    # NOTE(review): `argv` below is the module-level `from sys import argv`,
    # i.e. the whole command line including "peep port" -- confirm
    # package_finder tolerates those extra positional args.
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # peep stores urlsafe-base64 sha256 digests; pip 8 wants hex, so
        # re-pad with '=', decode, and hexlify each one.
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            # Emit a header whenever we move to a new source file.
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path
        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print(' --hash=sha256:%s' % hash, end='')
            print()
def main():
"""Be the top-level entrypoint. Return a shell status code."""
commands = {'hash': peep_hash,
'install': peep_install,
'port': peep_port}
try:
if len(argv) >= 2 and argv[1] in commands:
return commands[argv[1]](argv[2:])
else:
# Fall through to top-level pip main() for everything else:
return pip.main()
except PipException as exc:
return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
print('with the specifics so we can fix it:')
print()
print('https://github.com/erikrose/peep/issues/new')
print()
print('Here are some particulars you can copy and paste into the bug report:')
print()
print('---')
print('peep:', repr(__version__))
print('python:', repr(sys.version))
print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
print('Command line: ', repr(sys.argv))
print(
''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
print('---')
if __name__ == '__main__':
try:
exit(main())
except Exception:
exception_handler(*sys.exc_info())
exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | peep_hash | python | def peep_hash(argv):
parser = OptionParser(
usage='usage: %prog hash file [file ...]',
description='Print a peep hash line for one or more files: for '
'example, "# sha256: '
'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
_, paths = parser.parse_args(args=argv)
if paths:
for path in paths:
print('# sha256:', hash_of_file(path))
return ITS_FINE_ITS_FINE
else:
parser.print_usage()
return COMMAND_LINE_ERROR | Return the peep hash of one or more files, returning a shell status code
or raising a PipException.
:arg argv: The commandline args, starting after the subcommand | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L287-L306 | [
"def hash_of_file(path):\n \"\"\"Return the hash of a downloaded file.\"\"\"\n with open(path, 'rb') as archive:\n sha = sha256()\n while True:\n data = archive.read(2 ** 20)\n if not data:\n break\n sha.update(data)\n return encoded_hash(sha)\n"
] | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
"""Make a compatible version of pip importable. Raise a RuntimeError if we
couldn't."""
try:
for distro in require(specifier):
distro.activate()
except (VersionConflict, DistributionNotFound):
raise RuntimeError('The installed version of pip is too old; peep '
'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
"""When I delegated to pip, it exited with an error."""
def __init__(self, error_code):
self.error_code = error_code
class UnsupportedRequirementError(Exception):
"""An unsupported line was encountered in a requirements file."""
class DownloadError(Exception):
def __init__(self, link, exc):
self.link = link
self.reason = str(exc)
def __str__(self):
return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
"""Return a short, 7-bit-safe representation of a hash.
If you pass a sha256, this results in the hash algorithm that the Wheel
format (PEP 427) uses, except here it's intended to be run across the
downloaded archive before unpacking.
"""
return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')
def path_and_line(req):
"""Return the path and line number of the file from which an
InstallRequirement came.
"""
path, line = (re.match(r'-r (.*) \(line (\d+)\)$',
req.comes_from).groups())
return path, int(line)
def hashes_above(path, line_number):
"""Yield hashes from contiguous comment lines before line ``line_number``.
"""
def hash_lists(path):
"""Yield lists of hashes appearing between non-comment lines.
The lists will be in order of appearance and, for each non-empty
list, their place in the results will coincide with that of the
line number of the corresponding result from `parse_requirements`
(which changed in pip 7.0 to not count comments).
"""
hashes = []
with open(path) as file:
for lineno, line in enumerate(file, 1):
match = HASH_COMMENT_RE.match(line)
if match: # Accumulate this hash.
hashes.append(match.groupdict()['hash'])
if not IGNORED_LINE_RE.match(line):
yield hashes # Report hashes seen so far.
hashes = []
elif PIP_COUNTS_COMMENTS:
# Comment: count as normal req but have no hashes.
yield []
return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
"""Delegate to pip the given args (starting with the subcommand), and raise
``PipException`` if something goes wrong."""
status_code = pip.main(initial_args)
# Clear out the registrations in the pip "logger" singleton. Otherwise,
# loggers keep getting appended to it with every run. Pip assumes only one
# command invocation will happen per interpreter lifetime.
logger.consumers = []
if status_code:
raise PipException(status_code)
def hash_of_file(path):
"""Return the hash of a downloaded file."""
with open(path, 'rb') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha)
def is_git_sha(text):
"""Return whether this is probably a git sha"""
# Handle both the full sha as well as the 7-character abbreviation
if len(text) in (40, 7):
try:
int(text, 16)
return True
except ValueError:
pass
return False
def filename_from_url(url):
parsed = urlparse(url)
path = parsed.path
return path.split('/')[-1]
def requirement_args(argv, want_paths=False, want_other=False):
"""Return an iterable of filtered arguments.
:arg argv: Arguments, starting after the subcommand
:arg want_paths: If True, the returned iterable includes the paths to any
requirements files following a ``-r`` or ``--requirement`` option.
:arg want_other: If True, the returned iterable includes the args that are
not a requirement-file path or a ``-r`` or ``--requirement`` flag.
"""
was_r = False
for arg in argv:
# Allow for requirements files named "-r", don't freak out if there's a
# trailing "-r", etc.
if was_r:
if want_paths:
yield arg
was_r = False
elif arg in ['-r', '--requirement']:
was_r = True
else:
if want_other:
yield arg
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
class EmptyOptions(object):
"""Fake optparse options for compatibility with pip<1.2
pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
was required. We work around that by passing it a mock object.
"""
default_vcs = None
skip_requirements_regex = None
isolated_mode = False
def memoize(func):
"""Memoize a method that should return the same result every time on a
given instance.
"""
@wraps(func)
def memoizer(self):
if not hasattr(self, '_cache'):
self._cache = {}
if func.__name__ not in self._cache:
self._cache[func.__name__] = func(self)
return self._cache[func.__name__]
return memoizer
def package_finder(argv):
"""Return a PackageFinder respecting command-line options.
:arg argv: Everything after the subcommand
"""
# We instantiate an InstallCommand and then use some of its private
# machinery--its arg parser--for our own purposes, like a virus. This
# approach is portable across many pip versions, where more fine-grained
# ones are not. Ignoring options that don't exist on the parser (for
# instance, --use-wheel) gives us a straightforward method of backward
# compatibility.
try:
command = InstallCommand()
except TypeError:
# This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
# given)" error. In that version, InstallCommand takes a top=level
# parser passed in from outside.
from pip.baseparser import create_main_parser
command = InstallCommand(create_main_parser())
# The downside is that it essentially ruins the InstallCommand class for
# further use. Calling out to pip.main() within the same interpreter, for
# example, would result in arguments parsed this time turning up there.
# Thus, we deepcopy the arg parser so we don't trash its singletons. Of
# course, deepcopy doesn't work on these objects, because they contain
# uncopyable regex patterns, so we pickle and unpickle instead. Fun!
options, _ = loads(dumps(command.parser)).parse_args(argv)
# Carry over PackageFinder kwargs that have [about] the same names as
# options attr names:
possible_options = [
'find_links',
FORMAT_CONTROL_ARG,
('allow_all_prereleases', 'pre'),
'process_dependency_links'
]
kwargs = {}
for option in possible_options:
kw, attr = option if isinstance(option, tuple) else (option, option)
value = getattr(options, attr, MARKER)
if value is not MARKER:
kwargs[kw] = value
# Figure out index_urls:
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
index_urls = []
index_urls += getattr(options, 'mirrors', [])
# If pip is new enough to have a PipSession, initialize one, since
# PackageFinder requires it:
if hasattr(command, '_build_session'):
kwargs['session'] = command._build_session(options)
return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
class MalformedReq(DownloadedReq):
"""A requirement whose package name could not be determined"""
@classmethod
def head(cls):
return 'The following requirements could not be processed:\n'
def error(self):
return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
"""A requirement for which no hashes were specified in the requirements file"""
@classmethod
def head(cls):
return ('The following packages had no hashes specified in the requirements file, which\n'
'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
'add these "sha256" lines like so:\n\n')
def error(self):
if self._url():
# _url() always contains an #egg= part, or this would be a
# MalformedRequest.
line = self._url()
else:
line = '%s==%s' % (self._name(), self._version())
return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
"""A requirement for which the downloaded file didn't match any of my hashes."""
@classmethod
def head(cls):
return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
"FILE. If you have updated the package versions, update the hashes. If not,\n"
"freak out, because someone has tampered with the packages.\n\n")
def error(self):
preamble = ' %s: expected' % self._project_name()
if len(self._expected_hashes()) > 1:
preamble += ' one of'
padding = '\n' + ' ' * (len(preamble) + 1)
return '%s %s\n%s got %s' % (preamble,
padding.join(self._expected_hashes()),
' ' * (len(preamble) - 4),
self._actual_hash())
@classmethod
def foot(cls):
return '\n'
class SatisfiedReq(DownloadedReq):
"""A requirement which turned out to be already installed"""
@classmethod
def head(cls):
return ("These packages were already installed, so we didn't need to download or build\n"
"them again. If you installed them with peep in the first place, you should be\n"
"safe. If not, uninstall them, then re-attempt your install with peep.\n")
def error(self):
return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
"""A requirement whose hash matched and can be safely installed"""
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
"""Return a map of key -> list of things."""
ret = defaultdict(list)
for thing in things:
ret[key(thing)].append(thing)
return ret
def first_every_last(iterable, first, every, last):
"""Execute something before the first item of iter, something else for each
item, and a third thing after the last.
If there are no items in the iterable, don't execute anything.
"""
did_first = False
for item in iterable:
if not did_first:
did_first = True
first(item)
every(item)
if did_first:
last(item)
def _parse_requirements(path, finder):
try:
# list() so the generator that is parse_requirements() actually runs
# far enough to report a TypeError
return list(parse_requirements(
path, options=EmptyOptions(), finder=finder))
except TypeError:
# session is a required kwarg as of pip 6.0 and will raise
# a TypeError if missing. It needs to be a PipSession instance,
# but in older versions we can't import it from pip.download
# (nor do we need it at all) so we only import it in this except block
from pip.download import PipSession
return list(parse_requirements(
path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
"""Return a list of DownloadedReqs representing the requirements parsed
out of a given requirements file.
:arg path: The path to the requirements file
:arg argv: The commandline args, starting after the subcommand
"""
finder = package_finder(argv)
return [DownloadedReq(req, argv, finder) for req in
_parse_requirements(path, finder)]
def peep_install(argv):
"""Perform the ``peep install`` subcommand, returning a shell status code
or raising a PipException.
:arg argv: The commandline args, starting after the subcommand
"""
output = []
out = output.append
reqs = []
try:
req_paths = list(requirement_args(argv, want_paths=True))
if not req_paths:
out("You have to specify one or more requirements files with the -r option, because\n"
"otherwise there's nowhere for peep to look up the hashes.\n")
return COMMAND_LINE_ERROR
# We're a "peep install" command, and we have some requirement paths.
reqs = list(chain.from_iterable(
downloaded_reqs_from_path(path, argv)
for path in req_paths))
buckets = bucket(reqs, lambda r: r.__class__)
# Skip a line after pip's "Cleaning up..." so the important stuff
# stands out:
if any(buckets[b] for b in ERROR_CLASSES):
out('\n')
printers = (lambda r: out(r.head()),
lambda r: out(r.error() + '\n'),
lambda r: out(r.foot()))
for c in ERROR_CLASSES:
first_every_last(buckets[c], *printers)
if any(buckets[b] for b in ERROR_CLASSES):
out('-------------------------------\n'
'Not proceeding to installation.\n')
return SOMETHING_WENT_WRONG
else:
for req in buckets[InstallableReq]:
req.install()
first_every_last(buckets[SatisfiedReq], *printers)
return ITS_FINE_ITS_FINE
except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
out(str(exc))
return SOMETHING_WENT_WRONG
finally:
for req in reqs:
req.dispose()
print(''.join(output))
def peep_port(paths):
"""Convert a peep requirements file to one compatble with pip-8 hashing.
Loses comments and tromps on URLs, so the result will need a little manual
massaging, but the hard part--the hash conversion--is done for you.
"""
if not paths:
print('Please specify one or more requirements files so I have '
'something to port.\n')
return COMMAND_LINE_ERROR
comes_from = None
for req in chain.from_iterable(
_parse_requirements(path, package_finder(argv)) for path in paths):
req_path, req_line = path_and_line(req)
hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
for hash in hashes_above(req_path, req_line)]
if req_path != comes_from:
print()
print('# from %s' % req_path)
print()
comes_from = req_path
if not hashes:
print(req.req)
else:
print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
for hash in hashes:
print(' \\')
print(' --hash=sha256:%s' % hash, end='')
print()
def main():
"""Be the top-level entrypoint. Return a shell status code."""
commands = {'hash': peep_hash,
'install': peep_install,
'port': peep_port}
try:
if len(argv) >= 2 and argv[1] in commands:
return commands[argv[1]](argv[2:])
else:
# Fall through to top-level pip main() for everything else:
return pip.main()
except PipException as exc:
return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
print('with the specifics so we can fix it:')
print()
print('https://github.com/erikrose/peep/issues/new')
print()
print('Here are some particulars you can copy and paste into the bug report:')
print()
print('---')
print('peep:', repr(__version__))
print('python:', repr(sys.version))
print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
print('Command line: ', repr(sys.argv))
print(
''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
print('---')
if __name__ == '__main__':
try:
exit(main())
except Exception:
exception_handler(*sys.exc_info())
exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | memoize | python | def memoize(func):
@wraps(func)
def memoizer(self):
if not hasattr(self, '_cache'):
self._cache = {}
if func.__name__ not in self._cache:
self._cache[func.__name__] = func(self)
return self._cache[func.__name__]
return memoizer | Memoize a method that should return the same result every time on a
given instance. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L321-L333 | null | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable. Raise a RuntimeError if we
    couldn't.

    :arg specifier: A setuptools requirement string, e.g. ``'pip>=0.6.2'``
    """
    try:
        # require() resolves the distribution; activate() puts it on sys.path
        # so the subsequent ``import pip`` picks up the right version.
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
    """When I delegated to pip, it exited with an error."""

    def __init__(self, error_code):
        # The shell exit status that pip.main() returned; main() propagates
        # it as peep's own exit status.
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """An unsupported line was encountered in a requirements file.

    Raised for requirements peep cannot hash: download links that are not a
    single file (e.g. ``file://`` URLs pointing at directories), links whose
    scheme yields nothing hashable, or requirements with no determinable
    download location at all.
    """
class DownloadError(Exception):
    """Raised when fetching a requirement's archive fails."""

    def __init__(self, link, exc):
        # Keep a human-readable reason rather than the exception object.
        self.reason = str(exc)
        self.link = link

    def __str__(self):
        return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash.

    If you pass a sha256, this results in the hash algorithm that the Wheel
    format (PEP 427) uses, except here it's intended to be run across the
    downloaded archive before unpacking.
    """
    # URL-safe base64 minus the '=' padding is compact and shell-safe.
    digest = sha.digest()
    encoded = urlsafe_b64encode(digest)
    return encoded.decode('ascii').rstrip('=')
def path_and_line(req):
    """Return the path and line number of the file from which an
    InstallRequirement came.
    """
    # ``comes_from`` looks like "-r <path> (line <n>)"; pull out both parts.
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    file_path, line_number = match.groups()
    return file_path, int(line_number)
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line ``line_number``.

    :arg path: Path of the requirements file
    :arg line_number: 1-based line number of the requirement whose preceding
        ``# sha256: ...`` comments we want
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.
        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    yield []
    # Skip straight to the hash list belonging to ``line_number`` without
    # materializing the whole file's worth of lists.
    return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong."""
    exit_status = pip.main(initial_args)

    # Pip's singleton logger keeps accumulating consumer registrations across
    # runs (it assumes one command invocation per interpreter lifetime), so
    # clear them out by hand.
    logger.consumers = []

    if exit_status:
        raise PipException(exit_status)
def hash_of_file(path):
    """Return the hash of a downloaded file."""
    digester = sha256()
    with open(path, 'rb') as archive:
        # Feed the digest in 1MB chunks so big archives don't get slurped
        # into RAM all at once.
        for chunk in iter(lambda: archive.read(2 ** 20), b''):
            digester.update(chunk)
    return encoded_hash(digester)
def is_git_sha(text):
    """Return whether this is probably a git sha"""
    # Git shas appear either in full (40 hex chars) or as the customary
    # 7-char abbreviation.
    if len(text) not in (7, 40):
        return False
    try:
        int(text, 16)
    except ValueError:
        return False
    return True
def filename_from_url(url):
    """Return the last path segment of *url* (the would-be filename)."""
    # Query string and fragment are parsed off separately by urlparse, so
    # only the path component is split.
    return urlparse(url).path.rsplit('/', 1)[-1]
def requirement_args(argv, want_paths=False, want_other=False):
    """Return an iterable of filtered arguments.

    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, the returned iterable includes the paths to any
        requirements files following a ``-r`` or ``--requirement`` option.
    :arg want_other: If True, the returned iterable includes the args that are
        not a requirement-file path or a ``-r`` or ``--requirement`` flag.
    """
    expecting_path = False
    for token in argv:
        if expecting_path:
            # The previous token was -r/--requirement, so this one is a
            # requirements-file path — even if it looks like another flag.
            expecting_path = False
            if want_paths:
                yield token
        elif token in ('-r', '--requirement'):
            expecting_path = True
        elif want_other:
            yield token
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        for path in paths:
            # One "# sha256: <hash>" line per file, ready to paste above the
            # corresponding requirement in a requirements file.
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        parser.print_usage()
        return COMMAND_LINE_ERROR
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2

    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attributes parse_requirements() may read; harmless neutral defaults.
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top-level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)
    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        kw, attr = option if isinstance(option, tuple) else (option, option)
        # MARKER distinguishes "attr absent in this pip version" from a
        # legitimate falsy option value like False or [].
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value
    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])
    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)
    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""

    @classmethod
    def head(cls):
        # Printed once, before the first error of this class.
        return 'The following requirements could not be processed:\n'

    def error(self):
        # One line per offending requirement.
        return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""

    @classmethod
    def head(cls):
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        # Emit a ready-to-paste "# sha256: ..." line plus the requirement line.
        if self._url():
            # _url() always contains an #egg= part, or this would be a
            # MalformedRequest.
            line = self._url()
        else:
            line = '%s==%s' % (self._name(), self._version())
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""

    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")

    def error(self):
        # Align all expected hashes under the "expected" label, then show the
        # hash that was actually computed for the download.
        preamble = ' %s: expected' % self._project_name()
        if len(self._expected_hashes()) > 1:
            preamble += ' one of'
        padding = '\n' + ' ' * (len(preamble) + 1)
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(self._expected_hashes()),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())

    @classmethod
    def foot(cls):
        # Blank line after the whole mismatched-package section.
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""

    @classmethod
    def head(cls):
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")

    def error(self):
        # Not really an error: just lists the already-satisfied requirement.
        return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed"""
    # No head()/error() overrides: this class never appears in the error
    # report; peep_install() calls .install() on each instance instead.
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Return a map of key -> list of things."""
    # Must stay a defaultdict: callers index classes that may have no
    # members and rely on getting an empty list back.
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Execute something before the first item of iter, something else for each
    item, and a third thing after the last.

    If there are no items in the iterable, don't execute anything.
    """
    iterator = iter(iterable)
    try:
        item = next(iterator)
    except StopIteration:
        # Empty iterable: run none of the callbacks.
        return
    first(item)
    every(item)
    for item in iterator:
        every(item)
    last(item)
def _parse_requirements(path, finder):
    """Parse a requirements file into a list of InstallRequirements.

    Bridges pip API versions: pip>=6.0 requires a ``session`` kwarg that
    older versions neither need nor accept.
    """
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    # One PackageFinder (honoring index/find-links options from argv) is
    # shared by every requirement in the file.
    finder = package_finder(argv)
    return [DownloadedReq(req, argv, finder) for req in
            _parse_requirements(path, finder)]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    # User-facing text is buffered in ``output`` and printed once in the
    # ``finally`` block.
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR
        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # Each DownloadedReq has already classified itself (satisfied, missing
        # hash, mismatched, installable, malformed); group by that class.
        buckets = bucket(reqs, lambda r: r.__class__)
        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)
        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            # Only hash-verified requirements get installed.
            for req in buckets[InstallableReq]:
                req.install()
            first_every_last(buckets[SatisfiedReq], *printers)
            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always clean up per-requirement temp dirs and flush the report.
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # Peep stores hashes urlsafe-base64'd without padding; pip 8's
        # --hash option wants them hex-encoded, so re-pad, decode, hexlify.
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            # Emit a header whenever we move on to a new source file.
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path
        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print(' --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    # Subcommands peep implements itself; anything else is handed to pip.
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        if len(argv) >= 2 and argv[1] in commands:
            # Dispatch with the args that follow the subcommand name.
            return commands[argv[1]](argv[2:])
        else:
            # Fall through to top-level pip main() for everything else:
            return pip.main()
    except PipException as exc:
        # Propagate pip's exit status as peep's own.
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a bug-report template for an unhandled exception.

    Includes peep/python/pip version info, the command line, and the full
    traceback, framed so the user can paste it into a GitHub issue.
    """
    print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
    print('with the specifics so we can fix it:')
    print()
    print('https://github.com/erikrose/peep/issues/new')
    print()
    print('Here are some particulars you can copy and paste into the bug report:')
    print()
    print('---')
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
try:
exit(main())
except Exception:
exception_handler(*sys.exc_info())
exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | package_finder | python | def package_finder(argv):
# We instantiate an InstallCommand and then use some of its private
# machinery--its arg parser--for our own purposes, like a virus. This
# approach is portable across many pip versions, where more fine-grained
# ones are not. Ignoring options that don't exist on the parser (for
# instance, --use-wheel) gives us a straightforward method of backward
# compatibility.
try:
command = InstallCommand()
except TypeError:
# This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
# given)" error. In that version, InstallCommand takes a top=level
# parser passed in from outside.
from pip.baseparser import create_main_parser
command = InstallCommand(create_main_parser())
# The downside is that it essentially ruins the InstallCommand class for
# further use. Calling out to pip.main() within the same interpreter, for
# example, would result in arguments parsed this time turning up there.
# Thus, we deepcopy the arg parser so we don't trash its singletons. Of
# course, deepcopy doesn't work on these objects, because they contain
# uncopyable regex patterns, so we pickle and unpickle instead. Fun!
options, _ = loads(dumps(command.parser)).parse_args(argv)
# Carry over PackageFinder kwargs that have [about] the same names as
# options attr names:
possible_options = [
'find_links',
FORMAT_CONTROL_ARG,
('allow_all_prereleases', 'pre'),
'process_dependency_links'
]
kwargs = {}
for option in possible_options:
kw, attr = option if isinstance(option, tuple) else (option, option)
value = getattr(options, attr, MARKER)
if value is not MARKER:
kwargs[kw] = value
# Figure out index_urls:
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
index_urls = []
index_urls += getattr(options, 'mirrors', [])
# If pip is new enough to have a PipSession, initialize one, since
# PackageFinder requires it:
if hasattr(command, '_build_session'):
kwargs['session'] = command._build_session(options)
return PackageFinder(index_urls=index_urls, **kwargs) | Return a PackageFinder respecting command-line options.
:arg argv: Everything after the subcommand | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L336-L390 | null | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable. Raise a RuntimeError if we
    couldn't.

    :arg specifier: A setuptools-style requirement specifier, e.g.
        ``'pip>=0.6.2'``
    """
    try:
        # require() resolves the specifier against installed distributions;
        # activate() puts each matching distribution on sys.path.
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
    """When I delegated to pip, it exited with an error."""

    def __init__(self, error_code):
        # Shell status code that pip exited with; main() propagates it.
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
"""An unsupported line was encountered in a requirements file."""
class DownloadError(Exception):
    """Raised when fetching a requirement's archive off the network fails."""

    def __init__(self, link, exc):
        # Keep the failing link and a human-readable reason for __str__.
        self.link, self.reason = link, str(exc)

    def __str__(self):
        return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash.

    If you pass a sha256, this results in the hash algorithm that the Wheel
    format (PEP 427) uses, except here it's intended to be run across the
    downloaded archive before unpacking.
    """
    encoded = urlsafe_b64encode(sha.digest())
    # Drop the base64 '=' padding, per the PEP 427 convention.
    return encoded.decode('ascii').rstrip('=')
def path_and_line(req):
    """Return the path and line number of the file from which an
    InstallRequirement came.

    :arg req: An InstallRequirement whose ``comes_from`` attribute looks like
        ``-r <path> (line <number>)``
    :raises ValueError: If ``comes_from`` doesn't have that form, e.g. when
        the requirement didn't come from a ``-r`` file at all
    """
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    if match is None:
        # Formerly this fell over with an opaque AttributeError on None
        # ("'NoneType' object has no attribute 'groups'"). Fail loudly and
        # descriptively instead.
        raise ValueError('Requirement %r did not come from a requirements '
                         'file: comes_from=%r' % (req, req.comes_from))
    path, line = match.groups()
    return path, int(line)
def hashes_above(path, line_number):
    """Return hashes from contiguous comment lines before line
    ``line_number``.

    :arg path: Path of the requirements file
    :arg line_number: 1-based line number of the requirement, as reported by
        pip's parse_requirements() (whose counting convention differs across
        pip versions; see PIP_COUNTS_COMMENTS)
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.

        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    yield []
    # Pull out just the list belonging to the requirement on line_number.
    return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong.

    :arg initial_args: A list of args for pip, e.g. ``['install', ...]``
    """
    status_code = pip.main(initial_args)

    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []

    # pip returns a nonzero shell status code on failure.
    if status_code:
        raise PipException(status_code)
def hash_of_file(path):
    """Return the hash of a downloaded file."""
    sha = sha256()
    with open(path, 'rb') as archive:
        # Feed the digest in 1MB chunks so huge archives never sit in memory
        # all at once. iter() with a sentinel stops at EOF (empty read).
        for chunk in iter(lambda: archive.read(2 ** 20), b''):
            sha.update(chunk)
    return encoded_hash(sha)
def is_git_sha(text):
    """Return whether ``text`` is probably a git sha.

    Both the full 40-character sha and the 7-character abbreviation count.
    """
    # Anything that isn't exactly 7 or 40 chars can't be a sha we recognize.
    if len(text) not in (7, 40):
        return False
    try:
        int(text, 16)
    except ValueError:
        return False
    return True
def filename_from_url(url):
    """Return the last path segment of ``url`` (the archive's filename).

    Query strings and fragments are not part of the URL's path, so they are
    excluded automatically.
    """
    return urlparse(url).path.rsplit('/', 1)[-1]
def requirement_args(argv, want_paths=False, want_other=False):
    """Return an iterable of filtered arguments.

    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, the returned iterable includes the paths to any
        requirements files following a ``-r`` or ``--requirement`` option.
    :arg want_other: If True, the returned iterable includes the args that are
        not a requirement-file path or a ``-r`` or ``--requirement`` flag.
    """
    args = iter(argv)
    for arg in args:
        if arg in ('-r', '--requirement'):
            # The token after the flag (if any) is a requirements-file path,
            # even if it looks like a flag itself. islice(..., 1) consumes it
            # and quietly yields nothing for a trailing bare "-r".
            for path in islice(args, 1):
                if want_paths:
                    yield path
        elif want_other:
            yield arg
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Print the peep hash of one or more files, and return a shell status
    code.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        for path in paths:
            # Emit the hash in the same comment format requirements files use,
            # so the line can be pasted in verbatim.
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        parser.print_usage()
        return COMMAND_LINE_ERROR
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2

    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attributes that various pip versions read off the options object.
    # None/False mirror pip's own defaults.
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Memoize a method that should return the same result every time on a
    given instance.

    The first call's result is stashed in a per-instance ``_cache`` dict,
    keyed by the method's name; later calls return the stashed value.
    """
    @wraps(func)
    def memoizer(self):
        try:
            cache = self._cache
        except AttributeError:
            # First memoized call on this instance: create the cache lazily.
            cache = self._cache = {}
        key = func.__name__
        if key not in cache:
            cache[key] = func(self)
        return cache[key]
    return memoizer
class DownloadedReq(object):
    """A wrapper around InstallRequirement which offers additional information
    based on downloading and examining a corresponding package archive

    These are conceptually immutable, so we can get away with memoizing
    expensive things.
    """
    def __init__(self, req, argv, finder):
        """Download a requirement, compare its hashes, and return a subclass
        of DownloadedReq depending on its state.

        :arg req: The InstallRequirement I am based on
        :arg argv: The args, starting after the subcommand
        :arg finder: A pip PackageFinder used to resolve the req to a URL
        """
        self._req = req
        self._argv = argv
        self._finder = finder

        # We use a separate temp dir for each requirement so requirements
        # (from different indices) that happen to have the same archive names
        # don't overwrite each other, leading to a security hole in which the
        # latter is a hash mismatch, the former has already passed the
        # comparison, and the latter gets installed.
        self._temp_path = mkdtemp(prefix='peep-')
        # Think of DownloadedReq as a one-shot state machine. It's an abstract
        # class that ratchets forward to being one of its own subclasses,
        # depending on its package status. Then it doesn't move again.
        self.__class__ = self._class()

    def dispose(self):
        """Delete temp files and dirs I've made. Render myself useless.

        Do not call further methods on me after calling dispose().
        """
        rmtree(self._temp_path)

    def _version(self):
        """Deduce the version number of the downloaded package from its filename."""
        # TODO: Can we delete this method and just print the line from the
        # reqs file verbatim instead?
        def version_of_archive(filename, package_name):
            # Since we know the project_name, we can strip that off the left, strip
            # any archive extensions off the right, and take the rest as the
            # version.
            for ext in ARCHIVE_EXTENSIONS:
                if filename.endswith(ext):
                    filename = filename[:-len(ext)]
                    break
            # Handle github sha tarball downloads.
            if is_git_sha(filename):
                filename = package_name + '-' + filename
            if not filename.lower().replace('_', '-').startswith(package_name.lower()):
                # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
                give_up(filename, package_name)
            return filename[len(package_name) + 1:]  # Strip off '-' before version.

        def version_of_wheel(filename, package_name):
            # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
            # name-convention) we know the format bits are '-' separated.
            whl_package_name, version, _rest = filename.split('-', 2)
            # Do the alteration to package_name from PEP 427:
            # NOTE: re.UNICODE used to be passed positionally, where re.sub()
            # expects its ``count`` argument -- which silently capped the
            # number of replacements at 32 and never set the flag. Pass it as
            # ``flags`` so the regex actually runs in Unicode mode.
            our_package_name = re.sub(r'[^\w\d.]+', '_', package_name,
                                      flags=re.UNICODE)
            if whl_package_name != our_package_name:
                give_up(filename, whl_package_name)
            return version

        def give_up(filename, package_name):
            raise RuntimeError("The archive '%s' didn't start with the package name "
                               "'%s', so I couldn't figure out the version number. "
                               "My bad; improve me." %
                               (filename, package_name))

        get_version = (version_of_wheel
                       if self._downloaded_filename().endswith('.whl')
                       else version_of_archive)
        return get_version(self._downloaded_filename(), self._project_name())

    def _is_always_unsatisfied(self):
        """Returns whether this requirement is always unsatisfied

        This would happen in cases where we can't determine the version
        from the filename.
        """
        # If this is a github sha tarball, then it is always unsatisfied
        # because the url has a commit sha in it and not the version
        # number.
        url = self._url()
        if url:
            filename = filename_from_url(url)
            if filename.endswith(ARCHIVE_EXTENSIONS):
                filename, ext = splitext(filename)
                if is_git_sha(filename):
                    return True
        return False

    @memoize  # Avoid hitting the file[cache] over and over.
    def _expected_hashes(self):
        """Return a list of known-good hashes for this package."""
        return hashes_above(*path_and_line(self._req))

    def _download(self, link):
        """Download a file, and return its name within my temp dir.

        This does no verification of HTTPS certs, but our checking hashes
        makes that largely unimportant. It would be nice to be able to use the
        requests lib, which can verify certs, but it is guaranteed to be
        available only in pip >= 1.5.

        This also drops support for proxies and basic auth, though those could
        be added back in.

        :arg link: A pip Link naming the URL to fetch
        :raises DownloadError: If the HTTP request fails
        """
        # Based on pip 1.4.1's URLOpener but with cert verification removed
        def opener(is_https):
            if is_https:
                opener = build_opener(HTTPSHandler())
                # Strip out HTTPHandler to prevent MITM spoof:
                for handler in opener.handlers:
                    if isinstance(handler, HTTPHandler):
                        opener.handlers.remove(handler)
            else:
                opener = build_opener()
            return opener

        # Descended from unpack_http_url() in pip 1.4.1
        def best_filename(link, response):
            """Return the most informative possible filename for a download,
            ideally with a proper extension.
            """
            content_type = response.info().get('content-type', '')
            filename = link.filename  # fallback
            # Have a look at the Content-Disposition header for a better guess:
            content_disposition = response.info().get('content-disposition')
            if content_disposition:
                type, params = cgi.parse_header(content_disposition)
                # We use ``or`` here because we don't want to use an "empty" value
                # from the filename param:
                filename = params.get('filename') or filename
            ext = splitext(filename)[1]
            if not ext:
                ext = mimetypes.guess_extension(content_type)
                if ext:
                    filename += ext
            if not ext and link.url != response.geturl():
                ext = splitext(response.geturl())[1]
                if ext:
                    filename += ext
            return filename

        # Descended from _download_url() in pip 1.4.1
        def pipe_to_file(response, path, size=0):
            """Pull the data off an HTTP response, shove it in a new file, and
            show progress.

            :arg response: A file-like object to read from
            :arg path: The path of the new file
            :arg size: The expected size, in bytes, of the download. 0 for
                unknown or to suppress progress indication (as for cached
                downloads)
            """
            def response_chunks(chunk_size):
                while True:
                    chunk = response.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
            # Floor division so Python 3 prints a whole number of kilobytes
            # rather than a float (true division is a py2->py3 regression).
            print('Downloading %s%s...' % (
                self._req.req,
                (' (%sK)' % (size // 1000)) if size > 1000 else ''))
            progress_indicator = (DownloadProgressBar(max=size).iter if size
                                  else DownloadProgressSpinner().iter)
            with open(path, 'wb') as file:
                for chunk in progress_indicator(response_chunks(4096), 4096):
                    file.write(chunk)

        url = link.url.split('#', 1)[0]
        try:
            response = opener(urlparse(url).scheme != 'http').open(url)
        except (HTTPError, IOError) as exc:
            raise DownloadError(link, exc)
        filename = best_filename(link, response)
        try:
            size = int(response.headers['content-length'])
        except (ValueError, KeyError, TypeError):
            size = 0
        pipe_to_file(response, join(self._temp_path, filename), size=size)
        return filename

    # Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
    @memoize  # Avoid re-downloading.
    def _downloaded_filename(self):
        """Download the package's archive if necessary, and return its
        filename.

        --no-deps is implied, as we have reimplemented the bits that would
        ordinarily do dependency resolution.
        """
        # Peep doesn't support requirements that don't come down as a single
        # file, because it can't hash them. Thus, it doesn't support editable
        # requirements, because pip itself doesn't support editable
        # requirements except for "local projects or a VCS url". Nor does it
        # support VCS requirements yet, because we haven't yet come up with a
        # portable, deterministic way to hash them. In summary, all we support
        # is == requirements and tarballs/zips/etc.

        # TODO: Stop on reqs that are editable or aren't ==.

        # If the requirement isn't already specified as a URL, get a URL
        # from an index:
        link = self._link() or self._finder.find_requirement(self._req, upgrade=False)

        if link:
            lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
            if lower_scheme == 'http' or lower_scheme == 'https':
                file_path = self._download(link)
                return basename(file_path)
            elif lower_scheme == 'file':
                # The following is inspired by pip's unpack_file_url():
                link_path = url_to_path(link.url_without_fragment)
                if isdir(link_path):
                    raise UnsupportedRequirementError(
                        "%s: %s is a directory. So that it can compute "
                        "a hash, peep supports only filesystem paths which "
                        "point to files" %
                        (self._req, link.url_without_fragment))
                else:
                    copy(link_path, self._temp_path)
                    return basename(link_path)
            else:
                raise UnsupportedRequirementError(
                    "%s: The download link, %s, would not result in a file "
                    "that can be hashed. Peep supports only == requirements, "
                    "file:// URLs pointing to files (not folders), and "
                    "http:// and https:// URLs pointing to tarballs, zips, "
                    "etc." % (self._req, link.url))
        else:
            raise UnsupportedRequirementError(
                "%s: couldn't determine where to download this requirement from."
                % (self._req,))

    def install(self):
        """Install the package I represent, without dependencies.

        Obey typical pip-install options passed in on the command line.
        """
        other_args = list(requirement_args(self._argv, want_other=True))
        archive_path = join(self._temp_path, self._downloaded_filename())
        # -U so it installs whether pip deems the requirement "satisfied" or
        # not. This is necessary for GitHub-sourced zips, which change without
        # their version numbers changing.
        run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])

    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash."""
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))

    def _project_name(self):
        """Return the inner Requirement's "unsafe name".

        Raise ValueError if there is no name.
        """
        name = getattr(self._req.req, 'project_name', '')
        if name:
            return name
        name = getattr(self._req.req, 'name', '')
        if name:
            return safe_name(name)
        raise ValueError('Requirement has no project_name.')

    def _name(self):
        # The name pip itself parsed out of the requirement line.
        return self._req.name

    def _link(self):
        try:
            return self._req.link
        except AttributeError:
            # The link attribute isn't available prior to pip 6.1.0, so fall
            # back to the now deprecated 'url' attribute.
            return Link(self._req.url) if self._req.url else None

    def _url(self):
        link = self._link()
        return link.url if link else None

    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        self._req.check_if_exists()
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())

    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        if self._is_satisfied():
            return SatisfiedReq
        if not self._expected_hashes():
            return MissingReq
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        return InstallableReq

    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.
        """
        return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""

    @classmethod
    def head(cls):
        # Printed once, before the error lines for all reqs of this class.
        return 'The following requirements could not be processed:\n'

    def error(self):
        # Printed once per offending requirement.
        return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""

    @classmethod
    def head(cls):
        # Printed once, before the error lines for all reqs of this class.
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        # Emit a ready-to-paste "# sha256: ..." comment plus the req line.
        if self._url():
            # _url() always contains an #egg= part, or this would be a
            # MalformedRequest.
            line = self._url()
        else:
            line = '%s==%s' % (self._name(), self._version())
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""

    @classmethod
    def head(cls):
        # Printed once, before the error lines for all reqs of this class.
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")

    def error(self):
        preamble = ' %s: expected' % self._project_name()
        if len(self._expected_hashes()) > 1:
            preamble += ' one of'
        # Pad continuation lines so multiple expected hashes line up under
        # the first, and the "got" hash lines up beneath "expected".
        padding = '\n' + ' ' * (len(preamble) + 1)
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(self._expected_hashes()),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())

    @classmethod
    def foot(cls):
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""

    @classmethod
    def head(cls):
        # Printed once, before the listing of already-installed packages.
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")

    def error(self):
        # Not really an "error": just lists the satisfied requirement.
        return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed"""


# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Return a map of key -> list of things."""
    # A defaultdict, so absent keys read as [] -- callers rely on that.
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Execute something before the first item of iter, something else for each
    item, and a third thing after the last.

    If there are no items in the iterable, don't execute anything.
    """
    it = iter(iterable)
    for item in it:
        # At least one item exists: run the "first" hook on it, then treat it
        # as an ordinary item too.
        first(item)
        every(item)
        break
    else:
        return  # Empty iterable: run nothing at all.
    for item in it:
        every(item)
    # ``item`` is now the final item seen, whichever loop bound it.
    last(item)
def _parse_requirements(path, finder):
    """Parse a requirements file into a list of InstallRequirements, papering
    over signature differences between pip versions.

    :arg path: Path to the requirements file
    :arg finder: A pip PackageFinder for resolving index URLs
    """
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    finder = package_finder(argv)
    # Constructing a DownloadedReq downloads the archive (if needed) and
    # ratchets the object to the subclass matching its hash-check status.
    return [DownloadedReq(req, argv, finder) for req in
            _parse_requirements(path, finder)]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    # Messages are buffered and printed all at once in the finally clause, so
    # they appear after pip's own output and any cleanup noise.
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR

        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # Group the reqs by their ratcheted DownloadedReq subclass (status).
        buckets = bucket(reqs, lambda r: r.__class__)

        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')

        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)

        if any(buckets[b] for b in ERROR_CLASSES):
            # Any mismatched, missing, or malformed req vetoes the whole run.
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            for req in buckets[InstallableReq]:
                req.install()

            first_every_last(buckets[SatisfiedReq], *printers)

            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always clean up temp dirs, then flush the buffered messages.
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatble with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.

    :arg paths: Paths of the requirements files to port
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # Convert our base64url digests back to the hex digests pip 8's
        # --hash option expects; '=' restores the stripped base64 padding.
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            # Starting a new source file: emit a header comment for it.
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path

        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            # One backslash-continued --hash line per known-good hash.
            for hash in hashes:
                print(' \\')
                print(' --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    # Dispatch table mapping subcommand name -> handler. Each handler takes
    # the args after the subcommand and returns a shell status code.
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        if len(argv) >= 2 and argv[1] in commands:
            return commands[argv[1]](argv[2:])
        else:
            # Fall through to top-level pip main() for everything else:
            return pip.main()
    except PipException as exc:
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a friendly bug-report template for an unhandled exception.

    :arg exc_type: The exception class
    :arg exc_value: The exception instance
    :arg exc_tb: The traceback object
    """
    print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
    print('with the specifics so we can fix it:')
    print()
    print('https://github.com/erikrose/peep/issues/new')
    print()
    print('Here are some particulars you can copy and paste into the bug report:')
    print()
    print('---')
    # Environment details that make the report actionable:
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Show a bug-report template instead of a bare traceback, and exit
        # with a status distinct from ordinary command failures.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | bucket | python | def bucket(things, key):
ret = defaultdict(list)
for thing in things:
ret[key(thing)].append(thing)
return ret | Return a map of key -> list of things. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L793-L798 | [
"buckets = bucket(reqs, lambda r: r.__class__)\n"
] | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
"""Make a compatible version of pip importable. Raise a RuntimeError if we
couldn't."""
try:
for distro in require(specifier):
distro.activate()
except (VersionConflict, DistributionNotFound):
raise RuntimeError('The installed version of pip is too old; peep '
'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
"""When I delegated to pip, it exited with an error."""
def __init__(self, error_code):
self.error_code = error_code
class UnsupportedRequirementError(Exception):
"""An unsupported line was encountered in a requirements file."""
class DownloadError(Exception):
def __init__(self, link, exc):
self.link = link
self.reason = str(exc)
def __str__(self):
return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
"""Return a short, 7-bit-safe representation of a hash.
If you pass a sha256, this results in the hash algorithm that the Wheel
format (PEP 427) uses, except here it's intended to be run across the
downloaded archive before unpacking.
"""
return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')
def path_and_line(req):
"""Return the path and line number of the file from which an
InstallRequirement came.
"""
path, line = (re.match(r'-r (.*) \(line (\d+)\)$',
req.comes_from).groups())
return path, int(line)
def hashes_above(path, line_number):
"""Yield hashes from contiguous comment lines before line ``line_number``.
"""
def hash_lists(path):
"""Yield lists of hashes appearing between non-comment lines.
The lists will be in order of appearance and, for each non-empty
list, their place in the results will coincide with that of the
line number of the corresponding result from `parse_requirements`
(which changed in pip 7.0 to not count comments).
"""
hashes = []
with open(path) as file:
for lineno, line in enumerate(file, 1):
match = HASH_COMMENT_RE.match(line)
if match: # Accumulate this hash.
hashes.append(match.groupdict()['hash'])
if not IGNORED_LINE_RE.match(line):
yield hashes # Report hashes seen so far.
hashes = []
elif PIP_COUNTS_COMMENTS:
# Comment: count as normal req but have no hashes.
yield []
return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong."""
    status_code = pip.main(initial_args)
    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []
    # A non-zero status means pip failed; surface it as an exception so the
    # caller can propagate the exact status code to the shell.
    if status_code:
        raise PipException(status_code)
def hash_of_file(path):
    """Return the encoded sha256 hash of the downloaded file at ``path``."""
    digest = sha256()
    with open(path, 'rb') as archive:
        # Feed the file through in 1MB chunks so large archives never need
        # to fit in memory at once.
        for chunk in iter(lambda: archive.read(2 ** 20), b''):
            digest.update(chunk)
    return encoded_hash(digest)
def is_git_sha(text):
    """Return whether ``text`` is probably a git sha.

    Accepts both the full 40-char sha and the 7-char abbreviation.
    """
    if len(text) not in (7, 40):
        return False
    try:
        int(text, 16)
    except ValueError:
        return False
    return True
def filename_from_url(url):
    """Return the file name implied by a URL: the last segment of its path."""
    url_path = urlparse(url).path
    # rpartition returns the whole string as the third element when there is
    # no '/', matching split('/')[-1] behavior.
    return url_path.rpartition('/')[2]
def requirement_args(argv, want_paths=False, want_other=False):
    """Return an iterable of filtered arguments.
    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, the returned iterable includes the paths to any
        requirements files following a ``-r`` or ``--requirement`` option.
    :arg want_other: If True, the returned iterable includes the args that are
        not a requirement-file path or a ``-r`` or ``--requirement`` flag.
    """
    expecting_path = False
    for token in argv:
        # Tolerate odd inputs: a requirements file literally named "-r", a
        # trailing "-r" with no path after it, etc.
        if expecting_path:
            expecting_path = False
            if want_paths:
                yield token
        elif token in ('-r', '--requirement'):
            expecting_path = True
        elif want_other:
            yield token
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.
    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        # Print one "# sha256: ..." line per file, ready to paste directly
        # above a requirement in a requirements file.
        for path in paths:
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        parser.print_usage()
        return COMMAND_LINE_ERROR
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2
    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attributes parse_requirements() may read; harmless, do-nothing defaults.
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Memoize a method that should return the same result every time on a
    given instance.

    Results are stored per-instance in a lazily created ``_cache`` dict,
    keyed by method name.
    """
    @wraps(func)
    def memoizer(self):
        try:
            cache = self._cache
        except AttributeError:
            cache = self._cache = {}
        key = func.__name__
        if key not in cache:
            cache[key] = func(self)
        return cache[key]
    return memoizer
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.
    :arg argv: Everything after the subcommand
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top-level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)
    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names. Entries are either 'name' (kwarg == attr) or a
    # ('finder_kwarg', 'options_attr') pair:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        kw, attr = option if isinstance(option, tuple) else (option, option)
        # MARKER lets us distinguish "attr absent on this pip version" from
        # any legitimate option value:
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value
    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])
    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)
    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
    def __init__(self, req, argv, finder):
        """Download a requirement, compare its hashes, and return a subclass
        of DownloadedReq depending on its state.
        :arg req: The InstallRequirement I am based on
        :arg argv: The args, starting after the subcommand
        :arg finder: The PackageFinder used to locate the req on an index
        """
        self._req = req
        self._argv = argv
        self._finder = finder
        # We use a separate temp dir for each requirement so requirements
        # (from different indices) that happen to have the same archive names
        # don't overwrite each other, leading to a security hole in which the
        # latter is a hash mismatch, the former has already passed the
        # comparison, and the latter gets installed.
        self._temp_path = mkdtemp(prefix='peep-')
        # Think of DownloadedReq as a one-shot state machine. It's an abstract
        # class that ratchets forward to being one of its own subclasses,
        # depending on its package status. Then it doesn't move again.
        self.__class__ = self._class()
    def dispose(self):
        """Delete temp files and dirs I've made. Render myself useless.
        Do not call further methods on me after calling dispose().
        """
        # Removes the per-requirement download dir created in __init__:
        rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
    def _is_always_unsatisfied(self):
        """Returns whether this requirement is always unsatisfied
        This would happen in cases where we can't determine the version
        from the filename.
        """
        # If this is a github sha tarball, then it is always unsatisfied
        # because the url has a commit sha in it and not the version
        # number.
        url = self._url()
        if url:
            filename = filename_from_url(url)
            if filename.endswith(ARCHIVE_EXTENSIONS):
                # NOTE(review): splitext() strips only the final extension, so
                # a sha tarball named "<sha>.tar.gz" becomes "<sha>.tar" and
                # is NOT detected here (only single-extension archives like
                # "<sha>.zip" are) -- confirm whether that's intended.
                filename, ext = splitext(filename)
                if is_git_sha(filename):
                    return True
        return False
    @memoize  # Avoid hitting the file over and over.
    def _expected_hashes(self):
        """Return a list of known-good hashes for this package.

        Hashes come from the "# sha256: ..." comment lines directly above
        this requirement's line in its requirements file.
        """
        return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
    # Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
    @memoize  # Avoid re-downloading.
    def _downloaded_filename(self):
        """Download the package's archive if necessary, and return its
        filename.
        --no-deps is implied, as we have reimplemented the bits that would
        ordinarily do dependency resolution.
        """
        # Peep doesn't support requirements that don't come down as a single
        # file, because it can't hash them. Thus, it doesn't support editable
        # requirements, because pip itself doesn't support editable
        # requirements except for "local projects or a VCS url". Nor does it
        # support VCS requirements yet, because we haven't yet come up with a
        # portable, deterministic way to hash them. In summary, all we support
        # is == requirements and tarballs/zips/etc.
        # TODO: Stop on reqs that are editable or aren't ==.
        # If the requirement isn't already specified as a URL, get a URL
        # from an index:
        link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
        if link:
            lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
            if lower_scheme == 'http' or lower_scheme == 'https':
                file_path = self._download(link)
                return basename(file_path)
            elif lower_scheme == 'file':
                # The following is inspired by pip's unpack_file_url():
                link_path = url_to_path(link.url_without_fragment)
                if isdir(link_path):
                    raise UnsupportedRequirementError(
                        "%s: %s is a directory. So that it can compute "
                        "a hash, peep supports only filesystem paths which "
                        "point to files" %
                        (self._req, link.url_without_fragment))
                else:
                    # Local file: copy it into my temp dir so hashing and
                    # installing work the same as for downloads.
                    copy(link_path, self._temp_path)
                    return basename(link_path)
            else:
                raise UnsupportedRequirementError(
                    "%s: The download link, %s, would not result in a file "
                    "that can be hashed. Peep supports only == requirements, "
                    "file:// URLs pointing to files (not folders), and "
                    "http:// and https:// URLs pointing to tarballs, zips, "
                    "etc." % (self._req, link.url))
        else:
            raise UnsupportedRequirementError(
                "%s: couldn't determine where to download this requirement from."
                % (self._req,))
    def install(self):
        """Install the package I represent, without dependencies.
        Obey typical pip-install options passed in on the command line.
        """
        # Forward any non-requirements-file args (e.g. --index-url) to pip:
        other_args = list(requirement_args(self._argv, want_other=True))
        archive_path = join(self._temp_path, self._downloaded_filename())
        # -U so it installs whether pip deems the requirement "satisfied" or
        # not. This is necessary for GitHub-sourced zips, which change without
        # their version numbers changing.
        run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash.

        The hash is the urlsafe-base64 sha256 produced by hash_of_file(),
        i.e. the same format peep_hash() prints.
        """
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))
    def _project_name(self):
        """Return the inner Requirement's "unsafe name".
        Raise ValueError if there is no name.
        """
        # Older pip/pkg_resources exposes project_name; newer exposes name
        # (which we run through safe_name for the same normalization):
        name = getattr(self._req.req, 'project_name', '')
        if name:
            return name
        name = getattr(self._req.req, 'name', '')
        if name:
            return safe_name(name)
        raise ValueError('Requirement has no project_name.')
    def _name(self):
        """Return the InstallRequirement's name as pip parsed it."""
        return self._req.name
    def _link(self):
        """Return the req's Link, or None if it has no explicit URL."""
        try:
            return self._req.link
        except AttributeError:
            # The link attribute isn't available prior to pip 6.1.0, so fall
            # back to the now deprecated 'url' attribute.
            return Link(self._req.url) if self._req.url else None
    def _url(self):
        """Return the req's URL as a string, or None if it has none."""
        link = self._link()
        return link.url if link else None
    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        """Return whether the installed environment already satisfies me.

        Requirements whose version can't be read from the filename (e.g.
        github sha tarballs) are never considered satisfied.
        """
        self._req.check_if_exists()
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())
    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        # No deducible package name -> can't even report on it properly:
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        # Already installed; nothing to download or verify:
        if self._is_satisfied():
            return SatisfiedReq
        # No "# sha256:" lines above it in the requirements file:
        if not self._expected_hashes():
            return MissingReq
        # Downloaded archive doesn't match any stated hash:
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        # Hash verified; safe to install:
        return InstallableReq
    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.
        """
        # No footer by default; subclasses may override (see MismatchedReq).
        return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""
    @classmethod
    def head(cls):
        return 'The following requirements could not be processed:\n'
    def error(self):
        # Typically a URL requirement missing its #egg=<name> fragment:
        return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""
    @classmethod
    def head(cls):
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')
    def error(self):
        # Emit a ready-to-paste requirements-file snippet: the hash comment
        # line followed by the requirement line itself.
        if self._url():
            # _url() always contains an #egg= part, or this would be a
            # MalformedRequest.
            line = self._url()
        else:
            line = '%s==%s' % (self._name(), self._version())
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""
    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")
    def error(self):
        preamble = ' %s: expected' % self._project_name()
        if len(self._expected_hashes()) > 1:
            preamble += ' one of'
        # Indentation that lines subsequent expected hashes up under the first:
        padding = '\n' + ' ' * (len(preamble) + 1)
        # The "-4" makes room for " got" so the actual hash aligns with the
        # expected ones above it.
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(self._expected_hashes()),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())
    @classmethod
    def foot(cls):
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""
    @classmethod
    def head(cls):
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")
    def error(self):
        # Informational only; being satisfied isn't really an error.
        return '   %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed

    peep_install() calls install() on each of these once no requirement
    fell into any of the error classes.
    """
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported. Each class's head()/error()/foot() render one report section:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def first_every_last(iterable, first, every, last):
    """Execute something before the first item of iter, something else for each
    item, and a third thing after the last.
    If there are no items in the iterable, don't execute anything.
    """
    iterator = iter(iterable)
    try:
        item = next(iterator)
    except StopIteration:
        return  # Empty iterable: do nothing at all.
    first(item)
    every(item)
    for item in iterator:
        every(item)
    # ``item`` is now the final element seen.
    last(item)
def _parse_requirements(path, finder):
    """Return the list of InstallRequirements parsed from a requirements file.

    Papers over the pip 6.0 API change that made ``session`` a required
    kwarg of parse_requirements().
    """
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.
    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    # One finder is shared by all reqs from this file; DownloadedReq() also
    # downloads and classifies each req as a side effect of construction.
    finder = package_finder(argv)
    return [DownloadedReq(req, argv, finder) for req in
            _parse_requirements(path, finder)]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.
    :arg argv: The commandline args, starting after the subcommand
    """
    # Buffer all of our own output until the end so it isn't interleaved
    # with pip's:
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR
        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # ``bucket`` (a module helper defined elsewhere in this file) groups
        # the reqs by their DownloadedReq subclass -- presumably a
        # defaultdict(list)-style mapping; confirm at its definition.
        buckets = bucket(reqs, lambda r: r.__class__)
        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)
        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            for req in buckets[InstallableReq]:
                req.install()
            first_every_last(buckets[SatisfiedReq], *printers)
            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always clean up the per-req temp dirs and emit the buffered output,
        # whether we succeeded or bailed out:
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.
    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.

    :arg paths: Paths of the requirements files to convert
    :return: ``COMMAND_LINE_ERROR`` if no paths were given, else None
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    # Build the finder once rather than once per requirements file; it's
    # expensive (it re-parses options and may construct a pip session). NB:
    # ``argv`` here is the module-level sys.argv -- the only command line
    # peep_port has access to.
    finder = package_finder(argv)
    comes_from = None
    for req in chain.from_iterable(
            _parse_requirements(path, finder) for path in paths):
        req_path, req_line = path_and_line(req)
        # Convert peep's stripped urlsafe-base64 hashes to the hex digests
        # pip 8's --hash option expects ('=' restores the base64 padding):
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path
        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print(' --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    # Dispatch table: peep subcommand name -> handler taking remaining args.
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        if len(argv) >= 2 and argv[1] in commands:
            return commands[argv[1]](argv[2:])
        else:
            # Fall through to top-level pip main() for everything else:
            return pip.main()
    except PipException as exc:
        # Relay pip's own failure status to the shell.
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a bug-report template for an unexpected crash, including peep,
    Python, and pip versions, the command line, and the traceback."""
    print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
    print('with the specifics so we can fix it:')
    print()
    print('https://github.com/erikrose/peep/issues/new')
    print()
    print('Here are some particulars you can copy and paste into the bug report:')
    print()
    print('---')
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Last-ditch handler: print a bug-report template instead of a bare
        # traceback, and exit with a distinctive status code.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | first_every_last | python | def first_every_last(iterable, first, every, last):
did_first = False
for item in iterable:
if not did_first:
did_first = True
first(item)
every(item)
if did_first:
last(item) | Execute something before the first item of iter, something else for each
item, and a third thing after the last.
If there are no items in the iterable, don't execute anything. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L801-L815 | [
"printers = (lambda r: out(r.head()),\n",
"lambda r: out(r.error() + '\\n'),\n",
"lambda r: out(r.foot()))\n"
] | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable. Raise a RuntimeError if we
    couldn't."""
    try:
        # Put each distribution satisfying ``specifier`` on sys.path:
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
    class NullProgressBar(object):
        # Stand-in for pip's progress-bar classes when pip is too old to
        # provide pip.utils.ui: displays no progress at all.
        def __init__(self, *args, **kwargs):
            pass
        def iter(self, ret, *args, **kwargs):
            # Pass the wrapped iterable through untouched.
            return ret
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
"""When I delegated to pip, it exited with an error."""
def __init__(self, error_code):
self.error_code = error_code
class UnsupportedRequirementError(Exception):
"""An unsupported line was encountered in a requirements file."""
class DownloadError(Exception):
def __init__(self, link, exc):
self.link = link
self.reason = str(exc)
def __str__(self):
return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
"""Return a short, 7-bit-safe representation of a hash.
If you pass a sha256, this results in the hash algorithm that the Wheel
format (PEP 427) uses, except here it's intended to be run across the
downloaded archive before unpacking.
"""
return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')
def path_and_line(req):
"""Return the path and line number of the file from which an
InstallRequirement came.
"""
path, line = (re.match(r'-r (.*) \(line (\d+)\)$',
req.comes_from).groups())
return path, int(line)
def hashes_above(path, line_number):
"""Yield hashes from contiguous comment lines before line ``line_number``.
"""
def hash_lists(path):
"""Yield lists of hashes appearing between non-comment lines.
The lists will be in order of appearance and, for each non-empty
list, their place in the results will coincide with that of the
line number of the corresponding result from `parse_requirements`
(which changed in pip 7.0 to not count comments).
"""
hashes = []
with open(path) as file:
for lineno, line in enumerate(file, 1):
match = HASH_COMMENT_RE.match(line)
if match: # Accumulate this hash.
hashes.append(match.groupdict()['hash'])
if not IGNORED_LINE_RE.match(line):
yield hashes # Report hashes seen so far.
hashes = []
elif PIP_COUNTS_COMMENTS:
# Comment: count as normal req but have no hashes.
yield []
return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
"""Delegate to pip the given args (starting with the subcommand), and raise
``PipException`` if something goes wrong."""
status_code = pip.main(initial_args)
# Clear out the registrations in the pip "logger" singleton. Otherwise,
# loggers keep getting appended to it with every run. Pip assumes only one
# command invocation will happen per interpreter lifetime.
logger.consumers = []
if status_code:
raise PipException(status_code)
def hash_of_file(path):
"""Return the hash of a downloaded file."""
with open(path, 'rb') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha)
def is_git_sha(text):
"""Return whether this is probably a git sha"""
# Handle both the full sha as well as the 7-character abbreviation
if len(text) in (40, 7):
try:
int(text, 16)
return True
except ValueError:
pass
return False
def filename_from_url(url):
parsed = urlparse(url)
path = parsed.path
return path.split('/')[-1]
def requirement_args(argv, want_paths=False, want_other=False):
"""Return an iterable of filtered arguments.
:arg argv: Arguments, starting after the subcommand
:arg want_paths: If True, the returned iterable includes the paths to any
requirements files following a ``-r`` or ``--requirement`` option.
:arg want_other: If True, the returned iterable includes the args that are
not a requirement-file path or a ``-r`` or ``--requirement`` flag.
"""
was_r = False
for arg in argv:
# Allow for requirements files named "-r", don't freak out if there's a
# trailing "-r", etc.
if was_r:
if want_paths:
yield arg
was_r = False
elif arg in ['-r', '--requirement']:
was_r = True
else:
if want_other:
yield arg
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if not paths:
        # No files given: show usage and signal a commandline error.
        parser.print_usage()
        return COMMAND_LINE_ERROR
    for path in paths:
        print('# sha256:', hash_of_file(path))
    return ITS_FINE_ITS_FINE
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2

    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attributes various pip versions read off the options object; None/False
    # are the do-nothing defaults.
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Memoize a method that should return the same result every time on a
    given instance.

    The cache lives on the instance, in a ``_cache`` dict keyed by method
    name, so distinct instances are memoized independently.
    """
    @wraps(func)
    def memoizer(self):
        if not hasattr(self, '_cache'):
            self._cache = {}
        cache = self._cache
        name = func.__name__
        if name not in cache:
            cache[name] = func(self)
        return cache[name]
    return memoizer
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top-level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)
    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        kw, attr = option if isinstance(option, tuple) else (option, option)
        # MARKER (a unique sentinel) distinguishes "this pip version's parser
        # has no such option" from a legitimate falsy option value:
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value
    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    # --mirrors exists only on some pip versions, hence the getattr default:
    index_urls += getattr(options, 'mirrors', [])
    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)
    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
    """A wrapper around InstallRequirement which offers additional information
    based on downloading and examining a corresponding package archive

    These are conceptually immutable, so we can get away with memoizing
    expensive things.
    """
    def __init__(self, req, argv, finder):
        """Download a requirement, compare its hashes, and return a subclass
        of DownloadedReq depending on its state.

        :arg req: The InstallRequirement I am based on
        :arg argv: The args, starting after the subcommand
        :arg finder: A pip PackageFinder used to locate the archive when the
            requirement isn't already a URL
        """
        self._req = req
        self._argv = argv
        self._finder = finder
        # We use a separate temp dir for each requirement so requirements
        # (from different indices) that happen to have the same archive names
        # don't overwrite each other, leading to a security hole in which the
        # latter is a hash mismatch, the former has already passed the
        # comparison, and the latter gets installed.
        self._temp_path = mkdtemp(prefix='peep-')
        # Think of DownloadedReq as a one-shot state machine. It's an abstract
        # class that ratchets forward to being one of its own subclasses,
        # depending on its package status. Then it doesn't move again.
        self.__class__ = self._class()
    def dispose(self):
        """Delete temp files and dirs I've made. Render myself useless.

        Do not call further methods on me after calling dispose().
        """
        rmtree(self._temp_path)
    def _version(self):
        """Deduce the version number of the downloaded package from its filename."""
        # TODO: Can we delete this method and just print the line from the
        # reqs file verbatim instead?
        def version_of_archive(filename, package_name):
            # Since we know the project_name, we can strip that off the left, strip
            # any archive extensions off the right, and take the rest as the
            # version.
            for ext in ARCHIVE_EXTENSIONS:
                if filename.endswith(ext):
                    filename = filename[:-len(ext)]
                    break
            # Handle github sha tarball downloads.
            if is_git_sha(filename):
                filename = package_name + '-' + filename
            if not filename.lower().replace('_', '-').startswith(package_name.lower()):
                # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
                give_up(filename, package_name)
            return filename[len(package_name) + 1:]  # Strip off '-' before version.
        def version_of_wheel(filename, package_name):
            # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
            # name-convention) we know the format bits are '-' separated.
            whl_package_name, version, _rest = filename.split('-', 2)
            # Do the alteration to package_name from PEP 427:
            # NOTE(review): re.UNICODE is being passed as re.sub's positional
            # ``count`` argument here, not as ``flags`` -- looks like a latent
            # bug; confirm intent before relying on unicode matching.
            our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
            if whl_package_name != our_package_name:
                give_up(filename, whl_package_name)
            return version
        def give_up(filename, package_name):
            raise RuntimeError("The archive '%s' didn't start with the package name "
                               "'%s', so I couldn't figure out the version number. "
                               "My bad; improve me." %
                               (filename, package_name))
        get_version = (version_of_wheel
                       if self._downloaded_filename().endswith('.whl')
                       else version_of_archive)
        return get_version(self._downloaded_filename(), self._project_name())
    def _is_always_unsatisfied(self):
        """Returns whether this requirement is always unsatisfied

        This would happen in cases where we can't determine the version
        from the filename.
        """
        # If this is a github sha tarball, then it is always unsatisfied
        # because the url has a commit sha in it and not the version
        # number.
        url = self._url()
        if url:
            filename = filename_from_url(url)
            if filename.endswith(ARCHIVE_EXTENSIONS):
                filename, ext = splitext(filename)
            if is_git_sha(filename):
                return True
        return False
    @memoize  # Avoid hitting the file cache over and over.
    def _expected_hashes(self):
        """Return a list of known-good hashes for this package."""
        return hashes_above(*path_and_line(self._req))
    def _download(self, link):
        """Download a file, and return its name within my temp dir.

        This does no verification of HTTPS certs, but our checking hashes
        makes that largely unimportant. It would be nice to be able to use the
        requests lib, which can verify certs, but it is guaranteed to be
        available only in pip >= 1.5.

        This also drops support for proxies and basic auth, though those could
        be added back in.
        """
        # Based on pip 1.4.1's URLOpener but with cert verification removed
        def opener(is_https):
            if is_https:
                opener = build_opener(HTTPSHandler())
                # Strip out HTTPHandler to prevent MITM spoof:
                for handler in opener.handlers:
                    if isinstance(handler, HTTPHandler):
                        opener.handlers.remove(handler)
            else:
                opener = build_opener()
            return opener
        # Descended from unpack_http_url() in pip 1.4.1
        def best_filename(link, response):
            """Return the most informative possible filename for a download,
            ideally with a proper extension.
            """
            content_type = response.info().get('content-type', '')
            filename = link.filename  # fallback
            # Have a look at the Content-Disposition header for a better guess:
            content_disposition = response.info().get('content-disposition')
            if content_disposition:
                type, params = cgi.parse_header(content_disposition)
                # We use ``or`` here because we don't want to use an "empty" value
                # from the filename param:
                filename = params.get('filename') or filename
            ext = splitext(filename)[1]
            if not ext:
                ext = mimetypes.guess_extension(content_type)
                if ext:
                    filename += ext
            # Last resort: borrow an extension from the (possibly redirected)
            # final URL:
            if not ext and link.url != response.geturl():
                ext = splitext(response.geturl())[1]
                if ext:
                    filename += ext
            return filename
        # Descended from _download_url() in pip 1.4.1
        def pipe_to_file(response, path, size=0):
            """Pull the data off an HTTP response, shove it in a new file, and
            show progress.

            :arg response: A file-like object to read from
            :arg path: The path of the new file
            :arg size: The expected size, in bytes, of the download. 0 for
                unknown or to suppress progress indication (as for cached
                downloads)
            """
            def response_chunks(chunk_size):
                while True:
                    chunk = response.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
            print('Downloading %s%s...' % (
                self._req.req,
                (' (%sK)' % (size / 1000)) if size > 1000 else ''))
            progress_indicator = (DownloadProgressBar(max=size).iter if size
                                  else DownloadProgressSpinner().iter)
            with open(path, 'wb') as file:
                for chunk in progress_indicator(response_chunks(4096), 4096):
                    file.write(chunk)
        # Strip any #fragment before fetching:
        url = link.url.split('#', 1)[0]
        try:
            response = opener(urlparse(url).scheme != 'http').open(url)
        except (HTTPError, IOError) as exc:
            raise DownloadError(link, exc)
        filename = best_filename(link, response)
        try:
            size = int(response.headers['content-length'])
        except (ValueError, KeyError, TypeError):
            size = 0
        pipe_to_file(response, join(self._temp_path, filename), size=size)
        return filename
    # Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
    @memoize  # Avoid re-downloading.
    def _downloaded_filename(self):
        """Download the package's archive if necessary, and return its
        filename.

        --no-deps is implied, as we have reimplemented the bits that would
        ordinarily do dependency resolution.
        """
        # Peep doesn't support requirements that don't come down as a single
        # file, because it can't hash them. Thus, it doesn't support editable
        # requirements, because pip itself doesn't support editable
        # requirements except for "local projects or a VCS url". Nor does it
        # support VCS requirements yet, because we haven't yet come up with a
        # portable, deterministic way to hash them. In summary, all we support
        # is == requirements and tarballs/zips/etc.
        # TODO: Stop on reqs that are editable or aren't ==.
        # If the requirement isn't already specified as a URL, get a URL
        # from an index:
        link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
        if link:
            lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
            if lower_scheme == 'http' or lower_scheme == 'https':
                file_path = self._download(link)
                return basename(file_path)
            elif lower_scheme == 'file':
                # The following is inspired by pip's unpack_file_url():
                link_path = url_to_path(link.url_without_fragment)
                if isdir(link_path):
                    raise UnsupportedRequirementError(
                        "%s: %s is a directory. So that it can compute "
                        "a hash, peep supports only filesystem paths which "
                        "point to files" %
                        (self._req, link.url_without_fragment))
                else:
                    copy(link_path, self._temp_path)
                    return basename(link_path)
            else:
                raise UnsupportedRequirementError(
                    "%s: The download link, %s, would not result in a file "
                    "that can be hashed. Peep supports only == requirements, "
                    "file:// URLs pointing to files (not folders), and "
                    "http:// and https:// URLs pointing to tarballs, zips, "
                    "etc." % (self._req, link.url))
        else:
            raise UnsupportedRequirementError(
                "%s: couldn't determine where to download this requirement from."
                % (self._req,))
    def install(self):
        """Install the package I represent, without dependencies.

        Obey typical pip-install options passed in on the command line.
        """
        other_args = list(requirement_args(self._argv, want_other=True))
        archive_path = join(self._temp_path, self._downloaded_filename())
        # -U so it installs whether pip deems the requirement "satisfied" or
        # not. This is necessary for GitHub-sourced zips, which change without
        # their version numbers changing.
        run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash."""
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))
    def _project_name(self):
        """Return the inner Requirement's "unsafe name".

        Raise ValueError if there is no name.
        """
        name = getattr(self._req.req, 'project_name', '')
        if name:
            return name
        # Newer pips expose 'name' instead of 'project_name':
        name = getattr(self._req.req, 'name', '')
        if name:
            return safe_name(name)
        raise ValueError('Requirement has no project_name.')
    def _name(self):
        # Name as pip itself reports it; may differ from _project_name().
        return self._req.name
    def _link(self):
        try:
            return self._req.link
        except AttributeError:
            # The link attribute isn't available prior to pip 6.1.0, so fall
            # back to the now deprecated 'url' attribute.
            return Link(self._req.url) if self._req.url else None
    def _url(self):
        # Return the requirement's URL as a string, or None if it has none.
        link = self._link()
        return link.url if link else None
    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        self._req.check_if_exists()
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())
    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        # Checks are ordered cheapest-first; _actual_hash() triggers a
        # download, so it comes last.
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        if self._is_satisfied():
            return SatisfiedReq
        if not self._expected_hashes():
            return MissingReq
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        return InstallableReq
    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.
        """
        return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""

    @classmethod
    def head(cls):
        return 'The following requirements could not be processed:\n'

    def error(self):
        # Without an #egg= fragment, there is no way to tell the package name
        # from the URL.
        url = self._url()
        return '* Unable to determine package name from URL %s; add #egg=' % url
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""

    @classmethod
    def head(cls):
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        # Prefer the URL as the requirement line; it always contains an
        # #egg= part, or this would have been a MalformedReq. Otherwise fall
        # back to a pinned name==version line.
        line = self._url() or '%s==%s' % (self._name(), self._version())
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""
    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")
    def error(self):
        # Build a "<name>: expected [one of] <hash>" report, with additional
        # expected hashes padded onto their own lines and the actual hash on a
        # final "got" line.
        preamble = ' %s: expected' % self._project_name()
        if len(self._expected_hashes()) > 1:
            preamble += ' one of'
        padding = '\n' + ' ' * (len(preamble) + 1)
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(self._expected_hashes()),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())
    @classmethod
    def foot(cls):
        # Extra blank line so mismatch reports don't run together.
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""

    @classmethod
    def head(cls):
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")

    def error(self):
        # Not really an error; just list the already-satisfied requirement.
        requirement = self._req
        return ' %s' % (requirement,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed"""
    # No head()/error() overrides: this state is not an error, so it is never
    # reported; peep_install() just calls install() on these.
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported. Mismatches are the scariest, so they come first:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Return a map of key -> list of things.

    The result is a ``defaultdict``, so looking up an absent key yields an
    empty list rather than raising--callers rely on that.
    """
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def _parse_requirements(path, finder):
    """Parse the requirements file at ``path`` into a list of
    InstallRequirements, papering over pip-version differences.
    """
    def materialize(**extra):
        # list() so the lazy generator that is parse_requirements() actually
        # runs far enough to report a TypeError for unsupported kwargs.
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder, **extra))

    try:
        return materialize()
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return materialize(session=PipSession())
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    # One finder is shared by every requirement in the file:
    finder = package_finder(argv)
    return [DownloadedReq(parsed, argv, finder)
            for parsed in _parse_requirements(path, finder)]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    # Messages are buffered and printed all at once in the ``finally`` block,
    # after temp dirs are cleaned up, so they aren't interleaved with pip's
    # own output.
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR
        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # Group reqs by the DownloadedReq subclass each ratcheted into:
        buckets = bucket(reqs, lambda r: r.__class__)
        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')
        # first_every_last() (defined elsewhere in this file) presumably runs
        # the first printer once before the items, the second for every item,
        # and the third once after -- confirm against its definition.
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)
        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            for req in buckets[InstallableReq]:
                req.install()
            first_every_last(buckets[SatisfiedReq], *printers)
            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always delete per-req temp dirs, then flush the buffered report:
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    # Path of the requirements file the previous req came from, for grouping
    # the output by source file:
    comes_from = None
    # NOTE(review): ``argv`` here is the module-level ``sys.argv`` import, not
    # the subcommand args; pip's parser appears to tolerate the extra leading
    # tokens, but confirm this is intentional.
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # Convert each urlsafe-base64 peep hash back to hex, as pip 8 expects.
        # The '=' restores the padding stripped by encoded_hash().
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path
        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print(' --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    subcommands = {'hash': peep_hash,
                   'install': peep_install,
                   'port': peep_port}
    try:
        handler = subcommands.get(argv[1]) if len(argv) >= 2 else None
        if handler is not None:
            return handler(argv[2:])
        # Fall through to top-level pip main() for everything else:
        return pip.main()
    except PipException as exc:
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a ready-to-paste bug-report template for an unhandled exception."""
    report = [
        'Oh no! Peep had a problem while trying to do stuff. Please write up a bug report',
        'with the specifics so we can fix it:',
        '',
        'https://github.com/erikrose/peep/issues/new',
        '',
        'Here are some particulars you can copy and paste into the bug report:',
        '',
        '---',
        'peep: ' + repr(__version__),
        'python: ' + repr(sys.version),
        'pip: ' + repr(getattr(pip, '__version__', 'no __version__ attr')),
        'Command line:  ' + repr(sys.argv),
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)),
        '---',
    ]
    print('\n'.join(report))
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Any unexpected crash prints a bug-report template instead of a bare
        # traceback, then exits with a distinctive status code.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | downloaded_reqs_from_path | python | def downloaded_reqs_from_path(path, argv):
finder = package_finder(argv)
return [DownloadedReq(req, argv, finder) for req in
_parse_requirements(path, finder)] | Return a list of DownloadedReqs representing the requirements parsed
out of a given requirements file.
:arg path: The path to the requirements file
:arg argv: The commandline args, starting after the subcommand | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L834-L844 | [
"def package_finder(argv):\n \"\"\"Return a PackageFinder respecting command-line options.\n\n :arg argv: Everything after the subcommand\n\n \"\"\"\n # We instantiate an InstallCommand and then use some of its private\n # machinery--its arg parser--for our own purposes, like a virus. This\n # approach is portable across many pip versions, where more fine-grained\n # ones are not. Ignoring options that don't exist on the parser (for\n # instance, --use-wheel) gives us a straightforward method of backward\n # compatibility.\n try:\n command = InstallCommand()\n except TypeError:\n # This is likely pip 1.3.0's \"__init__() takes exactly 2 arguments (1\n # given)\" error. In that version, InstallCommand takes a top=level\n # parser passed in from outside.\n from pip.baseparser import create_main_parser\n command = InstallCommand(create_main_parser())\n # The downside is that it essentially ruins the InstallCommand class for\n # further use. Calling out to pip.main() within the same interpreter, for\n # example, would result in arguments parsed this time turning up there.\n # Thus, we deepcopy the arg parser so we don't trash its singletons. Of\n # course, deepcopy doesn't work on these objects, because they contain\n # uncopyable regex patterns, so we pickle and unpickle instead. 
Fun!\n options, _ = loads(dumps(command.parser)).parse_args(argv)\n\n # Carry over PackageFinder kwargs that have [about] the same names as\n # options attr names:\n possible_options = [\n 'find_links',\n FORMAT_CONTROL_ARG,\n ('allow_all_prereleases', 'pre'),\n 'process_dependency_links'\n ]\n kwargs = {}\n for option in possible_options:\n kw, attr = option if isinstance(option, tuple) else (option, option)\n value = getattr(options, attr, MARKER)\n if value is not MARKER:\n kwargs[kw] = value\n\n # Figure out index_urls:\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n index_urls = []\n index_urls += getattr(options, 'mirrors', [])\n\n # If pip is new enough to have a PipSession, initialize one, since\n # PackageFinder requires it:\n if hasattr(command, '_build_session'):\n kwargs['session'] = command._build_session(options)\n\n return PackageFinder(index_urls=index_urls, **kwargs)\n",
"def _parse_requirements(path, finder):\n try:\n # list() so the generator that is parse_requirements() actually runs\n # far enough to report a TypeError\n return list(parse_requirements(\n path, options=EmptyOptions(), finder=finder))\n except TypeError:\n # session is a required kwarg as of pip 6.0 and will raise\n # a TypeError if missing. It needs to be a PipSession instance,\n # but in older versions we can't import it from pip.download\n # (nor do we need it at all) so we only import it in this except block\n from pip.download import PipSession\n return list(parse_requirements(\n path, options=EmptyOptions(), session=PipSession(), finder=finder))\n"
] | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable, or raise RuntimeError.

    :arg specifier: A setuptools requirement string, e.g. ``'pip>=0.6.2'``
    """
    try:
        # distribution.activate() can itself raise VersionConflict, so the
        # loop stays inside the try block.
        for distribution in require(specifier):
            distribution.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
# Version tuple; repr()'d into bug reports by exception_handler().
__version__ = 3, 1, 2
try:
    from pip.index import FormatControl  # noqa
    FORMAT_CONTROL_ARG = 'format_control'
    # The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
    PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
    PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
    FORMAT_CONTROL_ARG = 'use_wheel'  # pre-7
    PIP_COUNTS_COMMENTS = True
# Shell status codes returned by the subcommands:
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
# Unique sentinel distinguishing "attribute absent" from a real falsy value:
MARKER = object()
class PipException(Exception):
    """Raised when a delegated pip invocation exits with an error."""

    def __init__(self, error_code):
        # Shell status code pip exited with; surfaced by main().
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """An unsupported line was encountered in a requirements file."""
    # Raised (see DownloadedReq._downloaded_filename) for file:// URLs that
    # point at directories and for URL schemes that can't yield a hashable
    # single-file archive.
class DownloadError(Exception):
    """Raised when fetching a requirement's archive fails."""

    def __init__(self, link, exc):
        # The link we were trying to fetch:
        self.link = link
        # Human-readable reason, taken from the underlying exception:
        self.reason = str(exc)

    def __str__(self):
        return 'Downloading {0} failed: {1}'.format(self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash.

    If you pass a sha256, this results in the hash algorithm that the Wheel
    format (PEP 427) uses, except here it's intended to be run across the
    downloaded archive before unpacking.
    """
    encoded = urlsafe_b64encode(sha.digest()).decode('ascii')
    # The base64 '=' padding carries no information, so strip it.
    return encoded.rstrip('=')
def path_and_line(req):
    """Return the path and line number of the file from which an
    InstallRequirement came.

    Relies on pip's "-r <path> (line <n>)" formatting of ``comes_from``.
    """
    match = re.match(r'-r (?P<path>.*) \(line (?P<line>\d+)\)$',
                     req.comes_from)
    return match.group('path'), int(match.group('line'))
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line ``line_number``.
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.

        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                # A "# sha256: ..." line also matches IGNORED_LINE_RE below
                # (it starts with '#'), so it accumulates rather than yields.
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    yield []
    # Pick out the hash list belonging to the requested (1-based) line.
    return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong."""
    # pip.main() returns a shell-style status code; nonzero means failure.
    status_code = pip.main(initial_args)
    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []
    if status_code:
        raise PipException(status_code)
def hash_of_file(path):
    """Return the peep hash of the file at ``path``.

    The file is read in 1 MiB chunks so arbitrarily large archives can be
    hashed without pulling them into memory all at once.
    """
    sha = sha256()
    with open(path, 'rb') as archive:
        # iter() with a b'' sentinel stops cleanly at end-of-file.
        for chunk in iter(lambda: archive.read(2 ** 20), b''):
            sha.update(chunk)
    return encoded_hash(sha)
def is_git_sha(text):
    """Return whether ``text`` is probably a git commit sha.

    Recognizes both the full 40-character sha and the 7-character
    abbreviation (as seen in GitHub sha tarball filenames).
    """
    # Handle both the full sha as well as the 7-character abbreviation.
    # Note: the previous int(text, 16) check was too lenient: int() also
    # accepts a sign, surrounding whitespace, an '0x' prefix, and underscore
    # digit separators (e.g. '-123456', '0x12345', '1_2_3_4'), none of which
    # appear in a real sha. Require pure hex digits instead.
    if len(text) in (40, 7):
        return re.match(r'[0-9a-fA-F]+\Z', text) is not None
    return False
def filename_from_url(url):
    """Return the last path segment of ``url``: its would-be filename."""
    # The query string and fragment are excluded by urlparse's .path.
    path = urlparse(url).path
    return path.rsplit('/', 1)[-1]
def requirement_args(argv, want_paths=False, want_other=False):
    """Return an iterable of filtered arguments.

    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, include the path that follows each ``-r`` or
        ``--requirement`` flag
    :arg want_other: If True, include every arg that is neither such a flag
        nor the path following one
    """
    expecting_path = False
    for token in argv:
        if expecting_path:
            # The token after -r/--requirement is always treated as a path,
            # even if it looks like another flag (e.g. a file named "-r").
            expecting_path = False
            if want_paths:
                yield token
        elif token in ('-r', '--requirement'):
            expecting_path = True
        elif want_other:
            yield token
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if not paths:
        # No files given: show usage and signal a command-line error.
        parser.print_usage()
        return COMMAND_LINE_ERROR
    for path in paths:
        print('# sha256:', hash_of_file(path))
    return ITS_FINE_ITS_FINE
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2

    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.

    """
    # Attributes pip's parse_requirements() may read off the options object:
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Memoize a method that should return the same result every time on a
    given instance.

    The cache lives on the instance itself (in ``_cache``), keyed by method
    name, so each instance computes each memoized method at most once.
    """
    @wraps(func)
    def wrapper(self):
        if not hasattr(self, '_cache'):
            self._cache = {}
        cache = self._cache
        name = func.__name__
        if name in cache:
            return cache[name]
        result = cache[name] = func(self)
        return result
    return wrapper
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand

    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top-level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)
    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        kw, attr = option if isinstance(option, tuple) else (option, option)
        # MARKER distinguishes "option absent in this pip version" from a
        # legitimate None value:
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value
    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])

    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)

    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
    """A wrapper around InstallRequirement which offers additional information
    based on downloading and examining a corresponding package archive

    These are conceptually immutable, so we can get away with memoizing
    expensive things.

    """
    def __init__(self, req, argv, finder):
        """Download a requirement, compare its hashes, and return a subclass
        of DownloadedReq depending on its state.

        :arg req: The InstallRequirement I am based on
        :arg argv: The args, starting after the subcommand
        :arg finder: The PackageFinder used to resolve the requirement to a
            download link when it isn't already a URL

        """
        self._req = req
        self._argv = argv
        self._finder = finder

        # We use a separate temp dir for each requirement so requirements
        # (from different indices) that happen to have the same archive names
        # don't overwrite each other, leading to a security hole in which the
        # latter is a hash mismatch, the former has already passed the
        # comparison, and the latter gets installed.
        self._temp_path = mkdtemp(prefix='peep-')
        # Think of DownloadedReq as a one-shot state machine. It's an abstract
        # class that ratchets forward to being one of its own subclasses,
        # depending on its package status. Then it doesn't move again.
        self.__class__ = self._class()

    def dispose(self):
        """Delete temp files and dirs I've made. Render myself useless.

        Do not call further methods on me after calling dispose().

        """
        rmtree(self._temp_path)

    def _version(self):
        """Deduce the version number of the downloaded package from its filename."""
        # TODO: Can we delete this method and just print the line from the
        # reqs file verbatim instead?
        def version_of_archive(filename, package_name):
            # Since we know the project_name, we can strip that off the left, strip
            # any archive extensions off the right, and take the rest as the
            # version.
            for ext in ARCHIVE_EXTENSIONS:
                if filename.endswith(ext):
                    filename = filename[:-len(ext)]
                    break
            # Handle github sha tarball downloads.
            if is_git_sha(filename):
                filename = package_name + '-' + filename
            if not filename.lower().replace('_', '-').startswith(package_name.lower()):
                # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
                give_up(filename, package_name)
            return filename[len(package_name) + 1:]  # Strip off '-' before version.

        def version_of_wheel(filename, package_name):
            # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
            # name-convention) we know the format bits are '-' separated.
            whl_package_name, version, _rest = filename.split('-', 2)
            # Do the alteration to package_name from PEP 427:
            # NOTE(review): re.sub's 4th positional argument is ``count``, not
            # flags, so re.UNICODE (32) is being passed as a replacement limit
            # here rather than as a flag -- TODO confirm intent.
            our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
            if whl_package_name != our_package_name:
                give_up(filename, whl_package_name)
            return version

        def give_up(filename, package_name):
            raise RuntimeError("The archive '%s' didn't start with the package name "
                               "'%s', so I couldn't figure out the version number. "
                               "My bad; improve me." %
                               (filename, package_name))

        get_version = (version_of_wheel
                       if self._downloaded_filename().endswith('.whl')
                       else version_of_archive)
        return get_version(self._downloaded_filename(), self._project_name())

    def _is_always_unsatisfied(self):
        """Returns whether this requirement is always unsatisfied

        This would happen in cases where we can't determine the version
        from the filename.

        """
        # If this is a github sha tarball, then it is always unsatisfied
        # because the url has a commit sha in it and not the version
        # number.
        url = self._url()
        if url:
            filename = filename_from_url(url)
            if filename.endswith(ARCHIVE_EXTENSIONS):
                filename, ext = splitext(filename)
                if is_git_sha(filename):
                    return True
        return False

    @memoize  # Avoid hitting the file[cache] over and over.
    def _expected_hashes(self):
        """Return a list of known-good hashes for this package."""
        return hashes_above(*path_and_line(self._req))

    def _download(self, link):
        """Download a file, and return its name within my temp dir.

        This does no verification of HTTPS certs, but our checking hashes
        makes that largely unimportant. It would be nice to be able to use the
        requests lib, which can verify certs, but it is guaranteed to be
        available only in pip >= 1.5.

        This also drops support for proxies and basic auth, though those could
        be added back in.

        """
        # Based on pip 1.4.1's URLOpener but with cert verification removed
        def opener(is_https):
            if is_https:
                opener = build_opener(HTTPSHandler())
                # Strip out HTTPHandler to prevent MITM spoof:
                for handler in opener.handlers:
                    if isinstance(handler, HTTPHandler):
                        opener.handlers.remove(handler)
            else:
                opener = build_opener()
            return opener

        # Descended from unpack_http_url() in pip 1.4.1
        def best_filename(link, response):
            """Return the most informative possible filename for a download,
            ideally with a proper extension.

            """
            content_type = response.info().get('content-type', '')
            filename = link.filename  # fallback
            # Have a look at the Content-Disposition header for a better guess:
            content_disposition = response.info().get('content-disposition')
            if content_disposition:
                type, params = cgi.parse_header(content_disposition)
                # We use ``or`` here because we don't want to use an "empty" value
                # from the filename param:
                filename = params.get('filename') or filename
            ext = splitext(filename)[1]
            if not ext:
                ext = mimetypes.guess_extension(content_type)
                if ext:
                    filename += ext
            # Last resort: take an extension off the redirected URL:
            if not ext and link.url != response.geturl():
                ext = splitext(response.geturl())[1]
                if ext:
                    filename += ext
            return filename

        # Descended from _download_url() in pip 1.4.1
        def pipe_to_file(response, path, size=0):
            """Pull the data off an HTTP response, shove it in a new file, and
            show progress.

            :arg response: A file-like object to read from
            :arg path: The path of the new file
            :arg size: The expected size, in bytes, of the download. 0 for
                unknown or to suppress progress indication (as for cached
                downloads)

            """
            def response_chunks(chunk_size):
                while True:
                    chunk = response.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk

            print('Downloading %s%s...' % (
                self._req.req,
                (' (%sK)' % (size / 1000)) if size > 1000 else ''))
            progress_indicator = (DownloadProgressBar(max=size).iter if size
                                  else DownloadProgressSpinner().iter)
            with open(path, 'wb') as file:
                for chunk in progress_indicator(response_chunks(4096), 4096):
                    file.write(chunk)

        url = link.url.split('#', 1)[0]  # Strip any #egg= fragment.
        try:
            response = opener(urlparse(url).scheme != 'http').open(url)
        except (HTTPError, IOError) as exc:
            raise DownloadError(link, exc)
        filename = best_filename(link, response)
        try:
            size = int(response.headers['content-length'])
        except (ValueError, KeyError, TypeError):
            size = 0
        pipe_to_file(response, join(self._temp_path, filename), size=size)
        return filename

    # Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
    @memoize  # Avoid re-downloading.
    def _downloaded_filename(self):
        """Download the package's archive if necessary, and return its
        filename.

        --no-deps is implied, as we have reimplemented the bits that would
        ordinarily do dependency resolution.

        """
        # Peep doesn't support requirements that don't come down as a single
        # file, because it can't hash them. Thus, it doesn't support editable
        # requirements, because pip itself doesn't support editable
        # requirements except for "local projects or a VCS url". Nor does it
        # support VCS requirements yet, because we haven't yet come up with a
        # portable, deterministic way to hash them. In summary, all we support
        # is == requirements and tarballs/zips/etc.

        # TODO: Stop on reqs that are editable or aren't ==.

        # If the requirement isn't already specified as a URL, get a URL
        # from an index:
        link = self._link() or self._finder.find_requirement(self._req, upgrade=False)

        if link:
            lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
            if lower_scheme == 'http' or lower_scheme == 'https':
                file_path = self._download(link)
                return basename(file_path)
            elif lower_scheme == 'file':
                # The following is inspired by pip's unpack_file_url():
                link_path = url_to_path(link.url_without_fragment)
                if isdir(link_path):
                    raise UnsupportedRequirementError(
                        "%s: %s is a directory. So that it can compute "
                        "a hash, peep supports only filesystem paths which "
                        "point to files" %
                        (self._req, link.url_without_fragment))
                else:
                    copy(link_path, self._temp_path)
                    return basename(link_path)
            else:
                raise UnsupportedRequirementError(
                    "%s: The download link, %s, would not result in a file "
                    "that can be hashed. Peep supports only == requirements, "
                    "file:// URLs pointing to files (not folders), and "
                    "http:// and https:// URLs pointing to tarballs, zips, "
                    "etc." % (self._req, link.url))
        else:
            raise UnsupportedRequirementError(
                "%s: couldn't determine where to download this requirement from."
                % (self._req,))

    def install(self):
        """Install the package I represent, without dependencies.

        Obey typical pip-install options passed in on the command line.

        """
        other_args = list(requirement_args(self._argv, want_other=True))
        archive_path = join(self._temp_path, self._downloaded_filename())
        # -U so it installs whether pip deems the requirement "satisfied" or
        # not. This is necessary for GitHub-sourced zips, which change without
        # their version numbers changing.
        run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])

    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash."""
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))

    def _project_name(self):
        """Return the inner Requirement's "unsafe name".

        Raise ValueError if there is no name.

        """
        name = getattr(self._req.req, 'project_name', '')
        if name:
            return name
        name = getattr(self._req.req, 'name', '')
        if name:
            return safe_name(name)
        raise ValueError('Requirement has no project_name.')

    def _name(self):
        # The wrapped InstallRequirement's own notion of its name.
        return self._req.name

    def _link(self):
        try:
            return self._req.link
        except AttributeError:
            # The link attribute isn't available prior to pip 6.1.0, so fall
            # back to the now deprecated 'url' attribute.
            return Link(self._req.url) if self._req.url else None

    def _url(self):
        """Return the requirement's download URL, or None if it has no link."""
        link = self._link()
        return link.url if link else None

    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        self._req.check_if_exists()
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())

    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        if self._is_satisfied():
            return SatisfiedReq
        if not self._expected_hashes():
            return MissingReq
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        return InstallableReq

    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.

        """
        return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""

    @classmethod
    def head(cls):
        return 'The following requirements could not be processed:\n'

    def error(self):
        # We land here only when _project_name() raised ValueError, so point
        # the user at the usual cause: a URL missing its #egg= fragment.
        return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""

    @classmethod
    def head(cls):
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        if self._url():
            # _url() always contains an #egg= part, or this would be a
            # MalformedRequest.
            line = self._url()
        else:
            line = '%s==%s' % (self._name(), self._version())
        # Emit a ready-to-paste hash comment plus the requirement line:
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""

    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")

    def error(self):
        preamble = '    %s: expected' % self._project_name()
        if len(self._expected_hashes()) > 1:
            preamble += ' one of'
        # Indent continuation hashes so they line up under the first one:
        padding = '\n' + ' ' * (len(preamble) + 1)
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(self._expected_hashes()),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())

    @classmethod
    def foot(cls):
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""

    @classmethod
    def head(cls):
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")

    def error(self):
        # Not really an error; just lists the already-satisfied requirement.
        return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed"""


# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported. (SatisfiedReq and InstallableReq are not errors.)
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Group ``things`` into a map of key(thing) -> list of things.

    The returned mapping is a ``defaultdict(list)``, so looking up an absent
    key yields an empty list rather than raising.
    """
    grouped = defaultdict(list)
    for thing in things:
        grouped[key(thing)].append(thing)
    return grouped
def first_every_last(iterable, first, every, last):
    """Execute something before the first item of iter, something else for each
    item, and a third thing after the last.

    If there are no items in the iterable, don't execute anything.

    """
    iterator = iter(iterable)
    try:
        item = next(iterator)
    except StopIteration:
        return  # Empty iterable: run nothing at all.
    first(item)
    every(item)
    for item in iterator:
        every(item)
    last(item)
def _parse_requirements(path, finder):
    """Run pip's ``parse_requirements`` over ``path``, papering over signature
    differences between pip versions, and return the results as a list.

    :arg path: Path of the requirements file
    :arg finder: The PackageFinder to pass through to pip
    """
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand

    """
    output = []
    out = output.append  # Buffer our output; it is printed once, at the end.
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR

        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        # Group the reqs by their state (the DownloadedReq subclass each
        # ratcheted itself into):
        buckets = bucket(reqs, lambda r: r.__class__)

        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')

        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)

        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            for req in buckets[InstallableReq]:
                req.install()

            first_every_last(buckets[SatisfiedReq], *printers)

            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always clean up each requirement's temp dir, then flush the
        # buffered report:
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.

    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    # NOTE(review): ``argv`` below is the module-level ``sys.argv``, not a
    # parameter of this function, so the finder sees the raw command line --
    # looks intentional, but confirm.
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # Re-pad the stripped-'=' base64 peep hashes, then re-encode as hex
        # for pip-8's --hash option:
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path

        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print('    --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    # Map each peep subcommand to its handler; anything else goes to pip.
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        if len(argv) >= 2 and argv[1] in commands:
            return commands[argv[1]](argv[2:])
        else:
            # Fall through to top-level pip main() for everything else:
            return pip.main()
    except PipException as exc:
        # Propagate pip's exit status to the shell.
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a bug-report request plus environment particulars and the full
    traceback for an unhandled exception.

    :arg exc_type: The exception class
    :arg exc_value: The exception instance
    :arg exc_tb: The traceback object
    """
    print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
    print('with the specifics so we can fix it:')
    print()
    print('https://github.com/erikrose/peep/issues/new')
    print()
    print('Here are some particulars you can copy and paste into the bug report:')
    print()
    print('---')
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Catch-all: show the friendly bug-report message instead of a bare
        # traceback, and exit with a distinct status code.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | peep_install | python | def peep_install(argv):
output = []
out = output.append
reqs = []
try:
req_paths = list(requirement_args(argv, want_paths=True))
if not req_paths:
out("You have to specify one or more requirements files with the -r option, because\n"
"otherwise there's nowhere for peep to look up the hashes.\n")
return COMMAND_LINE_ERROR
# We're a "peep install" command, and we have some requirement paths.
reqs = list(chain.from_iterable(
downloaded_reqs_from_path(path, argv)
for path in req_paths))
buckets = bucket(reqs, lambda r: r.__class__)
# Skip a line after pip's "Cleaning up..." so the important stuff
# stands out:
if any(buckets[b] for b in ERROR_CLASSES):
out('\n')
printers = (lambda r: out(r.head()),
lambda r: out(r.error() + '\n'),
lambda r: out(r.foot()))
for c in ERROR_CLASSES:
first_every_last(buckets[c], *printers)
if any(buckets[b] for b in ERROR_CLASSES):
out('-------------------------------\n'
'Not proceeding to installation.\n')
return SOMETHING_WENT_WRONG
else:
for req in buckets[InstallableReq]:
req.install()
first_every_last(buckets[SatisfiedReq], *printers)
return ITS_FINE_ITS_FINE
except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
out(str(exc))
return SOMETHING_WENT_WRONG
finally:
for req in reqs:
req.dispose()
print(''.join(output)) | Perform the ``peep install`` subcommand, returning a shell status code
or raising a PipException.
:arg argv: The commandline args, starting after the subcommand | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L847-L898 | [
"def requirement_args(argv, want_paths=False, want_other=False):\n \"\"\"Return an iterable of filtered arguments.\n\n :arg argv: Arguments, starting after the subcommand\n :arg want_paths: If True, the returned iterable includes the paths to any\n requirements files following a ``-r`` or ``--requirement`` option.\n :arg want_other: If True, the returned iterable includes the args that are\n not a requirement-file path or a ``-r`` or ``--requirement`` flag.\n\n \"\"\"\n was_r = False\n for arg in argv:\n # Allow for requirements files named \"-r\", don't freak out if there's a\n # trailing \"-r\", etc.\n if was_r:\n if want_paths:\n yield arg\n was_r = False\n elif arg in ['-r', '--requirement']:\n was_r = True\n else:\n if want_other:\n yield arg\n",
"def bucket(things, key):\n \"\"\"Return a map of key -> list of things.\"\"\"\n ret = defaultdict(list)\n for thing in things:\n ret[key(thing)].append(thing)\n return ret\n",
"def first_every_last(iterable, first, every, last):\n \"\"\"Execute something before the first item of iter, something else for each\n item, and a third thing after the last.\n\n If there are no items in the iterable, don't execute anything.\n\n \"\"\"\n did_first = False\n for item in iterable:\n if not did_first:\n did_first = True\n first(item)\n every(item)\n if did_first:\n last(item)\n"
] | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable. Raise a RuntimeError if we
    couldn't.

    :arg specifier: A setuptools requirement string, e.g. ``'pip>=0.6.2'``
    :raises RuntimeError: If no installed pip satisfies ``specifier``
    """
    try:
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)


# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2

try:
    from pip.index import FormatControl  # noqa
    # pip 7+: PackageFinder takes a ``format_control`` kwarg.
    FORMAT_CONTROL_ARG = 'format_control'

    # The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
    PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
    PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
    FORMAT_CONTROL_ARG = 'use_wheel'  # pre-7
    PIP_COUNTS_COMMENTS = True


# Shell exit statuses:
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3

# Archive suffixes stripped when deducing a version from a filename:
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')

# Sentinel to tell "option absent in this pip version" apart from None:
MARKER = object()
class PipException(Exception):
    """When I delegated to pip, it exited with an error."""

    def __init__(self, error_code):
        # pip's nonzero exit status, propagated to the shell by main().
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """An unsupported line was encountered in a requirements file."""
    # Raised for requirements peep can't reduce to a single hashable file
    # (editable reqs, directories, non-http(s)/file link schemes).
class DownloadError(Exception):
    """Raised when fetching a requirement's archive from its link fails."""

    def __init__(self, link, exc):
        self.link = link
        # Capture only the message; the original exception need not be kept.
        self.reason = str(exc)

    def __str__(self):
        return 'Downloading {0} failed: {1}'.format(self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash.

    If you pass a sha256, this results in the hash algorithm that the Wheel
    format (PEP 427) uses, except here it's intended to be run across the
    downloaded archive before unpacking.

    """
    b64 = urlsafe_b64encode(sha.digest()).decode('ascii')
    return b64.rstrip('=')
def path_and_line(req):
    """Return the path and line number of the file from which an
    InstallRequirement came.

    """
    # comes_from looks like "-r <path> (line <n>)"; pull out both pieces.
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    path, line = match.groups()
    return path, int(line)
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line ``line_number``.

    :arg path: Path of the requirements file to scan
    :arg line_number: 1-based line number (as reported by pip's
        ``parse_requirements``) whose preceding ``# sha256:`` comments we want
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.

        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).

        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []  # Reset for the next requirement line.
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    yield []
    # Take the (line_number - 1)-th list without materializing the rest.
    return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong.

    :arg initial_args: The args to hand to ``pip.main()``, e.g.
        ``['install', ...]``
    :raises PipException: If pip exits with a nonzero status code
    """
    status_code = pip.main(initial_args)

    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []

    if status_code:
        raise PipException(status_code)
def hash_of_file(path):
    """Return the peep-style hash of the file at ``path``.

    The file is read in 1 MiB chunks so arbitrarily large archives can be
    hashed without loading them into memory at once.
    """
    digester = sha256()
    with open(path, 'rb') as archive:
        for chunk in iter(lambda: archive.read(2 ** 20), b''):
            digester.update(chunk)
    return encoded_hash(digester)
def is_git_sha(text):
    """Return whether ``text`` is probably a git sha.

    Both the full 40-character sha and the 7-character abbreviation are
    recognized; anything else (including the empty string) is not.
    """
    # The previous implementation used int(text, 16), which also accepts
    # strings Python's int() tolerates--a leading '+'/'-', surrounding
    # whitespace, and '_' digit separators (3.6+)--none of which can appear
    # in a git sha. Check the characters explicitly instead.
    if len(text) in (40, 7):
        return all(c in '0123456789abcdefABCDEF' for c in text)
    return False
def filename_from_url(url):
    """Return the final path segment of *url*: its filename (possibly '')."""
    return urlparse(url).path.rsplit('/', 1)[-1]
def requirement_args(argv, want_paths=False, want_other=False):
    """Yield a filtered subset of commandline arguments.

    :arg argv: Arguments, starting after the subcommand
    :arg want_paths: If True, yield the paths to any requirements files that
        follow a ``-r`` or ``--requirement`` option.
    :arg want_other: If True, yield the args that are neither a
        requirement-file path nor a ``-r``/``--requirement`` flag.
    """
    expecting_path = False
    for token in argv:
        # Tolerate odd inputs: a requirements file literally named "-r",
        # a dangling trailing "-r", etc.
        if expecting_path:
            expecting_path = False
            if want_paths:
                yield token
        elif token in ('-r', '--requirement'):
            expecting_path = True
        elif want_other:
            yield token
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Print a peep hash line for each file named on the command line.

    Return a shell status code.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)

    # Guard clause: no files given means usage error.
    if not paths:
        parser.print_usage()
        return COMMAND_LINE_ERROR

    for path in paths:
        print('# sha256:', hash_of_file(path))
    return ITS_FINE_ITS_FINE
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2

    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attribute names mirror options parse_requirements() may read; these are
    # the harmless "nothing special" defaults for every supported pip version.
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Decorator: cache a nullary method's result on its instance.

    The value is stored in ``self._cache`` under the method's name, so the
    wrapped method runs at most once per instance.
    """
    @wraps(func)
    def wrapper(self):
        try:
            cache = self._cache
        except AttributeError:
            cache = self._cache = {}
        name = func.__name__
        if name not in cache:
            cache[name] = func(self)
        return cache[name]
    return wrapper
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top=level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)

    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        # Each entry is either a shared name or a (finder_kwarg, options_attr)
        # pair; MARKER distinguishes "option absent" from a falsy value.
        kw, attr = option if isinstance(option, tuple) else (option, option)
        value = getattr(options, attr, MARKER)
        if value is not MARKER:
            kwargs[kw] = value

    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])

    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)

    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
    """A wrapper around InstallRequirement which offers additional information
    based on downloading and examining a corresponding package archive

    These are conceptually immutable, so we can get away with memoizing
    expensive things.
    """
    def __init__(self, req, argv, finder):
        """Download a requirement, compare its hashes, and return a subclass
        of DownloadedReq depending on its state.

        :arg req: The InstallRequirement I am based on
        :arg argv: The args, starting after the subcommand
        :arg finder: The PackageFinder used to resolve the requirement to a
            download link
        """
        self._req = req
        self._argv = argv
        self._finder = finder

        # We use a separate temp dir for each requirement so requirements
        # (from different indices) that happen to have the same archive names
        # don't overwrite each other, leading to a security hole in which the
        # latter is a hash mismatch, the former has already passed the
        # comparison, and the latter gets installed.
        self._temp_path = mkdtemp(prefix='peep-')
        # Think of DownloadedReq as a one-shot state machine. It's an abstract
        # class that ratchets forward to being one of its own subclasses,
        # depending on its package status. Then it doesn't move again.
        self.__class__ = self._class()

    def dispose(self):
        """Delete temp files and dirs I've made. Render myself useless.

        Do not call further methods on me after calling dispose().
        """
        rmtree(self._temp_path)

    def _version(self):
        """Deduce the version number of the downloaded package from its filename."""
        # TODO: Can we delete this method and just print the line from the
        # reqs file verbatim instead?
        def version_of_archive(filename, package_name):
            # Since we know the project_name, we can strip that off the left, strip
            # any archive extensions off the right, and take the rest as the
            # version.
            for ext in ARCHIVE_EXTENSIONS:
                if filename.endswith(ext):
                    filename = filename[:-len(ext)]
                    break
            # Handle github sha tarball downloads.
            if is_git_sha(filename):
                filename = package_name + '-' + filename
            if not filename.lower().replace('_', '-').startswith(package_name.lower()):
                # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
                give_up(filename, package_name)
            return filename[len(package_name) + 1:]  # Strip off '-' before version.

        def version_of_wheel(filename, package_name):
            # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
            # name-convention) we know the format bits are '-' separated.
            whl_package_name, version, _rest = filename.split('-', 2)
            # Do the alteration to package_name from PEP 427:
            # NOTE(review): re.UNICODE is passed in re.sub's positional
            # ``count`` slot here, not ``flags`` -- likely a latent bug;
            # verify before relying on this substitution.
            our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
            if whl_package_name != our_package_name:
                give_up(filename, whl_package_name)
            return version

        def give_up(filename, package_name):
            raise RuntimeError("The archive '%s' didn't start with the package name "
                               "'%s', so I couldn't figure out the version number. "
                               "My bad; improve me." %
                               (filename, package_name))

        get_version = (version_of_wheel
                       if self._downloaded_filename().endswith('.whl')
                       else version_of_archive)
        return get_version(self._downloaded_filename(), self._project_name())

    def _is_always_unsatisfied(self):
        """Returns whether this requirement is always unsatisfied

        This would happen in cases where we can't determine the version
        from the filename.
        """
        # If this is a github sha tarball, then it is always unsatisfied
        # because the url has a commit sha in it and not the version
        # number.
        url = self._url()
        if url:
            filename = filename_from_url(url)
            if filename.endswith(ARCHIVE_EXTENSIONS):
                filename, ext = splitext(filename)
                if is_git_sha(filename):
                    return True
        return False

    @memoize  # Avoid hitting the file[cache] over and over.
    def _expected_hashes(self):
        """Return a list of known-good hashes for this package."""
        return hashes_above(*path_and_line(self._req))

    def _download(self, link):
        """Download a file, and return its name within my temp dir.

        This does no verification of HTTPS certs, but our checking hashes
        makes that largely unimportant. It would be nice to be able to use the
        requests lib, which can verify certs, but it is guaranteed to be
        available only in pip >= 1.5.

        This also drops support for proxies and basic auth, though those could
        be added back in.
        """
        # Based on pip 1.4.1's URLOpener but with cert verification removed
        def opener(is_https):
            if is_https:
                opener = build_opener(HTTPSHandler())
                # Strip out HTTPHandler to prevent MITM spoof:
                for handler in opener.handlers:
                    if isinstance(handler, HTTPHandler):
                        opener.handlers.remove(handler)
            else:
                opener = build_opener()
            return opener

        # Descended from unpack_http_url() in pip 1.4.1
        def best_filename(link, response):
            """Return the most informative possible filename for a download,
            ideally with a proper extension.
            """
            content_type = response.info().get('content-type', '')
            filename = link.filename  # fallback
            # Have a look at the Content-Disposition header for a better guess:
            content_disposition = response.info().get('content-disposition')
            if content_disposition:
                type, params = cgi.parse_header(content_disposition)
                # We use ``or`` here because we don't want to use an "empty" value
                # from the filename param:
                filename = params.get('filename') or filename
            ext = splitext(filename)[1]
            if not ext:
                ext = mimetypes.guess_extension(content_type)
                if ext:
                    filename += ext
            if not ext and link.url != response.geturl():
                ext = splitext(response.geturl())[1]
                if ext:
                    filename += ext
            return filename

        # Descended from _download_url() in pip 1.4.1
        def pipe_to_file(response, path, size=0):
            """Pull the data off an HTTP response, shove it in a new file, and
            show progress.

            :arg response: A file-like object to read from
            :arg path: The path of the new file
            :arg size: The expected size, in bytes, of the download. 0 for
                unknown or to suppress progress indication (as for cached
                downloads)
            """
            def response_chunks(chunk_size):
                while True:
                    chunk = response.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk

            print('Downloading %s%s...' % (
                self._req.req,
                (' (%sK)' % (size / 1000)) if size > 1000 else ''))
            progress_indicator = (DownloadProgressBar(max=size).iter if size
                                  else DownloadProgressSpinner().iter)
            with open(path, 'wb') as file:
                for chunk in progress_indicator(response_chunks(4096), 4096):
                    file.write(chunk)

        url = link.url.split('#', 1)[0]
        try:
            response = opener(urlparse(url).scheme != 'http').open(url)
        except (HTTPError, IOError) as exc:
            raise DownloadError(link, exc)
        filename = best_filename(link, response)
        try:
            size = int(response.headers['content-length'])
        except (ValueError, KeyError, TypeError):
            size = 0
        pipe_to_file(response, join(self._temp_path, filename), size=size)
        return filename

    # Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
    @memoize  # Avoid re-downloading.
    def _downloaded_filename(self):
        """Download the package's archive if necessary, and return its
        filename.

        --no-deps is implied, as we have reimplemented the bits that would
        ordinarily do dependency resolution.
        """
        # Peep doesn't support requirements that don't come down as a single
        # file, because it can't hash them. Thus, it doesn't support editable
        # requirements, because pip itself doesn't support editable
        # requirements except for "local projects or a VCS url". Nor does it
        # support VCS requirements yet, because we haven't yet come up with a
        # portable, deterministic way to hash them. In summary, all we support
        # is == requirements and tarballs/zips/etc.

        # TODO: Stop on reqs that are editable or aren't ==.

        # If the requirement isn't already specified as a URL, get a URL
        # from an index:
        link = self._link() or self._finder.find_requirement(self._req, upgrade=False)

        if link:
            lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
            if lower_scheme == 'http' or lower_scheme == 'https':
                file_path = self._download(link)
                return basename(file_path)
            elif lower_scheme == 'file':
                # The following is inspired by pip's unpack_file_url():
                link_path = url_to_path(link.url_without_fragment)
                if isdir(link_path):
                    raise UnsupportedRequirementError(
                        "%s: %s is a directory. So that it can compute "
                        "a hash, peep supports only filesystem paths which "
                        "point to files" %
                        (self._req, link.url_without_fragment))
                else:
                    copy(link_path, self._temp_path)
                    return basename(link_path)
            else:
                raise UnsupportedRequirementError(
                    "%s: The download link, %s, would not result in a file "
                    "that can be hashed. Peep supports only == requirements, "
                    "file:// URLs pointing to files (not folders), and "
                    "http:// and https:// URLs pointing to tarballs, zips, "
                    "etc." % (self._req, link.url))
        else:
            raise UnsupportedRequirementError(
                "%s: couldn't determine where to download this requirement from."
                % (self._req,))

    def install(self):
        """Install the package I represent, without dependencies.

        Obey typical pip-install options passed in on the command line.
        """
        other_args = list(requirement_args(self._argv, want_other=True))
        archive_path = join(self._temp_path, self._downloaded_filename())
        # -U so it installs whether pip deems the requirement "satisfied" or
        # not. This is necessary for GitHub-sourced zips, which change without
        # their version numbers changing.
        run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])

    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash."""
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))

    def _project_name(self):
        """Return the inner Requirement's "unsafe name".

        Raise ValueError if there is no name.
        """
        name = getattr(self._req.req, 'project_name', '')
        if name:
            return name
        name = getattr(self._req.req, 'name', '')
        if name:
            return safe_name(name)
        raise ValueError('Requirement has no project_name.')

    def _name(self):
        """Return the wrapped InstallRequirement's name."""
        return self._req.name

    def _link(self):
        """Return the requirement's download Link, or None if it has none."""
        try:
            return self._req.link
        except AttributeError:
            # The link attribute isn't available prior to pip 6.1.0, so fall
            # back to the now deprecated 'url' attribute.
            return Link(self._req.url) if self._req.url else None

    def _url(self):
        """Return the requirement's download URL, or None if unknown."""
        link = self._link()
        return link.url if link else None

    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        """Return whether the requirement is installed and not one that is
        inherently unsatisfiable (like a git-sha tarball)."""
        self._req.check_if_exists()
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())

    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        if self._is_satisfied():
            return SatisfiedReq
        if not self._expected_hashes():
            return MissingReq
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        return InstallableReq

    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.
        """
        return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""

    @classmethod
    def head(cls):
        return 'The following requirements could not be processed:\n'

    def error(self):
        # With no deducible name, the URL is the only identifying info we have.
        return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""

    @classmethod
    def head(cls):
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        """Return a ready-to-paste hash comment plus requirement line."""
        if self._url():
            # _url() always contains an #egg= part, or this would be a
            # MalformedRequest.
            line = self._url()
        else:
            line = '%s==%s' % (self._name(), self._version())
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""

    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")

    def error(self):
        preamble = ' %s: expected' % self._project_name()
        if len(self._expected_hashes()) > 1:
            preamble += ' one of'
        # Indent continuation lines so all expected hashes line up, and align
        # the "got" hash beneath them for easy visual comparison.
        padding = '\n' + ' ' * (len(preamble) + 1)
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(self._expected_hashes()),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())

    @classmethod
    def foot(cls):
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""

    @classmethod
    def head(cls):
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")

    def error(self):
        # Not really an error; just echo the requirement for the report.
        return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed"""
    # No head()/error() overrides: this outcome produces no report text; the
    # caller simply proceeds to install() it.
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
# (SatisfiedReq and InstallableReq are the non-error outcomes.)
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Group *things* into a mapping of ``key(thing) -> list of things``."""
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Run callbacks over *iterable*: ``first`` once before the initial item
    is processed, ``every`` for each item, and ``last`` once after the final
    item.

    For an empty iterable, nothing at all is executed.
    """
    iterator = iter(iterable)
    try:
        item = next(iterator)
    except StopIteration:
        return  # Empty: no callbacks fire.
    first(item)
    every(item)
    for item in iterator:
        every(item)
    last(item)
def _parse_requirements(path, finder):
    """Return the list of InstallRequirements parsed from the file at *path*.

    Papers over the parse_requirements() signature differences across pip
    versions.
    """
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    finder = package_finder(argv)  # One finder shared across all requirements.
    return [DownloadedReq(req, argv, finder) for req in
            _parse_requirements(path, finder)]
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    # NOTE(review): ``argv`` below is the module-level ``sys.argv``, not a
    # parameter of this function, so package_finder() sees the whole command
    # line -- looks intentional for option passthrough, but verify.
    for req in chain.from_iterable(
            _parse_requirements(path, package_finder(argv)) for path in paths):
        req_path, req_line = path_and_line(req)
        # Convert each stored urlsafe-b64 digest back to the hex form that
        # pip 8's --hash option expects ('=' re-added for b64 padding).
        hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
                  for hash in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path
        if not hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hash in hashes:
                print(' \\')
                print(' --hash=sha256:%s' % hash, end='')
            print()
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    # Subcommands peep implements itself; anything else is handed to pip.
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        if len(argv) >= 2 and argv[1] in commands:
            return commands[argv[1]](argv[2:])
        else:
            # Fall through to top-level pip main() for everything else:
            return pip.main()
    except PipException as exc:
        # A delegated pip run failed; surface its status code to the shell.
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a bug-report template for an unhandled exception.

    Includes the peep, python, and pip versions, the command line, and the
    full traceback, formatted for pasting into a GitHub issue.
    """
    print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
    print('with the specifics so we can fix it:')
    print()
    print('https://github.com/erikrose/peep/issues/new')
    print()
    print('Here are some particulars you can copy and paste into the bug report:')
    print()
    print('---')
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Last-ditch handler: print a bug-report template instead of a bare
        # traceback, then exit with a distinctive status code.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | peep_port | python | def peep_port(paths):
if not paths:
print('Please specify one or more requirements files so I have '
'something to port.\n')
return COMMAND_LINE_ERROR
comes_from = None
for req in chain.from_iterable(
_parse_requirements(path, package_finder(argv)) for path in paths):
req_path, req_line = path_and_line(req)
hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
for hash in hashes_above(req_path, req_line)]
if req_path != comes_from:
print()
print('# from %s' % req_path)
print()
comes_from = req_path
if not hashes:
print(req.req)
else:
print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
for hash in hashes:
print(' \\')
print(' --hash=sha256:%s' % hash, end='')
print() | Convert a peep requirements file to one compatble with pip-8 hashing.
Loses comments and tromps on URLs, so the result will need a little manual
massaging, but the hard part--the hash conversion--is done for you. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L901-L932 | [
"def path_and_line(req):\n \"\"\"Return the path and line number of the file from which an\n InstallRequirement came.\n\n \"\"\"\n path, line = (re.match(r'-r (.*) \\(line (\\d+)\\)$',\n req.comes_from).groups())\n return path, int(line)\n",
"def hashes_above(path, line_number):\n \"\"\"Yield hashes from contiguous comment lines before line ``line_number``.\n\n \"\"\"\n def hash_lists(path):\n \"\"\"Yield lists of hashes appearing between non-comment lines.\n\n The lists will be in order of appearance and, for each non-empty\n list, their place in the results will coincide with that of the\n line number of the corresponding result from `parse_requirements`\n (which changed in pip 7.0 to not count comments).\n\n \"\"\"\n hashes = []\n with open(path) as file:\n for lineno, line in enumerate(file, 1):\n match = HASH_COMMENT_RE.match(line)\n if match: # Accumulate this hash.\n hashes.append(match.groupdict()['hash'])\n if not IGNORED_LINE_RE.match(line):\n yield hashes # Report hashes seen so far.\n hashes = []\n elif PIP_COUNTS_COMMENTS:\n # Comment: count as normal req but have no hashes.\n yield []\n\n return next(islice(hash_lists(path), line_number - 1, None))\n"
] | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable. Raise a RuntimeError if we
    couldn't.

    :arg specifier: A setuptools requirement string, e.g. 'pip>=0.6.2'
    """
    try:
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
    """When I delegated to pip, it exited with an error."""
    def __init__(self, error_code):
        # The nonzero shell status code the delegated pip run exited with.
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """An unsupported line was encountered in a requirements file.

    Raised for requirements peep cannot hash as a single file: directories,
    non-file download links, and the like.
    """
class DownloadError(Exception):
    """Raised when fetching a requirement's archive fails."""
    def __init__(self, link, exc):
        # link: the pip Link we tried to fetch; reason: the underlying error.
        self.link = link
        self.reason = str(exc)

    def __str__(self):
        return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash.

    If you pass a sha256, this results in the hash algorithm that the Wheel
    format (PEP 427) uses, except here it's intended to be run across the
    downloaded archive before unpacking.
    """
    b64_digest = urlsafe_b64encode(sha.digest()).decode('ascii')
    return b64_digest.rstrip('=')
def path_and_line(req):
"""Return the path and line number of the file from which an
InstallRequirement came.
"""
path, line = (re.match(r'-r (.*) \(line (\d+)\)$',
req.comes_from).groups())
return path, int(line)
def hashes_above(path, line_number):
"""Yield hashes from contiguous comment lines before line ``line_number``.
"""
def hash_lists(path):
"""Yield lists of hashes appearing between non-comment lines.
The lists will be in order of appearance and, for each non-empty
list, their place in the results will coincide with that of the
line number of the corresponding result from `parse_requirements`
(which changed in pip 7.0 to not count comments).
"""
hashes = []
with open(path) as file:
for lineno, line in enumerate(file, 1):
match = HASH_COMMENT_RE.match(line)
if match: # Accumulate this hash.
hashes.append(match.groupdict()['hash'])
if not IGNORED_LINE_RE.match(line):
yield hashes # Report hashes seen so far.
hashes = []
elif PIP_COUNTS_COMMENTS:
# Comment: count as normal req but have no hashes.
yield []
return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
"""Delegate to pip the given args (starting with the subcommand), and raise
``PipException`` if something goes wrong."""
status_code = pip.main(initial_args)
# Clear out the registrations in the pip "logger" singleton. Otherwise,
# loggers keep getting appended to it with every run. Pip assumes only one
# command invocation will happen per interpreter lifetime.
logger.consumers = []
if status_code:
raise PipException(status_code)
def hash_of_file(path):
"""Return the hash of a downloaded file."""
with open(path, 'rb') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha)
def is_git_sha(text):
"""Return whether this is probably a git sha"""
# Handle both the full sha as well as the 7-character abbreviation
if len(text) in (40, 7):
try:
int(text, 16)
return True
except ValueError:
pass
return False
def filename_from_url(url):
parsed = urlparse(url)
path = parsed.path
return path.split('/')[-1]
def requirement_args(argv, want_paths=False, want_other=False):
"""Return an iterable of filtered arguments.
:arg argv: Arguments, starting after the subcommand
:arg want_paths: If True, the returned iterable includes the paths to any
requirements files following a ``-r`` or ``--requirement`` option.
:arg want_other: If True, the returned iterable includes the args that are
not a requirement-file path or a ``-r`` or ``--requirement`` flag.
"""
was_r = False
for arg in argv:
# Allow for requirements files named "-r", don't freak out if there's a
# trailing "-r", etc.
if was_r:
if want_paths:
yield arg
was_r = False
elif arg in ['-r', '--requirement']:
was_r = True
else:
if want_other:
yield arg
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        # Emit one ready-to-paste "# sha256: ..." comment line per file.
        for path in paths:
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        # No files given: show usage and exit with the conventional
        # command-line-error status.
        parser.print_usage()
        return COMMAND_LINE_ERROR
class EmptyOptions(object):
    """Fake optparse options for compatibility with pip<1.2

    pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
    was required. We work around that by passing it a mock object.
    """
    # Attributes pip's parse_requirements() may read off the options object.
    # Harmless defaults for every pip version we support:
    default_vcs = None
    skip_requirements_regex = None
    isolated_mode = False
def memoize(func):
    """Decorate a no-argument method so it computes its result only once per
    instance.

    The result is kept in a ``_cache`` dict on the instance, keyed by the
    method's name, so each instance gets its own cache.
    """
    @wraps(func)
    def memoizer(self):
        cache = self.__dict__.setdefault('_cache', {})
        if func.__name__ not in cache:
            cache[func.__name__] = func(self)
        return cache[func.__name__]
    return memoizer
def package_finder(argv):
    """Return a PackageFinder respecting command-line options.

    :arg argv: Everything after the subcommand
    """
    # We instantiate an InstallCommand and then use some of its private
    # machinery--its arg parser--for our own purposes, like a virus. This
    # approach is portable across many pip versions, where more fine-grained
    # ones are not. Ignoring options that don't exist on the parser (for
    # instance, --use-wheel) gives us a straightforward method of backward
    # compatibility.
    try:
        command = InstallCommand()
    except TypeError:
        # This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
        # given)" error. In that version, InstallCommand takes a top=level
        # parser passed in from outside.
        from pip.baseparser import create_main_parser
        command = InstallCommand(create_main_parser())
    # The downside is that it essentially ruins the InstallCommand class for
    # further use. Calling out to pip.main() within the same interpreter, for
    # example, would result in arguments parsed this time turning up there.
    # Thus, we deepcopy the arg parser so we don't trash its singletons. Of
    # course, deepcopy doesn't work on these objects, because they contain
    # uncopyable regex patterns, so we pickle and unpickle instead. Fun!
    options, _ = loads(dumps(command.parser)).parse_args(argv)

    # Carry over PackageFinder kwargs that have [about] the same names as
    # options attr names:
    possible_options = [
        'find_links',
        FORMAT_CONTROL_ARG,
        ('allow_all_prereleases', 'pre'),
        'process_dependency_links'
    ]
    kwargs = {}
    for option in possible_options:
        # Entries are either a plain name or a (finder_kwarg, options_attr)
        # pair when the two names differ.
        kw, attr = option if isinstance(option, tuple) else (option, option)
        value = getattr(options, attr, MARKER)
        # MARKER sentinel: skip options that this pip version's parser lacks.
        if value is not MARKER:
            kwargs[kw] = value

    # Figure out index_urls:
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        index_urls = []
    index_urls += getattr(options, 'mirrors', [])

    # If pip is new enough to have a PipSession, initialize one, since
    # PackageFinder requires it:
    if hasattr(command, '_build_session'):
        kwargs['session'] = command._build_session(options)

    return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
    """A wrapper around InstallRequirement which offers additional information
    based on downloading and examining a corresponding package archive

    These are conceptually immutable, so we can get away with memoizing
    expensive things.
    """

    def __init__(self, req, argv, finder):
        """Download a requirement, compare its hashes, and become a subclass
        of DownloadedReq depending on its state.

        :arg req: The InstallRequirement I am based on
        :arg argv: The args, starting after the subcommand
        :arg finder: The PackageFinder used to resolve me to a download link
        """
        self._req = req
        self._argv = argv
        self._finder = finder

        # We use a separate temp dir for each requirement so requirements
        # (from different indices) that happen to have the same archive names
        # don't overwrite each other, leading to a security hole in which the
        # latter is a hash mismatch, the former has already passed the
        # comparison, and the latter gets installed.
        self._temp_path = mkdtemp(prefix='peep-')
        # Think of DownloadedReq as a one-shot state machine. It's an abstract
        # class that ratchets forward to being one of its own subclasses,
        # depending on its package status. Then it doesn't move again.
        self.__class__ = self._class()

    def dispose(self):
        """Delete temp files and dirs I've made. Render myself useless.

        Do not call further methods on me after calling dispose().
        """
        rmtree(self._temp_path)

    def _version(self):
        """Deduce the version number of the downloaded package from its
        filename."""
        # TODO: Can we delete this method and just print the line from the
        # reqs file verbatim instead?
        def version_of_archive(filename, package_name):
            # Since we know the project_name, we can strip that off the left,
            # strip any archive extensions off the right, and take the rest
            # as the version.
            for ext in ARCHIVE_EXTENSIONS:
                if filename.endswith(ext):
                    filename = filename[:-len(ext)]
                    break
            # Handle github sha tarball downloads.
            if is_git_sha(filename):
                filename = package_name + '-' + filename
            if not filename.lower().replace('_', '-').startswith(package_name.lower()):
                # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
                give_up(filename, package_name)
            return filename[len(package_name) + 1:]  # Strip off '-' before version.

        def version_of_wheel(filename, package_name):
            # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
            # name-convention) we know the format bits are '-' separated.
            whl_package_name, version, _rest = filename.split('-', 2)
            # Do the alteration to package_name from PEP 427:
            # BUGFIX: re.UNICODE was previously passed positionally, where it
            # landed in re.sub's ``count`` parameter instead of ``flags``.
            # Pass it by keyword so it actually acts as a flag.
            our_package_name = re.sub(r'[^\w\d.]+', '_', package_name,
                                      flags=re.UNICODE)
            if whl_package_name != our_package_name:
                give_up(filename, whl_package_name)
            return version

        def give_up(filename, package_name):
            raise RuntimeError("The archive '%s' didn't start with the package name "
                               "'%s', so I couldn't figure out the version number. "
                               "My bad; improve me." %
                               (filename, package_name))

        get_version = (version_of_wheel
                       if self._downloaded_filename().endswith('.whl')
                       else version_of_archive)
        return get_version(self._downloaded_filename(), self._project_name())

    def _is_always_unsatisfied(self):
        """Returns whether this requirement is always unsatisfied

        This would happen in cases where we can't determine the version
        from the filename.
        """
        # If this is a github sha tarball, then it is always unsatisfied
        # because the url has a commit sha in it and not the version
        # number.
        url = self._url()
        if url:
            filename = filename_from_url(url)
            if filename.endswith(ARCHIVE_EXTENSIONS):
                filename, ext = splitext(filename)
                if is_git_sha(filename):
                    return True
        return False

    @memoize  # Avoid hitting the filesystem over and over.
    def _expected_hashes(self):
        """Return a list of known-good hashes for this package."""
        return hashes_above(*path_and_line(self._req))

    def _download(self, link):
        """Download a file, and return its name within my temp dir.

        This does no verification of HTTPS certs, but our checking hashes
        makes that largely unimportant. It would be nice to be able to use the
        requests lib, which can verify certs, but it is guaranteed to be
        available only in pip >= 1.5.

        This also drops support for proxies and basic auth, though those could
        be added back in.
        """
        # Based on pip 1.4.1's URLOpener but with cert verification removed
        def opener(is_https):
            if is_https:
                opener = build_opener(HTTPSHandler())
                # Strip out HTTPHandler to prevent MITM spoof:
                for handler in opener.handlers:
                    if isinstance(handler, HTTPHandler):
                        opener.handlers.remove(handler)
            else:
                opener = build_opener()
            return opener

        # Descended from unpack_http_url() in pip 1.4.1
        def best_filename(link, response):
            """Return the most informative possible filename for a download,
            ideally with a proper extension.
            """
            content_type = response.info().get('content-type', '')
            filename = link.filename  # fallback
            # Have a look at the Content-Disposition header for a better guess:
            content_disposition = response.info().get('content-disposition')
            if content_disposition:
                type, params = cgi.parse_header(content_disposition)
                # We use ``or`` here because we don't want to use an "empty" value
                # from the filename param:
                filename = params.get('filename') or filename
            ext = splitext(filename)[1]
            if not ext:
                ext = mimetypes.guess_extension(content_type)
                if ext:
                    filename += ext
            if not ext and link.url != response.geturl():
                ext = splitext(response.geturl())[1]
                if ext:
                    filename += ext
            return filename

        # Descended from _download_url() in pip 1.4.1
        def pipe_to_file(response, path, size=0):
            """Pull the data off an HTTP response, shove it in a new file, and
            show progress.

            :arg response: A file-like object to read from
            :arg path: The path of the new file
            :arg size: The expected size, in bytes, of the download. 0 for
                unknown or to suppress progress indication (as for cached
                downloads)
            """
            def response_chunks(chunk_size):
                while True:
                    chunk = response.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk

            print('Downloading %s%s...' % (
                self._req.req,
                (' (%sK)' % (size / 1000)) if size > 1000 else ''))
            progress_indicator = (DownloadProgressBar(max=size).iter if size
                                  else DownloadProgressSpinner().iter)
            with open(path, 'wb') as file:
                for chunk in progress_indicator(response_chunks(4096), 4096):
                    file.write(chunk)

        url = link.url.split('#', 1)[0]
        try:
            response = opener(urlparse(url).scheme != 'http').open(url)
        except (HTTPError, IOError) as exc:
            raise DownloadError(link, exc)
        filename = best_filename(link, response)
        try:
            size = int(response.headers['content-length'])
        except (ValueError, KeyError, TypeError):
            size = 0
        pipe_to_file(response, join(self._temp_path, filename), size=size)
        return filename

    # Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
    @memoize  # Avoid re-downloading.
    def _downloaded_filename(self):
        """Download the package's archive if necessary, and return its
        filename.

        --no-deps is implied, as we have reimplemented the bits that would
        ordinarily do dependency resolution.
        """
        # Peep doesn't support requirements that don't come down as a single
        # file, because it can't hash them. Thus, it doesn't support editable
        # requirements, because pip itself doesn't support editable
        # requirements except for "local projects or a VCS url". Nor does it
        # support VCS requirements yet, because we haven't yet come up with a
        # portable, deterministic way to hash them. In summary, all we support
        # is == requirements and tarballs/zips/etc.

        # TODO: Stop on reqs that are editable or aren't ==.

        # If the requirement isn't already specified as a URL, get a URL
        # from an index:
        link = self._link() or self._finder.find_requirement(self._req, upgrade=False)

        if link:
            lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
            if lower_scheme == 'http' or lower_scheme == 'https':
                file_path = self._download(link)
                return basename(file_path)
            elif lower_scheme == 'file':
                # The following is inspired by pip's unpack_file_url():
                link_path = url_to_path(link.url_without_fragment)
                if isdir(link_path):
                    raise UnsupportedRequirementError(
                        "%s: %s is a directory. So that it can compute "
                        "a hash, peep supports only filesystem paths which "
                        "point to files" %
                        (self._req, link.url_without_fragment))
                else:
                    copy(link_path, self._temp_path)
                    return basename(link_path)
            else:
                raise UnsupportedRequirementError(
                    "%s: The download link, %s, would not result in a file "
                    "that can be hashed. Peep supports only == requirements, "
                    "file:// URLs pointing to files (not folders), and "
                    "http:// and https:// URLs pointing to tarballs, zips, "
                    "etc." % (self._req, link.url))
        else:
            raise UnsupportedRequirementError(
                "%s: couldn't determine where to download this requirement from."
                % (self._req,))

    def install(self):
        """Install the package I represent, without dependencies.

        Obey typical pip-install options passed in on the command line.
        """
        other_args = list(requirement_args(self._argv, want_other=True))
        archive_path = join(self._temp_path, self._downloaded_filename())
        # -U so it installs whether pip deems the requirement "satisfied" or
        # not. This is necessary for GitHub-sourced zips, which change without
        # their version numbers changing.
        run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])

    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash."""
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))

    def _project_name(self):
        """Return the inner Requirement's "unsafe name".

        Raise ValueError if there is no name.
        """
        name = getattr(self._req.req, 'project_name', '')
        if name:
            return name
        # pip >= 8 exposes 'name' instead of 'project_name':
        name = getattr(self._req.req, 'name', '')
        if name:
            return safe_name(name)
        raise ValueError('Requirement has no project_name.')

    def _name(self):
        return self._req.name

    def _link(self):
        try:
            return self._req.link
        except AttributeError:
            # The link attribute isn't available prior to pip 6.1.0, so fall
            # back to the now deprecated 'url' attribute.
            return Link(self._req.url) if self._req.url else None

    def _url(self):
        link = self._link()
        return link.url if link else None

    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        self._req.check_if_exists()
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())

    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        if self._is_satisfied():
            return SatisfiedReq
        if not self._expected_hashes():
            return MissingReq
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        return InstallableReq

    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.
        """
        return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""

    @classmethod
    def head(cls):
        # Printed once, before the error() lines of all MalformedReqs.
        return 'The following requirements could not be processed:\n'

    def error(self):
        # One line per bad requirement, suggesting the #egg= fix.
        return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement for which no hashes were specified in the requirements file"""

    @classmethod
    def head(cls):
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')

    def error(self):
        if self._url():
            # _url() always contains an #egg= part, or this would be a
            # MalformedRequest.
            line = self._url()
        else:
            line = '%s==%s' % (self._name(), self._version())
        # A ready-to-paste hash comment plus the requirement line it covers.
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement for which the downloaded file didn't match any of my hashes."""

    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")

    def error(self):
        preamble = '    %s: expected' % self._project_name()
        if len(self._expected_hashes()) > 1:
            preamble += ' one of'
        # Pad continuation lines so multiple expected hashes line up under
        # one another, and the "got" hash lines up beneath them.
        padding = '\n' + ' ' * (len(preamble) + 1)
        return '%s %s\n%s got %s' % (preamble,
                                     padding.join(self._expected_hashes()),
                                     ' ' * (len(preamble) - 4),
                                     self._actual_hash())

    @classmethod
    def foot(cls):
        # Blank line to separate this section from the next.
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""

    @classmethod
    def head(cls):
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")

    def error(self):
        # Not really an error--just an informational listing line.
        return '  %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose hash matched and can be safely installed"""


# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Group *things* into a map of ``key(thing) -> [things]``.

    The result is a ``defaultdict(list)``, so looking up an absent key
    yields an empty list rather than raising.
    """
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Execute something before the first item of iter, something else for each
    item, and a third thing after the last.

    If there are no items in the iterable, don't execute anything.
    """
    started = False
    for item in iterable:
        if not started:
            started = True
            first(item)
        every(item)
    # `item` still holds the final element here, so `last` sees it.
    if started:
        last(item)
def _parse_requirements(path, finder):
    """Return the list of InstallRequirements parsed from the requirements
    file at *path*, resolving against *finder*.

    Papers over the pip>=6.0 requirement that a session kwarg be supplied.
    """
    try:
        # list() so the generator that is parse_requirements() actually runs
        # far enough to report a TypeError
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder))
    except TypeError:
        # session is a required kwarg as of pip 6.0 and will raise
        # a TypeError if missing. It needs to be a PipSession instance,
        # but in older versions we can't import it from pip.download
        # (nor do we need it at all) so we only import it in this except block
        from pip.download import PipSession
        return list(parse_requirements(
            path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
    """Return a list of DownloadedReqs representing the requirements parsed
    out of a given requirements file.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    finder = package_finder(argv)
    # Each DownloadedReq classifies itself on construction (downloading its
    # archive as needed), so this list is ready to be bucketed by class.
    return [DownloadedReq(req, argv, finder) for req in
            _parse_requirements(path, finder)]
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    # All report text is buffered in `output` and printed in one go in the
    # finally block, so it appears after pip's own noise.
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR

        # We're a "peep install" command, and we have some requirement paths.
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        buckets = bucket(reqs, lambda r: r.__class__)

        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')

        # head/error/foot printers shared by every report section:
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)

        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            # Only hash-verified requirements reach this point.
            for req in buckets[InstallableReq]:
                req.install()

            first_every_last(buckets[SatisfiedReq], *printers)

            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Clean up every requirement's temp dir, then flush the buffered
        # report--even on error paths.
        for req in reqs:
            req.dispose()
        print(''.join(output))
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    # Map subcommand name -> handler taking the remaining args.
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        if len(argv) >= 2 and argv[1] in commands:
            return commands[argv[1]](argv[2:])
        else:
            # Fall through to top-level pip main() for everything else:
            return pip.main()
    except PipException as exc:
        # Propagate pip's failure status as our own exit code.
        return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a bug-report template for an unexpected crash, including version
    info and the traceback."""
    for line in (
            'Oh no! Peep had a problem while trying to do stuff. Please write up a bug report',
            'with the specifics so we can fix it:',
            '',
            'https://github.com/erikrose/peep/issues/new',
            '',
            'Here are some particulars you can copy and paste into the bug report:',
            '',
            '---'):
        print(line)
    print('peep:', repr(__version__))
    print('python:', repr(sys.version))
    print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
    print('Command line: ', repr(sys.argv))
    print(
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
    print('---')
if __name__ == '__main__':
    try:
        exit(main())
    except Exception:
        # Last-resort handler: print a bug-report template instead of a bare
        # traceback, then exit with a distinctive status code.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | main | python | def main():
commands = {'hash': peep_hash,
'install': peep_install,
'port': peep_port}
try:
if len(argv) >= 2 and argv[1] in commands:
return commands[argv[1]](argv[2:])
else:
# Fall through to top-level pip main() for everything else:
return pip.main()
except PipException as exc:
return exc.error_code | Be the top-level entrypoint. Return a shell status code. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L935-L947 | null | #!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
    """Make a compatible version of pip importable. Raise a RuntimeError if we
    couldn't.

    :arg specifier: A setuptools requirement string, e.g. ``'pip>=0.6.2'``
    """
    try:
        for distro in require(specifier):
            distro.activate()
    except (VersionConflict, DistributionNotFound):
        raise RuntimeError('The installed version of pip is too old; peep '
                           'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
    """When I delegated to pip, it exited with an error."""

    def __init__(self, error_code):
        # Shell status code pip exited with; surfaced as peep's exit code.
        self.error_code = error_code
class UnsupportedRequirementError(Exception):
    """An unsupported line was encountered in a requirements file.

    Raised for requirements whose download can't be reduced to a single
    hashable file--e.g. directories or non-file/http(s) URL schemes.
    """
class DownloadError(Exception):
    """Raised when downloading a requirement's archive fails.

    Remembers the link being fetched and a human-readable reason so the
    failure can be reported at the end of the run.
    """
    def __init__(self, link, exc):
        self.link = link
        self.reason = str(exc)

    def __str__(self):
        return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
    """Return a short, 7-bit-safe representation of a hash.

    If you pass a sha256, this results in the hash algorithm that the Wheel
    format (PEP 427) uses, except here it's intended to be run across the
    downloaded archive before unpacking.
    """
    encoded = urlsafe_b64encode(sha.digest()).decode('ascii')
    return encoded.rstrip('=')
def path_and_line(req):
    """Return the path and line number of the file from which an
    InstallRequirement came.

    Parses pip's ``comes_from`` annotation, which has the shape
    ``-r <path> (line <n>)``.
    """
    match = re.match(r'-r (.*) \(line (\d+)\)$', req.comes_from)
    file_path, lineno = match.groups()
    return file_path, int(lineno)
def hashes_above(path, line_number):
    """Yield hashes from contiguous comment lines before line ``line_number``.

    :arg path: Path of the requirements file
    :arg line_number: 1-based line number of the requirement, as reported by
        pip's parse_requirements()
    """
    def hash_lists(path):
        """Yield lists of hashes appearing between non-comment lines.

        The lists will be in order of appearance and, for each non-empty
        list, their place in the results will coincide with that of the
        line number of the corresponding result from `parse_requirements`
        (which changed in pip 7.0 to not count comments).
        """
        hashes = []
        with open(path) as file:
            for lineno, line in enumerate(file, 1):
                match = HASH_COMMENT_RE.match(line)
                if match:  # Accumulate this hash.
                    hashes.append(match.groupdict()['hash'])
                if not IGNORED_LINE_RE.match(line):
                    yield hashes  # Report hashes seen so far.
                    hashes = []
                elif PIP_COUNTS_COMMENTS:
                    # Comment: count as normal req but have no hashes.
                    yield []

    # Pick out the hash list corresponding to the requested requirement line.
    return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
    """Delegate to pip the given args (starting with the subcommand), and raise
    ``PipException`` if something goes wrong."""
    status_code = pip.main(initial_args)

    # Clear out the registrations in the pip "logger" singleton. Otherwise,
    # loggers keep getting appended to it with every run. Pip assumes only one
    # command invocation will happen per interpreter lifetime.
    logger.consumers = []

    if status_code:
        raise PipException(status_code)
def hash_of_file(path):
"""Return the hash of a downloaded file."""
with open(path, 'rb') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha)
def is_git_sha(text):
"""Return whether this is probably a git sha"""
# Handle both the full sha as well as the 7-character abbreviation
if len(text) in (40, 7):
try:
int(text, 16)
return True
except ValueError:
pass
return False
def filename_from_url(url):
parsed = urlparse(url)
path = parsed.path
return path.split('/')[-1]
def requirement_args(argv, want_paths=False, want_other=False):
"""Return an iterable of filtered arguments.
:arg argv: Arguments, starting after the subcommand
:arg want_paths: If True, the returned iterable includes the paths to any
requirements files following a ``-r`` or ``--requirement`` option.
:arg want_other: If True, the returned iterable includes the args that are
not a requirement-file path or a ``-r`` or ``--requirement`` flag.
"""
was_r = False
for arg in argv:
# Allow for requirements files named "-r", don't freak out if there's a
# trailing "-r", etc.
if was_r:
if want_paths:
yield arg
was_r = False
elif arg in ['-r', '--requirement']:
was_r = True
else:
if want_other:
yield arg
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
"""Return the peep hash of one or more files, returning a shell status code
or raising a PipException.
:arg argv: The commandline args, starting after the subcommand
"""
parser = OptionParser(
usage='usage: %prog hash file [file ...]',
description='Print a peep hash line for one or more files: for '
'example, "# sha256: '
'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
_, paths = parser.parse_args(args=argv)
if paths:
for path in paths:
print('# sha256:', hash_of_file(path))
return ITS_FINE_ITS_FINE
else:
parser.print_usage()
return COMMAND_LINE_ERROR
class EmptyOptions(object):
"""Fake optparse options for compatibility with pip<1.2
pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
was required. We work around that by passing it a mock object.
"""
default_vcs = None
skip_requirements_regex = None
isolated_mode = False
def memoize(func):
"""Memoize a method that should return the same result every time on a
given instance.
"""
@wraps(func)
def memoizer(self):
if not hasattr(self, '_cache'):
self._cache = {}
if func.__name__ not in self._cache:
self._cache[func.__name__] = func(self)
return self._cache[func.__name__]
return memoizer
def package_finder(argv):
"""Return a PackageFinder respecting command-line options.
:arg argv: Everything after the subcommand
"""
# We instantiate an InstallCommand and then use some of its private
# machinery--its arg parser--for our own purposes, like a virus. This
# approach is portable across many pip versions, where more fine-grained
# ones are not. Ignoring options that don't exist on the parser (for
# instance, --use-wheel) gives us a straightforward method of backward
# compatibility.
try:
command = InstallCommand()
except TypeError:
# This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
# given)" error. In that version, InstallCommand takes a top=level
# parser passed in from outside.
from pip.baseparser import create_main_parser
command = InstallCommand(create_main_parser())
# The downside is that it essentially ruins the InstallCommand class for
# further use. Calling out to pip.main() within the same interpreter, for
# example, would result in arguments parsed this time turning up there.
# Thus, we deepcopy the arg parser so we don't trash its singletons. Of
# course, deepcopy doesn't work on these objects, because they contain
# uncopyable regex patterns, so we pickle and unpickle instead. Fun!
options, _ = loads(dumps(command.parser)).parse_args(argv)
# Carry over PackageFinder kwargs that have [about] the same names as
# options attr names:
possible_options = [
'find_links',
FORMAT_CONTROL_ARG,
('allow_all_prereleases', 'pre'),
'process_dependency_links'
]
kwargs = {}
for option in possible_options:
kw, attr = option if isinstance(option, tuple) else (option, option)
value = getattr(options, attr, MARKER)
if value is not MARKER:
kwargs[kw] = value
# Figure out index_urls:
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
index_urls = []
index_urls += getattr(options, 'mirrors', [])
# If pip is new enough to have a PipSession, initialize one, since
# PackageFinder requires it:
if hasattr(command, '_build_session'):
kwargs['session'] = command._build_session(options)
return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
    """A wrapper around InstallRequirement which offers additional information
    based on downloading and examining a corresponding package archive

    These are conceptually immutable, so we can get away with memoizing
    expensive things.
    """
    def __init__(self, req, argv, finder):
        """Download a requirement, compare its hashes, and return a subclass
        of DownloadedReq depending on its state.

        :arg req: The InstallRequirement I am based on
        :arg argv: The args, starting after the subcommand
        :arg finder: The pip PackageFinder used to resolve index-based reqs
        """
        self._req = req
        self._argv = argv
        self._finder = finder
        # We use a separate temp dir for each requirement so requirements
        # (from different indices) that happen to have the same archive names
        # don't overwrite each other, leading to a security hole in which the
        # latter is a hash mismatch, the former has already passed the
        # comparison, and the latter gets installed.
        self._temp_path = mkdtemp(prefix='peep-')
        # Think of DownloadedReq as a one-shot state machine. It's an abstract
        # class that ratchets forward to being one of its own subclasses,
        # depending on its package status. Then it doesn't move again.
        self.__class__ = self._class()

    def dispose(self):
        """Delete temp files and dirs I've made. Render myself useless.

        Do not call further methods on me after calling dispose().
        """
        rmtree(self._temp_path)

    def _version(self):
        """Deduce the version number of the downloaded package from its filename."""
        # TODO: Can we delete this method and just print the line from the
        # reqs file verbatim instead?
        def version_of_archive(filename, package_name):
            # Since we know the project_name, we can strip that off the left, strip
            # any archive extensions off the right, and take the rest as the
            # version.
            for ext in ARCHIVE_EXTENSIONS:
                if filename.endswith(ext):
                    filename = filename[:-len(ext)]
                    break
            # Handle github sha tarball downloads.
            if is_git_sha(filename):
                filename = package_name + '-' + filename
            if not filename.lower().replace('_', '-').startswith(package_name.lower()):
                # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
                give_up(filename, package_name)
            return filename[len(package_name) + 1:]  # Strip off '-' before version.

        def version_of_wheel(filename, package_name):
            # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
            # name-convention) we know the format bits are '-' separated.
            whl_package_name, version, _rest = filename.split('-', 2)
            # Do the alteration to package_name from PEP 427:
            # BUG FIX: re.UNICODE used to be passed as re.sub()'s positional
            # `count` argument (silently capping substitutions); it must be
            # passed as `flags`.
            our_package_name = re.sub(r'[^\w\d.]+', '_', package_name,
                                      flags=re.UNICODE)
            if whl_package_name != our_package_name:
                give_up(filename, whl_package_name)
            return version

        def give_up(filename, package_name):
            raise RuntimeError("The archive '%s' didn't start with the package name "
                               "'%s', so I couldn't figure out the version number. "
                               "My bad; improve me." %
                               (filename, package_name))

        get_version = (version_of_wheel
                       if self._downloaded_filename().endswith('.whl')
                       else version_of_archive)
        return get_version(self._downloaded_filename(), self._project_name())

    def _is_always_unsatisfied(self):
        """Returns whether this requirement is always unsatisfied

        This would happen in cases where we can't determine the version
        from the filename.
        """
        # If this is a github sha tarball, then it is always unsatisfied
        # because the url has a commit sha in it and not the version
        # number.
        url = self._url()
        if url:
            filename = filename_from_url(url)
            if filename.endswith(ARCHIVE_EXTENSIONS):
                # Only the stem matters; the extension was unused before.
                filename = splitext(filename)[0]
                if is_git_sha(filename):
                    return True
        return False

    @memoize  # Avoid hitting the file[cache] over and over.
    def _expected_hashes(self):
        """Return a list of known-good hashes for this package."""
        return hashes_above(*path_and_line(self._req))

    def _download(self, link):
        """Download a file, and return its name within my temp dir.

        This does no verification of HTTPS certs, but our checking hashes
        makes that largely unimportant. It would be nice to be able to use the
        requests lib, which can verify certs, but it is guaranteed to be
        available only in pip >= 1.5.

        This also drops support for proxies and basic auth, though those could
        be added back in.
        """
        # Based on pip 1.4.1's URLOpener but with cert verification removed
        def opener(is_https):
            if is_https:
                opener = build_opener(HTTPSHandler())
                # Strip out HTTPHandler to prevent MITM spoof. Iterate over a
                # snapshot: removing from the list we're iterating could skip
                # adjacent handlers.
                for handler in list(opener.handlers):
                    if isinstance(handler, HTTPHandler):
                        opener.handlers.remove(handler)
            else:
                opener = build_opener()
            return opener

        # Descended from unpack_http_url() in pip 1.4.1
        def best_filename(link, response):
            """Return the most informative possible filename for a download,
            ideally with a proper extension.
            """
            content_type = response.info().get('content-type', '')
            filename = link.filename  # fallback
            # Have a look at the Content-Disposition header for a better guess:
            content_disposition = response.info().get('content-disposition')
            if content_disposition:
                # Renamed from `type`, which shadowed the builtin; the value
                # itself is unused.
                disposition_type, params = cgi.parse_header(content_disposition)
                # We use ``or`` here because we don't want to use an "empty" value
                # from the filename param:
                filename = params.get('filename') or filename
            ext = splitext(filename)[1]
            if not ext:
                ext = mimetypes.guess_extension(content_type)
                if ext:
                    filename += ext
            if not ext and link.url != response.geturl():
                ext = splitext(response.geturl())[1]
                if ext:
                    filename += ext
            return filename

        # Descended from _download_url() in pip 1.4.1
        def pipe_to_file(response, path, size=0):
            """Pull the data off an HTTP response, shove it in a new file, and
            show progress.

            :arg response: A file-like object to read from
            :arg path: The path of the new file
            :arg size: The expected size, in bytes, of the download. 0 for
                unknown or to suppress progress indication (as for cached
                downloads)
            """
            def response_chunks(chunk_size):
                while True:
                    chunk = response.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk

            print('Downloading %s%s...' % (
                self._req.req,
                (' (%sK)' % (size / 1000)) if size > 1000 else ''))
            progress_indicator = (DownloadProgressBar(max=size).iter if size
                                  else DownloadProgressSpinner().iter)
            # Renamed from `file`, which shadowed the (Python 2) builtin.
            with open(path, 'wb') as out_file:
                for chunk in progress_indicator(response_chunks(4096), 4096):
                    out_file.write(chunk)

        url = link.url.split('#', 1)[0]
        try:
            response = opener(urlparse(url).scheme != 'http').open(url)
        except (HTTPError, IOError) as exc:
            raise DownloadError(link, exc)
        filename = best_filename(link, response)
        try:
            size = int(response.headers['content-length'])
        except (ValueError, KeyError, TypeError):
            size = 0
        pipe_to_file(response, join(self._temp_path, filename), size=size)
        return filename

    # Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
    @memoize  # Avoid re-downloading.
    def _downloaded_filename(self):
        """Download the package's archive if necessary, and return its
        filename.

        --no-deps is implied, as we have reimplemented the bits that would
        ordinarily do dependency resolution.
        """
        # Peep doesn't support requirements that don't come down as a single
        # file, because it can't hash them. Thus, it doesn't support editable
        # requirements, because pip itself doesn't support editable
        # requirements except for "local projects or a VCS url". Nor does it
        # support VCS requirements yet, because we haven't yet come up with a
        # portable, deterministic way to hash them. In summary, all we support
        # is == requirements and tarballs/zips/etc.
        # TODO: Stop on reqs that are editable or aren't ==.
        # If the requirement isn't already specified as a URL, get a URL
        # from an index:
        link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
        if link:
            lower_scheme = link.scheme.lower()  # pip lower()s it for some reason.
            if lower_scheme == 'http' or lower_scheme == 'https':
                file_path = self._download(link)
                return basename(file_path)
            elif lower_scheme == 'file':
                # The following is inspired by pip's unpack_file_url():
                link_path = url_to_path(link.url_without_fragment)
                if isdir(link_path):
                    raise UnsupportedRequirementError(
                        "%s: %s is a directory. So that it can compute "
                        "a hash, peep supports only filesystem paths which "
                        "point to files" %
                        (self._req, link.url_without_fragment))
                else:
                    copy(link_path, self._temp_path)
                    return basename(link_path)
            else:
                raise UnsupportedRequirementError(
                    "%s: The download link, %s, would not result in a file "
                    "that can be hashed. Peep supports only == requirements, "
                    "file:// URLs pointing to files (not folders), and "
                    "http:// and https:// URLs pointing to tarballs, zips, "
                    "etc." % (self._req, link.url))
        else:
            raise UnsupportedRequirementError(
                "%s: couldn't determine where to download this requirement from."
                % (self._req,))

    def install(self):
        """Install the package I represent, without dependencies.

        Obey typical pip-install options passed in on the command line.
        """
        other_args = list(requirement_args(self._argv, want_other=True))
        archive_path = join(self._temp_path, self._downloaded_filename())
        # -U so it installs whether pip deems the requirement "satisfied" or
        # not. This is necessary for GitHub-sourced zips, which change without
        # their version numbers changing.
        run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])

    @memoize
    def _actual_hash(self):
        """Download the package's archive if necessary, and return its hash."""
        return hash_of_file(join(self._temp_path, self._downloaded_filename()))

    def _project_name(self):
        """Return the inner Requirement's "unsafe name".

        Raise ValueError if there is no name.
        """
        name = getattr(self._req.req, 'project_name', '')
        if name:
            return name
        name = getattr(self._req.req, 'name', '')
        if name:
            return safe_name(name)
        raise ValueError('Requirement has no project_name.')

    def _name(self):
        """Return the requirement's name as parsed by pip."""
        return self._req.name

    def _link(self):
        """Return the requirement's Link, tolerating old pip versions."""
        try:
            return self._req.link
        except AttributeError:
            # The link attribute isn't available prior to pip 6.1.0, so fall
            # back to the now deprecated 'url' attribute.
            return Link(self._req.url) if self._req.url else None

    def _url(self):
        """Return the requirement's download URL, or None if it has no link."""
        link = self._link()
        return link.url if link else None

    @memoize  # Avoid re-running expensive check_if_exists().
    def _is_satisfied(self):
        """Return whether the requirement is already installed (and versioned)."""
        self._req.check_if_exists()
        return (self._req.satisfied_by and
                not self._is_always_unsatisfied())

    def _class(self):
        """Return the class I should be, spanning a continuum of goodness."""
        try:
            self._project_name()
        except ValueError:
            return MalformedReq
        if self._is_satisfied():
            return SatisfiedReq
        if not self._expected_hashes():
            return MissingReq
        if self._actual_hash() not in self._expected_hashes():
            return MismatchedReq
        return InstallableReq

    @classmethod
    def foot(cls):
        """Return the text to be printed once, after all of the errors from
        classes of my type are printed.
        """
        return ''
class MalformedReq(DownloadedReq):
    """A requirement whose package name could not be determined"""
    @classmethod
    def head(cls):
        # Printed once, before this class's per-requirement errors (see
        # first_every_last() in peep_install()).
        return 'The following requirements could not be processed:\n'
    def error(self):
        # One line per bad requirement; an #egg= fragment is what lets peep
        # determine a package name from a bare URL.
        return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
    """A requirement that had no hashes recorded in the requirements file"""
    @classmethod
    def head(cls):
        # Emitted once before the per-requirement "sha256" stanzas.
        return ('The following packages had no hashes specified in the requirements file, which\n'
                'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
                'add these "sha256" lines like so:\n\n')
    def error(self):
        # A URL requirement always carries an #egg= fragment (otherwise it
        # would have been classified as MalformedReq), so the URL itself is a
        # usable requirements line; for index-based reqs, reconstruct a
        # "name==version" line instead.
        line = self._url() or '%s==%s' % (self._name(), self._version())
        return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
    """A requirement whose downloaded archive matched none of its expected hashes"""
    @classmethod
    def head(cls):
        return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
                "FILE. If you have updated the package versions, update the hashes. If not,\n"
                "freak out, because someone has tampered with the packages.\n\n")
    def error(self):
        expected = self._expected_hashes()
        lead = ' %s: expected' % self._project_name()
        if len(expected) > 1:
            lead += ' one of'
        # Continuation hashes line up one column past the end of the lead.
        joiner = '\n' + ' ' * (len(lead) + 1)
        return '%s %s\n%s got %s' % (lead,
                                     joiner.join(expected),
                                     ' ' * (len(lead) - 4),
                                     self._actual_hash())
    @classmethod
    def foot(cls):
        # A blank line separates this scary section from what follows.
        return '\n'
class SatisfiedReq(DownloadedReq):
    """A requirement which turned out to be already installed"""
    @classmethod
    def head(cls):
        # Printed once, before the list of already-satisfied requirements.
        return ("These packages were already installed, so we didn't need to download or build\n"
                "them again. If you installed them with peep in the first place, you should be\n"
                "safe. If not, uninstall them, then re-attempt your install with peep.\n")
    def error(self):
        # Despite the name, this is informational for this class: one line
        # per already-satisfied requirement.
        return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
    """A requirement whose downloaded archive matched an expected hash and can be safely installed"""
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported (peep_install() iterates this list when printing error buckets):
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
    """Group *things* into a mapping of ``key(thing) -> [things]``.

    Insertion order within each group follows iteration order of *things*.
    """
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def first_every_last(iterable, first, every, last):
    """Run ``first`` on the first item of *iterable*, ``every`` on each item,
    and ``last`` on the final one.

    If the iterable yields nothing, none of the callbacks run.
    """
    item = None
    seen_any = False
    for item in iterable:
        if not seen_any:
            seen_any = True
            first(item)
        every(item)
    if seen_any:
        # ``item`` still holds the final value from the loop.
        last(item)
def _parse_requirements(path, finder):
    """Return the list of InstallRequirements parsed out of the requirements
    file at *path*, resolving packages with *finder*."""
    def parse(**extra):
        # list() forces the lazy parse_requirements() generator to run far
        # enough to raise TypeError if this pip version rejects our kwargs.
        return list(parse_requirements(
            path, options=EmptyOptions(), finder=finder, **extra))
    try:
        return parse()
    except TypeError:
        # ``session`` became a required kwarg in pip 6.0, raising TypeError
        # when missing. PipSession can't even be imported from pip.download
        # in older versions (nor is it needed there), so import it only here.
        from pip.download import PipSession
        return parse(session=PipSession())
def downloaded_reqs_from_path(path, argv):
    """Parse the requirements file at *path* and wrap each entry in a
    DownloadedReq.

    :arg path: The path to the requirements file
    :arg argv: The commandline args, starting after the subcommand
    """
    finder = package_finder(argv)
    wrapped = []
    for parsed in _parse_requirements(path, finder):
        wrapped.append(DownloadedReq(parsed, argv, finder))
    return wrapped
def peep_install(argv):
    """Perform the ``peep install`` subcommand, returning a shell status code
    or raising a PipException.
    :arg argv: The commandline args, starting after the subcommand
    """
    # Buffer our own messages so they come out after pip's noisy output,
    # printed in one piece in the ``finally`` block below.
    output = []
    out = output.append
    reqs = []
    try:
        req_paths = list(requirement_args(argv, want_paths=True))
        if not req_paths:
            out("You have to specify one or more requirements files with the -r option, because\n"
                "otherwise there's nowhere for peep to look up the hashes.\n")
            return COMMAND_LINE_ERROR
        # We're a "peep install" command, and we have some requirement paths.
        # Downloading each req classifies it as one of the DownloadedReq
        # subclasses (Malformed/Missing/Mismatched/Satisfied/Installable).
        reqs = list(chain.from_iterable(
            downloaded_reqs_from_path(path, argv)
            for path in req_paths))
        buckets = bucket(reqs, lambda r: r.__class__)
        # Skip a line after pip's "Cleaning up..." so the important stuff
        # stands out:
        if any(buckets[b] for b in ERROR_CLASSES):
            out('\n')
        # head/error/foot printers, applied per class via first_every_last()
        # so head/foot are emitted only when the bucket is non-empty.
        printers = (lambda r: out(r.head()),
                    lambda r: out(r.error() + '\n'),
                    lambda r: out(r.foot()))
        for c in ERROR_CLASSES:
            first_every_last(buckets[c], *printers)
        if any(buckets[b] for b in ERROR_CLASSES):
            out('-------------------------------\n'
                'Not proceeding to installation.\n')
            return SOMETHING_WENT_WRONG
        else:
            for req in buckets[InstallableReq]:
                req.install()
            first_every_last(buckets[SatisfiedReq], *printers)
            return ITS_FINE_ITS_FINE
    except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
        out(str(exc))
        return SOMETHING_WENT_WRONG
    finally:
        # Always clean up each req's temp dir and flush the buffered
        # messages, even on an early return or exception.
        for req in reqs:
            req.dispose()
        print(''.join(output))
def peep_port(paths):
    """Convert a peep requirements file to one compatible with pip-8 hashing.

    Loses comments and tromps on URLs, so the result will need a little manual
    massaging, but the hard part--the hash conversion--is done for you.

    :arg paths: Paths of the requirements files to port
    """
    if not paths:
        print('Please specify one or more requirements files so I have '
              'something to port.\n')
        return COMMAND_LINE_ERROR
    comes_from = None
    # BUG FIX: this previously called package_finder(argv), but ``argv`` is
    # not defined in this function's scope, raising NameError as soon as a
    # path was processed. Build one finder up front (with default install
    # options) instead of rebuilding it once per path.
    finder = package_finder([])
    for req in chain.from_iterable(
            _parse_requirements(path, finder) for path in paths):
        req_path, req_line = path_and_line(req)
        # Peep records hashes as unpadded urlsafe-base64; pip 8 wants hex, so
        # re-pad, decode, and hexlify each one. (Renamed from ``hash``, which
        # shadowed the builtin.)
        hex_hashes = [hexlify(urlsafe_b64decode((b64 + '=').encode('ascii'))).decode('ascii')
                      for b64 in hashes_above(req_path, req_line)]
        if req_path != comes_from:
            # Emit a banner each time we move on to a new source file.
            print()
            print('# from %s' % req_path)
            print()
            comes_from = req_path
        if not hex_hashes:
            print(req.req)
        else:
            print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
            for hex_hash in hex_hashes:
                print(' \\')
                print(' --hash=sha256:%s' % hex_hash, end='')
            print()
def exception_handler(exc_type, exc_value, exc_tb):
    """Print a bug-report template for an unhandled exception.

    Points the user at the issue tracker and dumps version and traceback
    particulars they can paste into the report.
    """
    report = [
        'Oh no! Peep had a problem while trying to do stuff. Please write up a bug report',
        'with the specifics so we can fix it:',
        '',
        'https://github.com/erikrose/peep/issues/new',
        '',
        'Here are some particulars you can copy and paste into the bug report:',
        '',
        '---',
        'peep: ' + repr(__version__),
        'python: ' + repr(sys.version),
        'pip: ' + repr(getattr(pip, '__version__', 'no __version__ attr')),
        # The double space reproduces the original print('Command line: ', ...).
        'Command line:  ' + repr(sys.argv),
        ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)),
        '---',
    ]
    print('\n'.join(report))
if __name__ == '__main__':
    try:
        # main() returns a shell status code; pass it straight to exit().
        exit(main())
    except Exception:
        # Last-resort handler: print the bug-report template, then exit with
        # the dedicated "unhandled exception" status code.
        exception_handler(*sys.exc_info())
        exit(UNHANDLED_EXCEPTION)
|
erikrose/peep | peep.py | DownloadedReq._version | python | def _version(self):
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name()) | Deduce the version number of the downloaded package from its filename. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L432-L471 | null | class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
|
erikrose/peep | peep.py | DownloadedReq._is_always_unsatisfied | python | def _is_always_unsatisfied(self):
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False | Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L473-L490 | null | class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
    def _download(self, link):
        """Download a file, and return its name within my temp dir.

        This does no verification of HTTPS certs, but our checking hashes
        makes that largely unimportant. It would be nice to be able to use the
        requests lib, which can verify certs, but it is guaranteed to be
        available only in pip >= 1.5.
        This also drops support for proxies and basic auth, though those could
        be added back in.

        :arg link: A pip ``Link`` pointing at the archive to fetch
        :returns: The filename of the downloaded file inside my temp dir
        :raises DownloadError: if the URL can't be opened
        """
        # Based on pip 1.4.1's URLOpener but with cert verification removed
        def opener(is_https):
            # Build a urllib opener; for https, drop the plain HTTPHandler
            # so a MITM can't downgrade us to unencrypted http.
            if is_https:
                opener = build_opener(HTTPSHandler())
                # Strip out HTTPHandler to prevent MITM spoof:
                for handler in opener.handlers:
                    if isinstance(handler, HTTPHandler):
                        opener.handlers.remove(handler)
            else:
                opener = build_opener()
            return opener
        # Descended from unpack_http_url() in pip 1.4.1
        def best_filename(link, response):
            """Return the most informative possible filename for a download,
            ideally with a proper extension.
            """
            content_type = response.info().get('content-type', '')
            filename = link.filename # fallback
            # Have a look at the Content-Disposition header for a better guess:
            content_disposition = response.info().get('content-disposition')
            if content_disposition:
                # NOTE(review): ``type`` shadows the builtin here; harmless,
                # since the parsed media type is never used.
                type, params = cgi.parse_header(content_disposition)
                # We use ``or`` here because we don't want to use an "empty" value
                # from the filename param:
                filename = params.get('filename') or filename
            ext = splitext(filename)[1]
            if not ext:
                # No extension yet: guess one from the Content-Type header.
                ext = mimetypes.guess_extension(content_type)
                if ext:
                    filename += ext
            if not ext and link.url != response.geturl():
                # We were redirected; maybe the final URL has an extension.
                ext = splitext(response.geturl())[1]
                if ext:
                    filename += ext
            return filename
        # Descended from _download_url() in pip 1.4.1
        def pipe_to_file(response, path, size=0):
            """Pull the data off an HTTP response, shove it in a new file, and
            show progress.

            :arg response: A file-like object to read from
            :arg path: The path of the new file
            :arg size: The expected size, in bytes, of the download. 0 for
                unknown or to suppress progress indication (as for cached
                downloads)
            """
            def response_chunks(chunk_size):
                # Yield the response body in chunk_size pieces until EOF.
                while True:
                    chunk = response.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
            print('Downloading %s%s...' % (
                self._req.req,
                (' (%sK)' % (size / 1000)) if size > 1000 else ''))
            progress_indicator = (DownloadProgressBar(max=size).iter if size
                                  else DownloadProgressSpinner().iter)
            with open(path, 'wb') as file:
                for chunk in progress_indicator(response_chunks(4096), 4096):
                    file.write(chunk)
        # Strip any fragment (e.g. #sha256=...) off the URL before fetching.
        url = link.url.split('#', 1)[0]
        try:
            response = opener(urlparse(url).scheme != 'http').open(url)
        except (HTTPError, IOError) as exc:
            raise DownloadError(link, exc)
        filename = best_filename(link, response)
        try:
            size = int(response.headers['content-length'])
        except (ValueError, KeyError, TypeError):
            # Header missing or malformed: suppress the progress bar.
            size = 0
        pipe_to_file(response, join(self._temp_path, filename), size=size)
        return filename
    # Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
    @memoize # Avoid re-downloading.
    def _downloaded_filename(self):
        """Download the package's archive if necessary, and return its
        filename.

        --no-deps is implied, as we have reimplemented the bits that would
        ordinarily do dependency resolution.

        :raises UnsupportedRequirementError: if the requirement can't be
            fetched as a single local file that we can hash
        """
        # Peep doesn't support requirements that don't come down as a single
        # file, because it can't hash them. Thus, it doesn't support editable
        # requirements, because pip itself doesn't support editable
        # requirements except for "local projects or a VCS url". Nor does it
        # support VCS requirements yet, because we haven't yet come up with a
        # portable, deterministic way to hash them. In summary, all we support
        # is == requirements and tarballs/zips/etc.
        # TODO: Stop on reqs that are editable or aren't ==.
        # If the requirement isn't already specified as a URL, get a URL
        # from an index:
        link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
        if link:
            lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
            if lower_scheme == 'http' or lower_scheme == 'https':
                file_path = self._download(link)
                return basename(file_path)
            elif lower_scheme == 'file':
                # The following is inspired by pip's unpack_file_url():
                link_path = url_to_path(link.url_without_fragment)
                if isdir(link_path):
                    raise UnsupportedRequirementError(
                        "%s: %s is a directory. So that it can compute "
                        "a hash, peep supports only filesystem paths which "
                        "point to files" %
                        (self._req, link.url_without_fragment))
                else:
                    # A plain local file: just copy it into the temp dir.
                    copy(link_path, self._temp_path)
                    return basename(link_path)
            else:
                raise UnsupportedRequirementError(
                    "%s: The download link, %s, would not result in a file "
                    "that can be hashed. Peep supports only == requirements, "
                    "file:// URLs pointing to files (not folders), and "
                    "http:// and https:// URLs pointing to tarballs, zips, "
                    "etc." % (self._req, link.url))
        else:
            raise UnsupportedRequirementError(
                "%s: couldn't determine where to download this requirement from."
                % (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
    def _name(self):
        """Return the name pip has recorded for the inner requirement."""
        return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
|
erikrose/peep | peep.py | DownloadedReq._download | python | def _download(self, link):
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename | Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L497-L586 | null | class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
    def __init__(self, req, argv, finder):
        """Download a requirement, compare its hashes, and return a subclass
        of DownloadedReq depending on its state.

        :arg req: The InstallRequirement I am based on
        :arg argv: The args, starting after the subcommand
        :arg finder: Object used to locate the archive when the requirement
            doesn't carry a URL itself (``find_requirement`` is called on it)
        """
        self._req = req
        self._argv = argv
        self._finder = finder
        # We use a separate temp dir for each requirement so requirements
        # (from different indices) that happen to have the same archive names
        # don't overwrite each other, leading to a security hole in which the
        # latter is a hash mismatch, the former has already passed the
        # comparison, and the latter gets installed.
        self._temp_path = mkdtemp(prefix='peep-')
        # Think of DownloadedReq as a one-shot state machine. It's an abstract
        # class that ratchets forward to being one of its own subclasses,
        # depending on its package status. Then it doesn't move again.
        self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
|
erikrose/peep | peep.py | DownloadedReq._downloaded_filename | python | def _downloaded_filename(self):
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,)) | Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L590-L639 | null | class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
|
erikrose/peep | peep.py | DownloadedReq.install | python | def install(self):
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path]) | Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L641-L652 | [
"def run_pip(initial_args):\n \"\"\"Delegate to pip the given args (starting with the subcommand), and raise\n ``PipException`` if something goes wrong.\"\"\"\n status_code = pip.main(initial_args)\n\n # Clear out the registrations in the pip \"logger\" singleton. Otherwise,\n # loggers keep getting appended to it with every run. Pip assumes only one\n # command invocation will happen per interpreter lifetime.\n logger.consumers = []\n\n if status_code:\n raise PipException(status_code)\n",
"def requirement_args(argv, want_paths=False, want_other=False):\n \"\"\"Return an iterable of filtered arguments.\n\n :arg argv: Arguments, starting after the subcommand\n :arg want_paths: If True, the returned iterable includes the paths to any\n requirements files following a ``-r`` or ``--requirement`` option.\n :arg want_other: If True, the returned iterable includes the args that are\n not a requirement-file path or a ``-r`` or ``--requirement`` flag.\n\n \"\"\"\n was_r = False\n for arg in argv:\n # Allow for requirements files named \"-r\", don't freak out if there's a\n # trailing \"-r\", etc.\n if was_r:\n if want_paths:\n yield arg\n was_r = False\n elif arg in ['-r', '--requirement']:\n was_r = True\n else:\n if want_other:\n yield arg\n"
] | class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
|
erikrose/peep | peep.py | DownloadedReq._project_name | python | def _project_name(self):
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.') | Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L659-L671 | null | class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
|
erikrose/peep | peep.py | DownloadedReq._class | python | def _class(self):
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq | Return the class I should be, spanning a continuum of goodness. | train | https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L694-L706 | [
"def _project_name(self):\n \"\"\"Return the inner Requirement's \"unsafe name\".\n\n Raise ValueError if there is no name.\n\n \"\"\"\n name = getattr(self._req.req, 'project_name', '')\n if name:\n return name\n name = getattr(self._req.req, 'name', '')\n if name:\n return safe_name(name)\n raise ValueError('Requirement has no project_name.')\n"
] | class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
|
DerMitch/fritzbox-smarthome | fritzhome/fritz.py | FritzBox.login | python | def login(self):
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10)
xml = ET.fromstring(response.text)
if xml.find('SID').text == "0000000000000000":
challenge = xml.find('Challenge').text
url = self.base_url + "/login_sid.lua"
response = self.session.get(url, params={
"username": self.username,
"response": self.calculate_response(challenge, self.password),
}, timeout=10)
xml = ET.fromstring(response.text)
sid = xml.find('SID').text
if xml.find('SID').text == "0000000000000000":
blocktime = int(xml.find('BlockTime').text)
exc = Exception("Login failed, please wait {} seconds".format(
blocktime
))
exc.blocktime = blocktime
raise exc
self.sid = sid
return sid | Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L53-L81 | [
"def calculate_response(self, challenge, password):\n \"\"\"Calculate response for the challenge-response authentication\"\"\"\n to_hash = (challenge + \"-\" + password).encode(\"UTF-16LE\")\n hashed = hashlib.md5(to_hash).hexdigest()\n return \"{0}-{1}\".format(challenge, hashed)\n"
] | class FritzBox(object):
"""
Provides easy access to a FritzBOX's SmartHome functions,
which are poorly documented by AVM...
A note about SIDs:
They expire after some time. If you have a long-running daemon,
you should call login() every 10 minutes or so else you'll get
nice 403 errors.
"""
def __init__(self, ip, username, password, use_tls=False):
if use_tls:
self.base_url = 'https://' + ip
else:
self.base_url = 'http://' + ip
self.username = username
self.password = password
self.sid = None
self.session = Session()
def calculate_response(self, challenge, password):
"""Calculate response for the challenge-response authentication"""
to_hash = (challenge + "-" + password).encode("UTF-16LE")
hashed = hashlib.md5(to_hash).hexdigest()
return "{0}-{1}".format(challenge, hashed)
#
# Useful public methods
#
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors
def get_actor_by_ain(self, ain):
"""
Return a actor identified by it's ain or return None
"""
for actor in self.get_actors():
if actor.actor_id == ain:
return actor
#
# "Private" methods
#
def homeautoswitch(self, cmd, ain=None, param=None):
"""
Call a switch method.
Should only be used by internal library functions.
"""
assert self.sid, "Not logged in"
params = {
'switchcmd': cmd,
'sid': self.sid,
}
if param is not None:
params['param'] = param
if ain:
params['ain'] = ain
url = self.base_url + '/webservices/homeautoswitch.lua'
response = self.session.get(url, params=params, timeout=10)
response.raise_for_status()
return response.text.strip().encode('utf-8')
def get_switch_actors(self):
"""
Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
}
"""
actors = {}
for ain in self.homeautoswitch("getswitchlist").split(','):
actors[ain] = {
'name': self.homeautoswitch("getswitchname", ain),
'state': bool(self.homeautoswitch("getswitchstate", ain)),
'present': bool(self.homeautoswitch("getswitchpresent", ain)),
'power': self.homeautoswitch("getswitchpower", ain),
'energy': self.homeautoswitch("getswitchenergy", ain),
'temperature': self.homeautoswitch("getswitchtemperature", ain),
}
return actors
def set_switch_on(self, ain):
"""Switch the power of a actor ON"""
return self.homeautoswitch('setswitchon', ain)
def set_switch_off(self, ain):
"""Switch the power of a actor OFF"""
return self.homeautoswitch('setswitchoff', ain)
def set_switch_toggle(self, ain):
"""Toggle a power switch and return the new state"""
return self.homeautoswitch('setswitchtoggle', ain)
#
# DeviceID based methods
#
# Inspired by:
# https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
#
def get_devices(self):
"""
Return a list of devices.
Deprecated, use get_actors instead.
"""
url = self.base_url + '/net/home_auto_query.lua'
response = self.session.get(url, params={
'sid': self.sid,
'command': 'AllOutletStates',
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
count = int(data["Outlet_count"])
devices = []
for i in range(1, count + 1):
device = Device(
int(data["DeviceID_{0}".format(i)]),
int(data["DeviceConnectState_{0}".format(i)]),
int(data["DeviceSwitchState_{0}".format(i)])
)
devices.append(device)
return devices
def get_consumption(self, deviceid, timerange="10"):
"""
Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict
"""
tranges = ("10", "24h", "month", "year")
if timerange not in tranges:
raise ValueError(
"Unknown timerange. Possible values are: {0}".format(tranges)
)
url = self.base_url + "/net/home_auto_query.lua"
response = self.session.get(url, params={
'sid': self.sid,
'command': 'EnergyStats_{0}'.format(timerange),
'id': deviceid,
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
result = {}
# Single result values
values_map = {
'MM_Value_Amp': 'mm_value_amp',
'MM_Value_Power': 'mm_value_power',
'MM_Value_Volt': 'mm_value_volt',
'EnStats_average_value': 'enstats_average_value',
'EnStats_max_value': 'enstats_max_value',
'EnStats_min_value': 'enstats_min_value',
'EnStats_timer_type': 'enstats_timer_type',
'sum_Day': 'sum_day',
'sum_Month': 'sum_month',
'sum_Year': 'sum_year',
}
for avm_key, py_key in values_map.items():
result[py_key] = int(data[avm_key])
# Stats counts
count = int(data["EnStats_count"])
watt_values = [None for i in range(count)]
volt_values = [None for i in range(count)]
for i in range(1, count + 1):
watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
result['watt_values'] = watt_values
result['volt_values'] = volt_values
return result
def get_logs(self):
"""
Return the system logs since the last reboot.
"""
assert BeautifulSoup, "Please install bs4 to use this method"
url = self.base_url + "/system/syslog.lua"
response = self.session.get(url, params={
'sid': self.sid,
'stylemode': 'print',
}, timeout=15)
response.raise_for_status()
entries = []
tree = BeautifulSoup(response.text)
rows = tree.find('table').find_all('tr')
for row in rows:
columns = row.find_all("td")
date = columns[0].string
time = columns[1].string
message = columns[2].find("a").string
merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
msg_hash = hashlib.md5(merged).hexdigest()
entries.append(LogEntry(date, time, message, msg_hash))
return entries
|
DerMitch/fritzbox-smarthome | fritzhome/fritz.py | FritzBox.calculate_response | python | def calculate_response(self, challenge, password):
to_hash = (challenge + "-" + password).encode("UTF-16LE")
hashed = hashlib.md5(to_hash).hexdigest()
return "{0}-{1}".format(challenge, hashed) | Calculate response for the challenge-response authentication | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L83-L87 | null | class FritzBox(object):
"""
Provides easy access to a FritzBOX's SmartHome functions,
which are poorly documented by AVM...
A note about SIDs:
They expire after some time. If you have a long-running daemon,
you should call login() every 10 minutes or so else you'll get
nice 403 errors.
"""
def __init__(self, ip, username, password, use_tls=False):
if use_tls:
self.base_url = 'https://' + ip
else:
self.base_url = 'http://' + ip
self.username = username
self.password = password
self.sid = None
self.session = Session()
def login(self):
"""
Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time
"""
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10)
xml = ET.fromstring(response.text)
if xml.find('SID').text == "0000000000000000":
challenge = xml.find('Challenge').text
url = self.base_url + "/login_sid.lua"
response = self.session.get(url, params={
"username": self.username,
"response": self.calculate_response(challenge, self.password),
}, timeout=10)
xml = ET.fromstring(response.text)
sid = xml.find('SID').text
if xml.find('SID').text == "0000000000000000":
blocktime = int(xml.find('BlockTime').text)
exc = Exception("Login failed, please wait {} seconds".format(
blocktime
))
exc.blocktime = blocktime
raise exc
self.sid = sid
return sid
#
# Useful public methods
#
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors
def get_actor_by_ain(self, ain):
"""
Return a actor identified by it's ain or return None
"""
for actor in self.get_actors():
if actor.actor_id == ain:
return actor
#
# "Private" methods
#
def homeautoswitch(self, cmd, ain=None, param=None):
"""
Call a switch method.
Should only be used by internal library functions.
"""
assert self.sid, "Not logged in"
params = {
'switchcmd': cmd,
'sid': self.sid,
}
if param is not None:
params['param'] = param
if ain:
params['ain'] = ain
url = self.base_url + '/webservices/homeautoswitch.lua'
response = self.session.get(url, params=params, timeout=10)
response.raise_for_status()
return response.text.strip().encode('utf-8')
def get_switch_actors(self):
"""
Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
}
"""
actors = {}
for ain in self.homeautoswitch("getswitchlist").split(','):
actors[ain] = {
'name': self.homeautoswitch("getswitchname", ain),
'state': bool(self.homeautoswitch("getswitchstate", ain)),
'present': bool(self.homeautoswitch("getswitchpresent", ain)),
'power': self.homeautoswitch("getswitchpower", ain),
'energy': self.homeautoswitch("getswitchenergy", ain),
'temperature': self.homeautoswitch("getswitchtemperature", ain),
}
return actors
def set_switch_on(self, ain):
"""Switch the power of a actor ON"""
return self.homeautoswitch('setswitchon', ain)
def set_switch_off(self, ain):
"""Switch the power of a actor OFF"""
return self.homeautoswitch('setswitchoff', ain)
def set_switch_toggle(self, ain):
"""Toggle a power switch and return the new state"""
return self.homeautoswitch('setswitchtoggle', ain)
#
# DeviceID based methods
#
# Inspired by:
# https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
#
def get_devices(self):
"""
Return a list of devices.
Deprecated, use get_actors instead.
"""
url = self.base_url + '/net/home_auto_query.lua'
response = self.session.get(url, params={
'sid': self.sid,
'command': 'AllOutletStates',
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
count = int(data["Outlet_count"])
devices = []
for i in range(1, count + 1):
device = Device(
int(data["DeviceID_{0}".format(i)]),
int(data["DeviceConnectState_{0}".format(i)]),
int(data["DeviceSwitchState_{0}".format(i)])
)
devices.append(device)
return devices
def get_consumption(self, deviceid, timerange="10"):
"""
Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict
"""
tranges = ("10", "24h", "month", "year")
if timerange not in tranges:
raise ValueError(
"Unknown timerange. Possible values are: {0}".format(tranges)
)
url = self.base_url + "/net/home_auto_query.lua"
response = self.session.get(url, params={
'sid': self.sid,
'command': 'EnergyStats_{0}'.format(timerange),
'id': deviceid,
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
result = {}
# Single result values
values_map = {
'MM_Value_Amp': 'mm_value_amp',
'MM_Value_Power': 'mm_value_power',
'MM_Value_Volt': 'mm_value_volt',
'EnStats_average_value': 'enstats_average_value',
'EnStats_max_value': 'enstats_max_value',
'EnStats_min_value': 'enstats_min_value',
'EnStats_timer_type': 'enstats_timer_type',
'sum_Day': 'sum_day',
'sum_Month': 'sum_month',
'sum_Year': 'sum_year',
}
for avm_key, py_key in values_map.items():
result[py_key] = int(data[avm_key])
# Stats counts
count = int(data["EnStats_count"])
watt_values = [None for i in range(count)]
volt_values = [None for i in range(count)]
for i in range(1, count + 1):
watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
result['watt_values'] = watt_values
result['volt_values'] = volt_values
return result
def get_logs(self):
"""
Return the system logs since the last reboot.
"""
assert BeautifulSoup, "Please install bs4 to use this method"
url = self.base_url + "/system/syslog.lua"
response = self.session.get(url, params={
'sid': self.sid,
'stylemode': 'print',
}, timeout=15)
response.raise_for_status()
entries = []
tree = BeautifulSoup(response.text)
rows = tree.find('table').find_all('tr')
for row in rows:
columns = row.find_all("td")
date = columns[0].string
time = columns[1].string
message = columns[2].find("a").string
merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
msg_hash = hashlib.md5(merged).hexdigest()
entries.append(LogEntry(date, time, message, msg_hash))
return entries
|
DerMitch/fritzbox-smarthome | fritzhome/fritz.py | FritzBox.get_actors | python | def get_actors(self):
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors | Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data. | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L93-L106 | [
"def homeautoswitch(self, cmd, ain=None, param=None):\n \"\"\"\n Call a switch method.\n Should only be used by internal library functions.\n \"\"\"\n assert self.sid, \"Not logged in\"\n params = {\n 'switchcmd': cmd,\n 'sid': self.sid,\n }\n if param is not None:\n params['param'] = param\n if ain:\n params['ain'] = ain\n\n url = self.base_url + '/webservices/homeautoswitch.lua'\n response = self.session.get(url, params=params, timeout=10)\n response.raise_for_status()\n return response.text.strip().encode('utf-8')\n"
] | class FritzBox(object):
"""
Provides easy access to a FritzBOX's SmartHome functions,
which are poorly documented by AVM...
A note about SIDs:
They expire after some time. If you have a long-running daemon,
you should call login() every 10 minutes or so else you'll get
nice 403 errors.
"""
def __init__(self, ip, username, password, use_tls=False):
if use_tls:
self.base_url = 'https://' + ip
else:
self.base_url = 'http://' + ip
self.username = username
self.password = password
self.sid = None
self.session = Session()
def login(self):
"""
Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time
"""
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10)
xml = ET.fromstring(response.text)
if xml.find('SID').text == "0000000000000000":
challenge = xml.find('Challenge').text
url = self.base_url + "/login_sid.lua"
response = self.session.get(url, params={
"username": self.username,
"response": self.calculate_response(challenge, self.password),
}, timeout=10)
xml = ET.fromstring(response.text)
sid = xml.find('SID').text
if xml.find('SID').text == "0000000000000000":
blocktime = int(xml.find('BlockTime').text)
exc = Exception("Login failed, please wait {} seconds".format(
blocktime
))
exc.blocktime = blocktime
raise exc
self.sid = sid
return sid
def calculate_response(self, challenge, password):
"""Calculate response for the challenge-response authentication"""
to_hash = (challenge + "-" + password).encode("UTF-16LE")
hashed = hashlib.md5(to_hash).hexdigest()
return "{0}-{1}".format(challenge, hashed)
#
# Useful public methods
#
def get_actor_by_ain(self, ain):
"""
Return a actor identified by it's ain or return None
"""
for actor in self.get_actors():
if actor.actor_id == ain:
return actor
#
# "Private" methods
#
def homeautoswitch(self, cmd, ain=None, param=None):
"""
Call a switch method.
Should only be used by internal library functions.
"""
assert self.sid, "Not logged in"
params = {
'switchcmd': cmd,
'sid': self.sid,
}
if param is not None:
params['param'] = param
if ain:
params['ain'] = ain
url = self.base_url + '/webservices/homeautoswitch.lua'
response = self.session.get(url, params=params, timeout=10)
response.raise_for_status()
return response.text.strip().encode('utf-8')
def get_switch_actors(self):
"""
Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
}
"""
actors = {}
for ain in self.homeautoswitch("getswitchlist").split(','):
actors[ain] = {
'name': self.homeautoswitch("getswitchname", ain),
'state': bool(self.homeautoswitch("getswitchstate", ain)),
'present': bool(self.homeautoswitch("getswitchpresent", ain)),
'power': self.homeautoswitch("getswitchpower", ain),
'energy': self.homeautoswitch("getswitchenergy", ain),
'temperature': self.homeautoswitch("getswitchtemperature", ain),
}
return actors
def set_switch_on(self, ain):
"""Switch the power of a actor ON"""
return self.homeautoswitch('setswitchon', ain)
def set_switch_off(self, ain):
"""Switch the power of a actor OFF"""
return self.homeautoswitch('setswitchoff', ain)
def set_switch_toggle(self, ain):
"""Toggle a power switch and return the new state"""
return self.homeautoswitch('setswitchtoggle', ain)
#
# DeviceID based methods
#
# Inspired by:
# https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
#
def get_devices(self):
"""
Return a list of devices.
Deprecated, use get_actors instead.
"""
url = self.base_url + '/net/home_auto_query.lua'
response = self.session.get(url, params={
'sid': self.sid,
'command': 'AllOutletStates',
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
count = int(data["Outlet_count"])
devices = []
for i in range(1, count + 1):
device = Device(
int(data["DeviceID_{0}".format(i)]),
int(data["DeviceConnectState_{0}".format(i)]),
int(data["DeviceSwitchState_{0}".format(i)])
)
devices.append(device)
return devices
def get_consumption(self, deviceid, timerange="10"):
"""
Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict
"""
tranges = ("10", "24h", "month", "year")
if timerange not in tranges:
raise ValueError(
"Unknown timerange. Possible values are: {0}".format(tranges)
)
url = self.base_url + "/net/home_auto_query.lua"
response = self.session.get(url, params={
'sid': self.sid,
'command': 'EnergyStats_{0}'.format(timerange),
'id': deviceid,
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
result = {}
# Single result values
values_map = {
'MM_Value_Amp': 'mm_value_amp',
'MM_Value_Power': 'mm_value_power',
'MM_Value_Volt': 'mm_value_volt',
'EnStats_average_value': 'enstats_average_value',
'EnStats_max_value': 'enstats_max_value',
'EnStats_min_value': 'enstats_min_value',
'EnStats_timer_type': 'enstats_timer_type',
'sum_Day': 'sum_day',
'sum_Month': 'sum_month',
'sum_Year': 'sum_year',
}
for avm_key, py_key in values_map.items():
result[py_key] = int(data[avm_key])
# Stats counts
count = int(data["EnStats_count"])
watt_values = [None for i in range(count)]
volt_values = [None for i in range(count)]
for i in range(1, count + 1):
watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
result['watt_values'] = watt_values
result['volt_values'] = volt_values
return result
def get_logs(self):
"""
Return the system logs since the last reboot.
"""
assert BeautifulSoup, "Please install bs4 to use this method"
url = self.base_url + "/system/syslog.lua"
response = self.session.get(url, params={
'sid': self.sid,
'stylemode': 'print',
}, timeout=15)
response.raise_for_status()
entries = []
tree = BeautifulSoup(response.text)
rows = tree.find('table').find_all('tr')
for row in rows:
columns = row.find_all("td")
date = columns[0].string
time = columns[1].string
message = columns[2].find("a").string
merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
msg_hash = hashlib.md5(merged).hexdigest()
entries.append(LogEntry(date, time, message, msg_hash))
return entries
|
DerMitch/fritzbox-smarthome | fritzhome/fritz.py | FritzBox.get_actor_by_ain | python | def get_actor_by_ain(self, ain):
for actor in self.get_actors():
if actor.actor_id == ain:
return actor | Return a actor identified by it's ain or return None | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L108-L114 | [
"def get_actors(self):\n \"\"\"\n Returns a list of Actor objects for querying SmartHome devices.\n\n This is currently the only working method for getting temperature data.\n \"\"\"\n devices = self.homeautoswitch(\"getdevicelistinfos\")\n xml = ET.fromstring(devices)\n\n actors = []\n for device in xml.findall('device'):\n actors.append(Actor(fritzbox=self, device=device))\n\n return actors\n"
] | class FritzBox(object):
"""
Provides easy access to a FritzBOX's SmartHome functions,
which are poorly documented by AVM...
A note about SIDs:
They expire after some time. If you have a long-running daemon,
you should call login() every 10 minutes or so else you'll get
nice 403 errors.
"""
def __init__(self, ip, username, password, use_tls=False):
if use_tls:
self.base_url = 'https://' + ip
else:
self.base_url = 'http://' + ip
self.username = username
self.password = password
self.sid = None
self.session = Session()
def login(self):
"""
Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time
"""
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10)
xml = ET.fromstring(response.text)
if xml.find('SID').text == "0000000000000000":
challenge = xml.find('Challenge').text
url = self.base_url + "/login_sid.lua"
response = self.session.get(url, params={
"username": self.username,
"response": self.calculate_response(challenge, self.password),
}, timeout=10)
xml = ET.fromstring(response.text)
sid = xml.find('SID').text
if xml.find('SID').text == "0000000000000000":
blocktime = int(xml.find('BlockTime').text)
exc = Exception("Login failed, please wait {} seconds".format(
blocktime
))
exc.blocktime = blocktime
raise exc
self.sid = sid
return sid
def calculate_response(self, challenge, password):
"""Calculate response for the challenge-response authentication"""
to_hash = (challenge + "-" + password).encode("UTF-16LE")
hashed = hashlib.md5(to_hash).hexdigest()
return "{0}-{1}".format(challenge, hashed)
#
# Useful public methods
#
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors
#
# "Private" methods
#
def homeautoswitch(self, cmd, ain=None, param=None):
"""
Call a switch method.
Should only be used by internal library functions.
"""
assert self.sid, "Not logged in"
params = {
'switchcmd': cmd,
'sid': self.sid,
}
if param is not None:
params['param'] = param
if ain:
params['ain'] = ain
url = self.base_url + '/webservices/homeautoswitch.lua'
response = self.session.get(url, params=params, timeout=10)
response.raise_for_status()
return response.text.strip().encode('utf-8')
def get_switch_actors(self):
"""
Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
}
"""
actors = {}
for ain in self.homeautoswitch("getswitchlist").split(','):
actors[ain] = {
'name': self.homeautoswitch("getswitchname", ain),
'state': bool(self.homeautoswitch("getswitchstate", ain)),
'present': bool(self.homeautoswitch("getswitchpresent", ain)),
'power': self.homeautoswitch("getswitchpower", ain),
'energy': self.homeautoswitch("getswitchenergy", ain),
'temperature': self.homeautoswitch("getswitchtemperature", ain),
}
return actors
def set_switch_on(self, ain):
"""Switch the power of a actor ON"""
return self.homeautoswitch('setswitchon', ain)
def set_switch_off(self, ain):
"""Switch the power of a actor OFF"""
return self.homeautoswitch('setswitchoff', ain)
def set_switch_toggle(self, ain):
"""Toggle a power switch and return the new state"""
return self.homeautoswitch('setswitchtoggle', ain)
#
# DeviceID based methods
#
# Inspired by:
# https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
#
def get_devices(self):
"""
Return a list of devices.
Deprecated, use get_actors instead.
"""
url = self.base_url + '/net/home_auto_query.lua'
response = self.session.get(url, params={
'sid': self.sid,
'command': 'AllOutletStates',
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
count = int(data["Outlet_count"])
devices = []
for i in range(1, count + 1):
device = Device(
int(data["DeviceID_{0}".format(i)]),
int(data["DeviceConnectState_{0}".format(i)]),
int(data["DeviceSwitchState_{0}".format(i)])
)
devices.append(device)
return devices
def get_consumption(self, deviceid, timerange="10"):
"""
Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict
"""
tranges = ("10", "24h", "month", "year")
if timerange not in tranges:
raise ValueError(
"Unknown timerange. Possible values are: {0}".format(tranges)
)
url = self.base_url + "/net/home_auto_query.lua"
response = self.session.get(url, params={
'sid': self.sid,
'command': 'EnergyStats_{0}'.format(timerange),
'id': deviceid,
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
result = {}
# Single result values
values_map = {
'MM_Value_Amp': 'mm_value_amp',
'MM_Value_Power': 'mm_value_power',
'MM_Value_Volt': 'mm_value_volt',
'EnStats_average_value': 'enstats_average_value',
'EnStats_max_value': 'enstats_max_value',
'EnStats_min_value': 'enstats_min_value',
'EnStats_timer_type': 'enstats_timer_type',
'sum_Day': 'sum_day',
'sum_Month': 'sum_month',
'sum_Year': 'sum_year',
}
for avm_key, py_key in values_map.items():
result[py_key] = int(data[avm_key])
# Stats counts
count = int(data["EnStats_count"])
watt_values = [None for i in range(count)]
volt_values = [None for i in range(count)]
for i in range(1, count + 1):
watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
result['watt_values'] = watt_values
result['volt_values'] = volt_values
return result
def get_logs(self):
"""
Return the system logs since the last reboot.
"""
assert BeautifulSoup, "Please install bs4 to use this method"
url = self.base_url + "/system/syslog.lua"
response = self.session.get(url, params={
'sid': self.sid,
'stylemode': 'print',
}, timeout=15)
response.raise_for_status()
entries = []
tree = BeautifulSoup(response.text)
rows = tree.find('table').find_all('tr')
for row in rows:
columns = row.find_all("td")
date = columns[0].string
time = columns[1].string
message = columns[2].find("a").string
merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
msg_hash = hashlib.md5(merged).hexdigest()
entries.append(LogEntry(date, time, message, msg_hash))
return entries
|
DerMitch/fritzbox-smarthome | fritzhome/fritz.py | FritzBox.homeautoswitch | python | def homeautoswitch(self, cmd, ain=None, param=None):
assert self.sid, "Not logged in"
params = {
'switchcmd': cmd,
'sid': self.sid,
}
if param is not None:
params['param'] = param
if ain:
params['ain'] = ain
url = self.base_url + '/webservices/homeautoswitch.lua'
response = self.session.get(url, params=params, timeout=10)
response.raise_for_status()
return response.text.strip().encode('utf-8') | Call a switch method.
Should only be used by internal library functions. | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L120-L138 | null | class FritzBox(object):
"""
Provides easy access to a FritzBOX's SmartHome functions,
which are poorly documented by AVM...
A note about SIDs:
They expire after some time. If you have a long-running daemon,
you should call login() every 10 minutes or so else you'll get
nice 403 errors.
"""
def __init__(self, ip, username, password, use_tls=False):
if use_tls:
self.base_url = 'https://' + ip
else:
self.base_url = 'http://' + ip
self.username = username
self.password = password
self.sid = None
self.session = Session()
def login(self):
"""
Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time
"""
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10)
xml = ET.fromstring(response.text)
if xml.find('SID').text == "0000000000000000":
challenge = xml.find('Challenge').text
url = self.base_url + "/login_sid.lua"
response = self.session.get(url, params={
"username": self.username,
"response": self.calculate_response(challenge, self.password),
}, timeout=10)
xml = ET.fromstring(response.text)
sid = xml.find('SID').text
if xml.find('SID').text == "0000000000000000":
blocktime = int(xml.find('BlockTime').text)
exc = Exception("Login failed, please wait {} seconds".format(
blocktime
))
exc.blocktime = blocktime
raise exc
self.sid = sid
return sid
def calculate_response(self, challenge, password):
"""Calculate response for the challenge-response authentication"""
to_hash = (challenge + "-" + password).encode("UTF-16LE")
hashed = hashlib.md5(to_hash).hexdigest()
return "{0}-{1}".format(challenge, hashed)
#
# Useful public methods
#
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors
def get_actor_by_ain(self, ain):
"""
Return a actor identified by it's ain or return None
"""
for actor in self.get_actors():
if actor.actor_id == ain:
return actor
#
# "Private" methods
#
def get_switch_actors(self):
"""
Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
}
"""
actors = {}
for ain in self.homeautoswitch("getswitchlist").split(','):
actors[ain] = {
'name': self.homeautoswitch("getswitchname", ain),
'state': bool(self.homeautoswitch("getswitchstate", ain)),
'present': bool(self.homeautoswitch("getswitchpresent", ain)),
'power': self.homeautoswitch("getswitchpower", ain),
'energy': self.homeautoswitch("getswitchenergy", ain),
'temperature': self.homeautoswitch("getswitchtemperature", ain),
}
return actors
def set_switch_on(self, ain):
"""Switch the power of a actor ON"""
return self.homeautoswitch('setswitchon', ain)
def set_switch_off(self, ain):
"""Switch the power of a actor OFF"""
return self.homeautoswitch('setswitchoff', ain)
def set_switch_toggle(self, ain):
"""Toggle a power switch and return the new state"""
return self.homeautoswitch('setswitchtoggle', ain)
#
# DeviceID based methods
#
# Inspired by:
# https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
#
def get_devices(self):
"""
Return a list of devices.
Deprecated, use get_actors instead.
"""
url = self.base_url + '/net/home_auto_query.lua'
response = self.session.get(url, params={
'sid': self.sid,
'command': 'AllOutletStates',
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
count = int(data["Outlet_count"])
devices = []
for i in range(1, count + 1):
device = Device(
int(data["DeviceID_{0}".format(i)]),
int(data["DeviceConnectState_{0}".format(i)]),
int(data["DeviceSwitchState_{0}".format(i)])
)
devices.append(device)
return devices
def get_consumption(self, deviceid, timerange="10"):
"""
Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict
"""
tranges = ("10", "24h", "month", "year")
if timerange not in tranges:
raise ValueError(
"Unknown timerange. Possible values are: {0}".format(tranges)
)
url = self.base_url + "/net/home_auto_query.lua"
response = self.session.get(url, params={
'sid': self.sid,
'command': 'EnergyStats_{0}'.format(timerange),
'id': deviceid,
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
result = {}
# Single result values
values_map = {
'MM_Value_Amp': 'mm_value_amp',
'MM_Value_Power': 'mm_value_power',
'MM_Value_Volt': 'mm_value_volt',
'EnStats_average_value': 'enstats_average_value',
'EnStats_max_value': 'enstats_max_value',
'EnStats_min_value': 'enstats_min_value',
'EnStats_timer_type': 'enstats_timer_type',
'sum_Day': 'sum_day',
'sum_Month': 'sum_month',
'sum_Year': 'sum_year',
}
for avm_key, py_key in values_map.items():
result[py_key] = int(data[avm_key])
# Stats counts
count = int(data["EnStats_count"])
watt_values = [None for i in range(count)]
volt_values = [None for i in range(count)]
for i in range(1, count + 1):
watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
result['watt_values'] = watt_values
result['volt_values'] = volt_values
return result
def get_logs(self):
"""
Return the system logs since the last reboot.
"""
assert BeautifulSoup, "Please install bs4 to use this method"
url = self.base_url + "/system/syslog.lua"
response = self.session.get(url, params={
'sid': self.sid,
'stylemode': 'print',
}, timeout=15)
response.raise_for_status()
entries = []
tree = BeautifulSoup(response.text)
rows = tree.find('table').find_all('tr')
for row in rows:
columns = row.find_all("td")
date = columns[0].string
time = columns[1].string
message = columns[2].find("a").string
merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
msg_hash = hashlib.md5(merged).hexdigest()
entries.append(LogEntry(date, time, message, msg_hash))
return entries
|
DerMitch/fritzbox-smarthome | fritzhome/fritz.py | FritzBox.get_switch_actors | python | def get_switch_actors(self):
actors = {}
for ain in self.homeautoswitch("getswitchlist").split(','):
actors[ain] = {
'name': self.homeautoswitch("getswitchname", ain),
'state': bool(self.homeautoswitch("getswitchstate", ain)),
'present': bool(self.homeautoswitch("getswitchpresent", ain)),
'power': self.homeautoswitch("getswitchpower", ain),
'energy': self.homeautoswitch("getswitchenergy", ain),
'temperature': self.homeautoswitch("getswitchtemperature", ain),
}
return actors | Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
} | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L140-L168 | [
"def homeautoswitch(self, cmd, ain=None, param=None):\n \"\"\"\n Call a switch method.\n Should only be used by internal library functions.\n \"\"\"\n assert self.sid, \"Not logged in\"\n params = {\n 'switchcmd': cmd,\n 'sid': self.sid,\n }\n if param is not None:\n params['param'] = param\n if ain:\n params['ain'] = ain\n\n url = self.base_url + '/webservices/homeautoswitch.lua'\n response = self.session.get(url, params=params, timeout=10)\n response.raise_for_status()\n return response.text.strip().encode('utf-8')\n"
] | class FritzBox(object):
"""
Provides easy access to a FritzBOX's SmartHome functions,
which are poorly documented by AVM...
A note about SIDs:
They expire after some time. If you have a long-running daemon,
you should call login() every 10 minutes or so else you'll get
nice 403 errors.
"""
def __init__(self, ip, username, password, use_tls=False):
if use_tls:
self.base_url = 'https://' + ip
else:
self.base_url = 'http://' + ip
self.username = username
self.password = password
self.sid = None
self.session = Session()
def login(self):
"""
Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time
"""
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10)
xml = ET.fromstring(response.text)
if xml.find('SID').text == "0000000000000000":
challenge = xml.find('Challenge').text
url = self.base_url + "/login_sid.lua"
response = self.session.get(url, params={
"username": self.username,
"response": self.calculate_response(challenge, self.password),
}, timeout=10)
xml = ET.fromstring(response.text)
sid = xml.find('SID').text
if xml.find('SID').text == "0000000000000000":
blocktime = int(xml.find('BlockTime').text)
exc = Exception("Login failed, please wait {} seconds".format(
blocktime
))
exc.blocktime = blocktime
raise exc
self.sid = sid
return sid
def calculate_response(self, challenge, password):
"""Calculate response for the challenge-response authentication"""
to_hash = (challenge + "-" + password).encode("UTF-16LE")
hashed = hashlib.md5(to_hash).hexdigest()
return "{0}-{1}".format(challenge, hashed)
#
# Useful public methods
#
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors
def get_actor_by_ain(self, ain):
"""
Return a actor identified by it's ain or return None
"""
for actor in self.get_actors():
if actor.actor_id == ain:
return actor
#
# "Private" methods
#
def homeautoswitch(self, cmd, ain=None, param=None):
"""
Call a switch method.
Should only be used by internal library functions.
"""
assert self.sid, "Not logged in"
params = {
'switchcmd': cmd,
'sid': self.sid,
}
if param is not None:
params['param'] = param
if ain:
params['ain'] = ain
url = self.base_url + '/webservices/homeautoswitch.lua'
response = self.session.get(url, params=params, timeout=10)
response.raise_for_status()
return response.text.strip().encode('utf-8')
def set_switch_on(self, ain):
"""Switch the power of a actor ON"""
return self.homeautoswitch('setswitchon', ain)
def set_switch_off(self, ain):
"""Switch the power of a actor OFF"""
return self.homeautoswitch('setswitchoff', ain)
def set_switch_toggle(self, ain):
"""Toggle a power switch and return the new state"""
return self.homeautoswitch('setswitchtoggle', ain)
#
# DeviceID based methods
#
# Inspired by:
# https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
#
def get_devices(self):
"""
Return a list of devices.
Deprecated, use get_actors instead.
"""
url = self.base_url + '/net/home_auto_query.lua'
response = self.session.get(url, params={
'sid': self.sid,
'command': 'AllOutletStates',
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
count = int(data["Outlet_count"])
devices = []
for i in range(1, count + 1):
device = Device(
int(data["DeviceID_{0}".format(i)]),
int(data["DeviceConnectState_{0}".format(i)]),
int(data["DeviceSwitchState_{0}".format(i)])
)
devices.append(device)
return devices
def get_consumption(self, deviceid, timerange="10"):
"""
Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict
"""
tranges = ("10", "24h", "month", "year")
if timerange not in tranges:
raise ValueError(
"Unknown timerange. Possible values are: {0}".format(tranges)
)
url = self.base_url + "/net/home_auto_query.lua"
response = self.session.get(url, params={
'sid': self.sid,
'command': 'EnergyStats_{0}'.format(timerange),
'id': deviceid,
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
result = {}
# Single result values
values_map = {
'MM_Value_Amp': 'mm_value_amp',
'MM_Value_Power': 'mm_value_power',
'MM_Value_Volt': 'mm_value_volt',
'EnStats_average_value': 'enstats_average_value',
'EnStats_max_value': 'enstats_max_value',
'EnStats_min_value': 'enstats_min_value',
'EnStats_timer_type': 'enstats_timer_type',
'sum_Day': 'sum_day',
'sum_Month': 'sum_month',
'sum_Year': 'sum_year',
}
for avm_key, py_key in values_map.items():
result[py_key] = int(data[avm_key])
# Stats counts
count = int(data["EnStats_count"])
watt_values = [None for i in range(count)]
volt_values = [None for i in range(count)]
for i in range(1, count + 1):
watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
result['watt_values'] = watt_values
result['volt_values'] = volt_values
return result
def get_logs(self):
"""
Return the system logs since the last reboot.
"""
assert BeautifulSoup, "Please install bs4 to use this method"
url = self.base_url + "/system/syslog.lua"
response = self.session.get(url, params={
'sid': self.sid,
'stylemode': 'print',
}, timeout=15)
response.raise_for_status()
entries = []
tree = BeautifulSoup(response.text)
rows = tree.find('table').find_all('tr')
for row in rows:
columns = row.find_all("td")
date = columns[0].string
time = columns[1].string
message = columns[2].find("a").string
merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
msg_hash = hashlib.md5(merged).hexdigest()
entries.append(LogEntry(date, time, message, msg_hash))
return entries
|
DerMitch/fritzbox-smarthome | fritzhome/fritz.py | FritzBox.get_devices | python | def get_devices(self):
url = self.base_url + '/net/home_auto_query.lua'
response = self.session.get(url, params={
'sid': self.sid,
'command': 'AllOutletStates',
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
count = int(data["Outlet_count"])
devices = []
for i in range(1, count + 1):
device = Device(
int(data["DeviceID_{0}".format(i)]),
int(data["DeviceConnectState_{0}".format(i)]),
int(data["DeviceSwitchState_{0}".format(i)])
)
devices.append(device)
return devices | Return a list of devices.
Deprecated, use get_actors instead. | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L189-L211 | null | class FritzBox(object):
"""
Provides easy access to a FritzBOX's SmartHome functions,
which are poorly documented by AVM...
A note about SIDs:
They expire after some time. If you have a long-running daemon,
you should call login() every 10 minutes or so else you'll get
nice 403 errors.
"""
def __init__(self, ip, username, password, use_tls=False):
if use_tls:
self.base_url = 'https://' + ip
else:
self.base_url = 'http://' + ip
self.username = username
self.password = password
self.sid = None
self.session = Session()
def login(self):
"""
Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time
"""
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10)
xml = ET.fromstring(response.text)
if xml.find('SID').text == "0000000000000000":
challenge = xml.find('Challenge').text
url = self.base_url + "/login_sid.lua"
response = self.session.get(url, params={
"username": self.username,
"response": self.calculate_response(challenge, self.password),
}, timeout=10)
xml = ET.fromstring(response.text)
sid = xml.find('SID').text
if xml.find('SID').text == "0000000000000000":
blocktime = int(xml.find('BlockTime').text)
exc = Exception("Login failed, please wait {} seconds".format(
blocktime
))
exc.blocktime = blocktime
raise exc
self.sid = sid
return sid
def calculate_response(self, challenge, password):
"""Calculate response for the challenge-response authentication"""
to_hash = (challenge + "-" + password).encode("UTF-16LE")
hashed = hashlib.md5(to_hash).hexdigest()
return "{0}-{1}".format(challenge, hashed)
#
# Useful public methods
#
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors
def get_actor_by_ain(self, ain):
"""
Return a actor identified by it's ain or return None
"""
for actor in self.get_actors():
if actor.actor_id == ain:
return actor
#
# "Private" methods
#
def homeautoswitch(self, cmd, ain=None, param=None):
"""
Call a switch method.
Should only be used by internal library functions.
"""
assert self.sid, "Not logged in"
params = {
'switchcmd': cmd,
'sid': self.sid,
}
if param is not None:
params['param'] = param
if ain:
params['ain'] = ain
url = self.base_url + '/webservices/homeautoswitch.lua'
response = self.session.get(url, params=params, timeout=10)
response.raise_for_status()
return response.text.strip().encode('utf-8')
def get_switch_actors(self):
"""
Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
}
"""
actors = {}
for ain in self.homeautoswitch("getswitchlist").split(','):
actors[ain] = {
'name': self.homeautoswitch("getswitchname", ain),
'state': bool(self.homeautoswitch("getswitchstate", ain)),
'present': bool(self.homeautoswitch("getswitchpresent", ain)),
'power': self.homeautoswitch("getswitchpower", ain),
'energy': self.homeautoswitch("getswitchenergy", ain),
'temperature': self.homeautoswitch("getswitchtemperature", ain),
}
return actors
def set_switch_on(self, ain):
"""Switch the power of a actor ON"""
return self.homeautoswitch('setswitchon', ain)
def set_switch_off(self, ain):
"""Switch the power of a actor OFF"""
return self.homeautoswitch('setswitchoff', ain)
def set_switch_toggle(self, ain):
"""Toggle a power switch and return the new state"""
return self.homeautoswitch('setswitchtoggle', ain)
#
# DeviceID based methods
#
# Inspired by:
# https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
#
def get_consumption(self, deviceid, timerange="10"):
"""
Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict
"""
tranges = ("10", "24h", "month", "year")
if timerange not in tranges:
raise ValueError(
"Unknown timerange. Possible values are: {0}".format(tranges)
)
url = self.base_url + "/net/home_auto_query.lua"
response = self.session.get(url, params={
'sid': self.sid,
'command': 'EnergyStats_{0}'.format(timerange),
'id': deviceid,
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
result = {}
# Single result values
values_map = {
'MM_Value_Amp': 'mm_value_amp',
'MM_Value_Power': 'mm_value_power',
'MM_Value_Volt': 'mm_value_volt',
'EnStats_average_value': 'enstats_average_value',
'EnStats_max_value': 'enstats_max_value',
'EnStats_min_value': 'enstats_min_value',
'EnStats_timer_type': 'enstats_timer_type',
'sum_Day': 'sum_day',
'sum_Month': 'sum_month',
'sum_Year': 'sum_year',
}
for avm_key, py_key in values_map.items():
result[py_key] = int(data[avm_key])
# Stats counts
count = int(data["EnStats_count"])
watt_values = [None for i in range(count)]
volt_values = [None for i in range(count)]
for i in range(1, count + 1):
watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
result['watt_values'] = watt_values
result['volt_values'] = volt_values
return result
def get_logs(self):
"""
Return the system logs since the last reboot.
"""
assert BeautifulSoup, "Please install bs4 to use this method"
url = self.base_url + "/system/syslog.lua"
response = self.session.get(url, params={
'sid': self.sid,
'stylemode': 'print',
}, timeout=15)
response.raise_for_status()
entries = []
tree = BeautifulSoup(response.text)
rows = tree.find('table').find_all('tr')
for row in rows:
columns = row.find_all("td")
date = columns[0].string
time = columns[1].string
message = columns[2].find("a").string
merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
msg_hash = hashlib.md5(merged).hexdigest()
entries.append(LogEntry(date, time, message, msg_hash))
return entries
|
DerMitch/fritzbox-smarthome | fritzhome/fritz.py | FritzBox.get_consumption | python | def get_consumption(self, deviceid, timerange="10"):
tranges = ("10", "24h", "month", "year")
if timerange not in tranges:
raise ValueError(
"Unknown timerange. Possible values are: {0}".format(tranges)
)
url = self.base_url + "/net/home_auto_query.lua"
response = self.session.get(url, params={
'sid': self.sid,
'command': 'EnergyStats_{0}'.format(timerange),
'id': deviceid,
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
result = {}
# Single result values
values_map = {
'MM_Value_Amp': 'mm_value_amp',
'MM_Value_Power': 'mm_value_power',
'MM_Value_Volt': 'mm_value_volt',
'EnStats_average_value': 'enstats_average_value',
'EnStats_max_value': 'enstats_max_value',
'EnStats_min_value': 'enstats_min_value',
'EnStats_timer_type': 'enstats_timer_type',
'sum_Day': 'sum_day',
'sum_Month': 'sum_month',
'sum_Year': 'sum_year',
}
for avm_key, py_key in values_map.items():
result[py_key] = int(data[avm_key])
# Stats counts
count = int(data["EnStats_count"])
watt_values = [None for i in range(count)]
volt_values = [None for i in range(count)]
for i in range(1, count + 1):
watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
result['watt_values'] = watt_values
result['volt_values'] = volt_values
return result | Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L213-L268 | null | class FritzBox(object):
"""
Provides easy access to a FritzBOX's SmartHome functions,
which are poorly documented by AVM...
A note about SIDs:
They expire after some time. If you have a long-running daemon,
you should call login() every 10 minutes or so else you'll get
nice 403 errors.
"""
def __init__(self, ip, username, password, use_tls=False):
if use_tls:
self.base_url = 'https://' + ip
else:
self.base_url = 'http://' + ip
self.username = username
self.password = password
self.sid = None
self.session = Session()
def login(self):
"""
Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time
"""
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10)
xml = ET.fromstring(response.text)
if xml.find('SID').text == "0000000000000000":
challenge = xml.find('Challenge').text
url = self.base_url + "/login_sid.lua"
response = self.session.get(url, params={
"username": self.username,
"response": self.calculate_response(challenge, self.password),
}, timeout=10)
xml = ET.fromstring(response.text)
sid = xml.find('SID').text
if xml.find('SID').text == "0000000000000000":
blocktime = int(xml.find('BlockTime').text)
exc = Exception("Login failed, please wait {} seconds".format(
blocktime
))
exc.blocktime = blocktime
raise exc
self.sid = sid
return sid
def calculate_response(self, challenge, password):
"""Calculate response for the challenge-response authentication"""
to_hash = (challenge + "-" + password).encode("UTF-16LE")
hashed = hashlib.md5(to_hash).hexdigest()
return "{0}-{1}".format(challenge, hashed)
#
# Useful public methods
#
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors
def get_actor_by_ain(self, ain):
"""
Return a actor identified by it's ain or return None
"""
for actor in self.get_actors():
if actor.actor_id == ain:
return actor
#
# "Private" methods
#
def homeautoswitch(self, cmd, ain=None, param=None):
"""
Call a switch method.
Should only be used by internal library functions.
"""
assert self.sid, "Not logged in"
params = {
'switchcmd': cmd,
'sid': self.sid,
}
if param is not None:
params['param'] = param
if ain:
params['ain'] = ain
url = self.base_url + '/webservices/homeautoswitch.lua'
response = self.session.get(url, params=params, timeout=10)
response.raise_for_status()
return response.text.strip().encode('utf-8')
def get_switch_actors(self):
"""
Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
}
"""
actors = {}
for ain in self.homeautoswitch("getswitchlist").split(','):
actors[ain] = {
'name': self.homeautoswitch("getswitchname", ain),
'state': bool(self.homeautoswitch("getswitchstate", ain)),
'present': bool(self.homeautoswitch("getswitchpresent", ain)),
'power': self.homeautoswitch("getswitchpower", ain),
'energy': self.homeautoswitch("getswitchenergy", ain),
'temperature': self.homeautoswitch("getswitchtemperature", ain),
}
return actors
def set_switch_on(self, ain):
"""Switch the power of a actor ON"""
return self.homeautoswitch('setswitchon', ain)
def set_switch_off(self, ain):
"""Switch the power of a actor OFF"""
return self.homeautoswitch('setswitchoff', ain)
def set_switch_toggle(self, ain):
"""Toggle a power switch and return the new state"""
return self.homeautoswitch('setswitchtoggle', ain)
#
# DeviceID based methods
#
# Inspired by:
# https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
#
def get_devices(self):
"""
Return a list of devices.
Deprecated, use get_actors instead.
"""
url = self.base_url + '/net/home_auto_query.lua'
response = self.session.get(url, params={
'sid': self.sid,
'command': 'AllOutletStates',
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
count = int(data["Outlet_count"])
devices = []
for i in range(1, count + 1):
device = Device(
int(data["DeviceID_{0}".format(i)]),
int(data["DeviceConnectState_{0}".format(i)]),
int(data["DeviceSwitchState_{0}".format(i)])
)
devices.append(device)
return devices
def get_logs(self):
"""
Return the system logs since the last reboot.
"""
assert BeautifulSoup, "Please install bs4 to use this method"
url = self.base_url + "/system/syslog.lua"
response = self.session.get(url, params={
'sid': self.sid,
'stylemode': 'print',
}, timeout=15)
response.raise_for_status()
entries = []
tree = BeautifulSoup(response.text)
rows = tree.find('table').find_all('tr')
for row in rows:
columns = row.find_all("td")
date = columns[0].string
time = columns[1].string
message = columns[2].find("a").string
merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
msg_hash = hashlib.md5(merged).hexdigest()
entries.append(LogEntry(date, time, message, msg_hash))
return entries
|
DerMitch/fritzbox-smarthome | fritzhome/fritz.py | FritzBox.get_logs | python | def get_logs(self):
assert BeautifulSoup, "Please install bs4 to use this method"
url = self.base_url + "/system/syslog.lua"
response = self.session.get(url, params={
'sid': self.sid,
'stylemode': 'print',
}, timeout=15)
response.raise_for_status()
entries = []
tree = BeautifulSoup(response.text)
rows = tree.find('table').find_all('tr')
for row in rows:
columns = row.find_all("td")
date = columns[0].string
time = columns[1].string
message = columns[2].find("a").string
merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
msg_hash = hashlib.md5(merged).hexdigest()
entries.append(LogEntry(date, time, message, msg_hash))
return entries | Return the system logs since the last reboot. | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L270-L295 | null | class FritzBox(object):
"""
Provides easy access to a FritzBOX's SmartHome functions,
which are poorly documented by AVM...
A note about SIDs:
They expire after some time. If you have a long-running daemon,
you should call login() every 10 minutes or so else you'll get
nice 403 errors.
"""
def __init__(self, ip, username, password, use_tls=False):
if use_tls:
self.base_url = 'https://' + ip
else:
self.base_url = 'http://' + ip
self.username = username
self.password = password
self.sid = None
self.session = Session()
def login(self):
"""
Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time
"""
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10)
xml = ET.fromstring(response.text)
if xml.find('SID').text == "0000000000000000":
challenge = xml.find('Challenge').text
url = self.base_url + "/login_sid.lua"
response = self.session.get(url, params={
"username": self.username,
"response": self.calculate_response(challenge, self.password),
}, timeout=10)
xml = ET.fromstring(response.text)
sid = xml.find('SID').text
if xml.find('SID').text == "0000000000000000":
blocktime = int(xml.find('BlockTime').text)
exc = Exception("Login failed, please wait {} seconds".format(
blocktime
))
exc.blocktime = blocktime
raise exc
self.sid = sid
return sid
def calculate_response(self, challenge, password):
"""Calculate response for the challenge-response authentication"""
to_hash = (challenge + "-" + password).encode("UTF-16LE")
hashed = hashlib.md5(to_hash).hexdigest()
return "{0}-{1}".format(challenge, hashed)
#
# Useful public methods
#
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors
def get_actor_by_ain(self, ain):
"""
Return a actor identified by it's ain or return None
"""
for actor in self.get_actors():
if actor.actor_id == ain:
return actor
#
# "Private" methods
#
def homeautoswitch(self, cmd, ain=None, param=None):
"""
Call a switch method.
Should only be used by internal library functions.
"""
assert self.sid, "Not logged in"
params = {
'switchcmd': cmd,
'sid': self.sid,
}
if param is not None:
params['param'] = param
if ain:
params['ain'] = ain
url = self.base_url + '/webservices/homeautoswitch.lua'
response = self.session.get(url, params=params, timeout=10)
response.raise_for_status()
return response.text.strip().encode('utf-8')
def get_switch_actors(self):
"""
Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
}
"""
actors = {}
for ain in self.homeautoswitch("getswitchlist").split(','):
actors[ain] = {
'name': self.homeautoswitch("getswitchname", ain),
'state': bool(self.homeautoswitch("getswitchstate", ain)),
'present': bool(self.homeautoswitch("getswitchpresent", ain)),
'power': self.homeautoswitch("getswitchpower", ain),
'energy': self.homeautoswitch("getswitchenergy", ain),
'temperature': self.homeautoswitch("getswitchtemperature", ain),
}
return actors
def set_switch_on(self, ain):
"""Switch the power of a actor ON"""
return self.homeautoswitch('setswitchon', ain)
def set_switch_off(self, ain):
"""Switch the power of a actor OFF"""
return self.homeautoswitch('setswitchoff', ain)
def set_switch_toggle(self, ain):
"""Toggle a power switch and return the new state"""
return self.homeautoswitch('setswitchtoggle', ain)
#
# DeviceID based methods
#
# Inspired by:
# https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
#
def get_devices(self):
"""
Return a list of devices.
Deprecated, use get_actors instead.
"""
url = self.base_url + '/net/home_auto_query.lua'
response = self.session.get(url, params={
'sid': self.sid,
'command': 'AllOutletStates',
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
count = int(data["Outlet_count"])
devices = []
for i in range(1, count + 1):
device = Device(
int(data["DeviceID_{0}".format(i)]),
int(data["DeviceConnectState_{0}".format(i)]),
int(data["DeviceSwitchState_{0}".format(i)])
)
devices.append(device)
return devices
def get_consumption(self, deviceid, timerange="10"):
"""
Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict
"""
tranges = ("10", "24h", "month", "year")
if timerange not in tranges:
raise ValueError(
"Unknown timerange. Possible values are: {0}".format(tranges)
)
url = self.base_url + "/net/home_auto_query.lua"
response = self.session.get(url, params={
'sid': self.sid,
'command': 'EnergyStats_{0}'.format(timerange),
'id': deviceid,
'xhr': 0,
}, timeout=15)
response.raise_for_status()
data = response.json()
result = {}
# Single result values
values_map = {
'MM_Value_Amp': 'mm_value_amp',
'MM_Value_Power': 'mm_value_power',
'MM_Value_Volt': 'mm_value_volt',
'EnStats_average_value': 'enstats_average_value',
'EnStats_max_value': 'enstats_max_value',
'EnStats_min_value': 'enstats_min_value',
'EnStats_timer_type': 'enstats_timer_type',
'sum_Day': 'sum_day',
'sum_Month': 'sum_month',
'sum_Year': 'sum_year',
}
for avm_key, py_key in values_map.items():
result[py_key] = int(data[avm_key])
# Stats counts
count = int(data["EnStats_count"])
watt_values = [None for i in range(count)]
volt_values = [None for i in range(count)]
for i in range(1, count + 1):
watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
result['watt_values'] = watt_values
result['volt_values'] = volt_values
return result
|
DerMitch/fritzbox-smarthome | fritzhome/__main__.py | cli | python | def cli(context, host, username, password):
context.obj = FritzBox(host, username, password) | FritzBox SmartHome Tool
\b
Provides the following functions:
- A easy to use library for querying SmartHome actors
- This CLI tool for testing
- A carbon client for pipeing data into graphite | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/__main__.py#L25-L35 | null | # coding: utf-8
"""
FRITZ!Box SmartHome Client
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, division
import re
import time
import json
import socket
import click
from .fritz import FritzBox
@click.group()
@click.option('--host', default='169.254.1.1') # fritzbox "emergency" IP
@click.option('--username', default='smarthome')
@click.option('--password', default='smarthome')
@click.pass_context
@cli.command()
@click.pass_context
def actors(context):
"""Display a list of actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
))
@cli.command()
@click.option('--features', type=bool, default=False, help="Show device features")
@click.pass_context
def energy(context, features):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
if actor.temperature is not None:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, {:.2f} °C".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100,
actor.temperature
))
else:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, offline".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100
))
if features:
click.echo(" Features: PowerMeter: {}, Temperatur: {}, Switch: {}".format(
actor.has_powermeter, actor.has_temperature, actor.has_switch
))
@cli.command()
@click.argument('server')
@click.option('--port', type=int, default=2003)
@click.option('--interval', type=int, default=10)
@click.option('--prefix', default="smarthome")
@click.pass_context
def graphite(context, server, port, interval, prefix):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval)
@cli.command(name="switch-on")
@click.argument('ain')
@click.pass_context
def switch_on(context, ain):
"""Switch an actor's power to ON"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} on".format(actor.name))
actor.switch_on()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-off")
@click.argument('ain')
@click.pass_context
def switch_off(context, ain):
"""Switch an actor's power to OFF"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} off".format(actor.name))
actor.switch_off()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-state")
@click.argument('ain')
@click.pass_context
def switch_state(context, ain):
"""Get an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("State for {} is: {}".format(ain,'ON' if actor.get_state() else 'OFF'))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-toggle")
@click.argument('ain')
@click.pass_context
def switch_toggle(context, ain):
"""Toggle an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
if actor.get_state():
actor.switch_off()
click.echo("State for {} is now OFF".format(ain))
else:
actor.switch_on()
click.echo("State for {} is now ON".format(ain))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command()
@click.option('--format', type=click.Choice(['plain', 'json']),
default='plain')
@click.pass_context
def logs(context, format):
"""Show system logs since last reboot"""
fritz = context.obj
fritz.login()
messages = fritz.get_logs()
if format == "plain":
for msg in messages:
merged = "{} {} {}".format(msg.date, msg.time, msg.message.encode("UTF-8"))
click.echo(merged)
if format == "json":
entries = [msg._asdict() for msg in messages]
click.echo(json.dumps({
"entries": entries,
}))
if __name__ == '__main__':
cli()
|
DerMitch/fritzbox-smarthome | fritzhome/__main__.py | actors | python | def actors(context):
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
)) | Display a list of actors | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/__main__.py#L40-L62 | null | # coding: utf-8
"""
FRITZ!Box SmartHome Client
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, division
import re
import time
import json
import socket
import click
from .fritz import FritzBox
@click.group()
@click.option('--host', default='169.254.1.1') # fritzbox "emergency" IP
@click.option('--username', default='smarthome')
@click.option('--password', default='smarthome')
@click.pass_context
def cli(context, host, username, password):
"""
FritzBox SmartHome Tool
\b
Provides the following functions:
- A easy to use library for querying SmartHome actors
- This CLI tool for testing
- A carbon client for pipeing data into graphite
"""
context.obj = FritzBox(host, username, password)
@cli.command()
@click.pass_context
@cli.command()
@click.option('--features', type=bool, default=False, help="Show device features")
@click.pass_context
def energy(context, features):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
if actor.temperature is not None:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, {:.2f} °C".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100,
actor.temperature
))
else:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, offline".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100
))
if features:
click.echo(" Features: PowerMeter: {}, Temperatur: {}, Switch: {}".format(
actor.has_powermeter, actor.has_temperature, actor.has_switch
))
@cli.command()
@click.argument('server')
@click.option('--port', type=int, default=2003)
@click.option('--interval', type=int, default=10)
@click.option('--prefix', default="smarthome")
@click.pass_context
def graphite(context, server, port, interval, prefix):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval)
@cli.command(name="switch-on")
@click.argument('ain')
@click.pass_context
def switch_on(context, ain):
"""Switch an actor's power to ON"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} on".format(actor.name))
actor.switch_on()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-off")
@click.argument('ain')
@click.pass_context
def switch_off(context, ain):
"""Switch an actor's power to OFF"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} off".format(actor.name))
actor.switch_off()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-state")
@click.argument('ain')
@click.pass_context
def switch_state(context, ain):
"""Get an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("State for {} is: {}".format(ain,'ON' if actor.get_state() else 'OFF'))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-toggle")
@click.argument('ain')
@click.pass_context
def switch_toggle(context, ain):
"""Toggle an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
if actor.get_state():
actor.switch_off()
click.echo("State for {} is now OFF".format(ain))
else:
actor.switch_on()
click.echo("State for {} is now ON".format(ain))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command()
@click.option('--format', type=click.Choice(['plain', 'json']),
default='plain')
@click.pass_context
def logs(context, format):
"""Show system logs since last reboot"""
fritz = context.obj
fritz.login()
messages = fritz.get_logs()
if format == "plain":
for msg in messages:
merged = "{} {} {}".format(msg.date, msg.time, msg.message.encode("UTF-8"))
click.echo(merged)
if format == "json":
entries = [msg._asdict() for msg in messages]
click.echo(json.dumps({
"entries": entries,
}))
if __name__ == '__main__':
cli()
|
DerMitch/fritzbox-smarthome | fritzhome/__main__.py | energy | python | def energy(context, features):
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
if actor.temperature is not None:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, {:.2f} °C".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100,
actor.temperature
))
else:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, offline".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100
))
if features:
click.echo(" Features: PowerMeter: {}, Temperatur: {}, Switch: {}".format(
actor.has_powermeter, actor.has_temperature, actor.has_switch
)) | Display energy stats of all actors | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/__main__.py#L68-L92 | null | # coding: utf-8
"""
FRITZ!Box SmartHome Client
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, division
import re
import time
import json
import socket
import click
from .fritz import FritzBox
@click.group()
@click.option('--host', default='169.254.1.1') # fritzbox "emergency" IP
@click.option('--username', default='smarthome')
@click.option('--password', default='smarthome')
@click.pass_context
def cli(context, host, username, password):
"""
FritzBox SmartHome Tool
\b
Provides the following functions:
- A easy to use library for querying SmartHome actors
- This CLI tool for testing
- A carbon client for pipeing data into graphite
"""
context.obj = FritzBox(host, username, password)
@cli.command()
@click.pass_context
def actors(context):
"""Display a list of actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
))
@cli.command()
@click.option('--features', type=bool, default=False, help="Show device features")
@click.pass_context
@cli.command()
@click.argument('server')
@click.option('--port', type=int, default=2003)
@click.option('--interval', type=int, default=10)
@click.option('--prefix', default="smarthome")
@click.pass_context
def graphite(context, server, port, interval, prefix):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval)
@cli.command(name="switch-on")
@click.argument('ain')
@click.pass_context
def switch_on(context, ain):
"""Switch an actor's power to ON"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} on".format(actor.name))
actor.switch_on()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-off")
@click.argument('ain')
@click.pass_context
def switch_off(context, ain):
"""Switch an actor's power to OFF"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} off".format(actor.name))
actor.switch_off()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-state")
@click.argument('ain')
@click.pass_context
def switch_state(context, ain):
"""Get an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("State for {} is: {}".format(ain,'ON' if actor.get_state() else 'OFF'))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-toggle")
@click.argument('ain')
@click.pass_context
def switch_toggle(context, ain):
"""Toggle an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
if actor.get_state():
actor.switch_off()
click.echo("State for {} is now OFF".format(ain))
else:
actor.switch_on()
click.echo("State for {} is now ON".format(ain))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command()
@click.option('--format', type=click.Choice(['plain', 'json']),
default='plain')
@click.pass_context
def logs(context, format):
"""Show system logs since last reboot"""
fritz = context.obj
fritz.login()
messages = fritz.get_logs()
if format == "plain":
for msg in messages:
merged = "{} {} {}".format(msg.date, msg.time, msg.message.encode("UTF-8"))
click.echo(merged)
if format == "json":
entries = [msg._asdict() for msg in messages]
click.echo(json.dumps({
"entries": entries,
}))
if __name__ == '__main__':
cli()
|
DerMitch/fritzbox-smarthome | fritzhome/__main__.py | graphite | python | def graphite(context, server, port, interval, prefix):
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval) | Display energy stats of all actors | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/__main__.py#L101-L157 | [
"def send(key, value):\n \"\"\"Send a key-value-pair to carbon\"\"\"\n now = int(time.time())\n payload = \"{} {} {}\\n\".format(key, value, now)\n sock.sendall(payload)\n"
] | # coding: utf-8
"""
FRITZ!Box SmartHome Client
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, division
import re
import time
import json
import socket
import click
from .fritz import FritzBox
@click.group()
@click.option('--host', default='169.254.1.1') # fritzbox "emergency" IP
@click.option('--username', default='smarthome')
@click.option('--password', default='smarthome')
@click.pass_context
def cli(context, host, username, password):
"""
FritzBox SmartHome Tool
\b
Provides the following functions:
- A easy to use library for querying SmartHome actors
- This CLI tool for testing
- A carbon client for pipeing data into graphite
"""
context.obj = FritzBox(host, username, password)
@cli.command()
@click.pass_context
def actors(context):
"""Display a list of actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
))
@cli.command()
@click.option('--features', type=bool, default=False, help="Show device features")
@click.pass_context
def energy(context, features):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
if actor.temperature is not None:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, {:.2f} °C".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100,
actor.temperature
))
else:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, offline".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100
))
if features:
click.echo(" Features: PowerMeter: {}, Temperatur: {}, Switch: {}".format(
actor.has_powermeter, actor.has_temperature, actor.has_switch
))
@cli.command()
@click.argument('server')
@click.option('--port', type=int, default=2003)
@click.option('--interval', type=int, default=10)
@click.option('--prefix', default="smarthome")
@click.pass_context
@cli.command(name="switch-on")
@click.argument('ain')
@click.pass_context
def switch_on(context, ain):
"""Switch an actor's power to ON"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} on".format(actor.name))
actor.switch_on()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-off")
@click.argument('ain')
@click.pass_context
def switch_off(context, ain):
"""Switch an actor's power to OFF"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} off".format(actor.name))
actor.switch_off()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-state")
@click.argument('ain')
@click.pass_context
def switch_state(context, ain):
"""Get an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("State for {} is: {}".format(ain,'ON' if actor.get_state() else 'OFF'))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-toggle")
@click.argument('ain')
@click.pass_context
def switch_toggle(context, ain):
"""Toggle an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
if actor.get_state():
actor.switch_off()
click.echo("State for {} is now OFF".format(ain))
else:
actor.switch_on()
click.echo("State for {} is now ON".format(ain))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command()
@click.option('--format', type=click.Choice(['plain', 'json']),
default='plain')
@click.pass_context
def logs(context, format):
"""Show system logs since last reboot"""
fritz = context.obj
fritz.login()
messages = fritz.get_logs()
if format == "plain":
for msg in messages:
merged = "{} {} {}".format(msg.date, msg.time, msg.message.encode("UTF-8"))
click.echo(merged)
if format == "json":
entries = [msg._asdict() for msg in messages]
click.echo(json.dumps({
"entries": entries,
}))
if __name__ == '__main__':
cli()
|
DerMitch/fritzbox-smarthome | fritzhome/__main__.py | switch_on | python | def switch_on(context, ain):
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} on".format(actor.name))
actor.switch_on()
else:
click.echo("Actor not found: {}".format(ain)) | Switch an actor's power to ON | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/__main__.py#L163-L171 | null | # coding: utf-8
"""
FRITZ!Box SmartHome Client
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, division
import re
import time
import json
import socket
import click
from .fritz import FritzBox
@click.group()
@click.option('--host', default='169.254.1.1') # fritzbox "emergency" IP
@click.option('--username', default='smarthome')
@click.option('--password', default='smarthome')
@click.pass_context
def cli(context, host, username, password):
"""
FritzBox SmartHome Tool
\b
Provides the following functions:
- A easy to use library for querying SmartHome actors
- This CLI tool for testing
- A carbon client for pipeing data into graphite
"""
context.obj = FritzBox(host, username, password)
@cli.command()
@click.pass_context
def actors(context):
"""Display a list of actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
))
@cli.command()
@click.option('--features', type=bool, default=False, help="Show device features")
@click.pass_context
def energy(context, features):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
if actor.temperature is not None:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, {:.2f} °C".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100,
actor.temperature
))
else:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, offline".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100
))
if features:
click.echo(" Features: PowerMeter: {}, Temperatur: {}, Switch: {}".format(
actor.has_powermeter, actor.has_temperature, actor.has_switch
))
@cli.command()
@click.argument('server')
@click.option('--port', type=int, default=2003)
@click.option('--interval', type=int, default=10)
@click.option('--prefix', default="smarthome")
@click.pass_context
def graphite(context, server, port, interval, prefix):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval)
@cli.command(name="switch-on")
@click.argument('ain')
@click.pass_context
@cli.command(name="switch-off")
@click.argument('ain')
@click.pass_context
def switch_off(context, ain):
"""Switch an actor's power to OFF"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} off".format(actor.name))
actor.switch_off()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-state")
@click.argument('ain')
@click.pass_context
def switch_state(context, ain):
"""Get an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("State for {} is: {}".format(ain,'ON' if actor.get_state() else 'OFF'))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-toggle")
@click.argument('ain')
@click.pass_context
def switch_toggle(context, ain):
"""Toggle an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
if actor.get_state():
actor.switch_off()
click.echo("State for {} is now OFF".format(ain))
else:
actor.switch_on()
click.echo("State for {} is now ON".format(ain))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command()
@click.option('--format', type=click.Choice(['plain', 'json']),
default='plain')
@click.pass_context
def logs(context, format):
"""Show system logs since last reboot"""
fritz = context.obj
fritz.login()
messages = fritz.get_logs()
if format == "plain":
for msg in messages:
merged = "{} {} {}".format(msg.date, msg.time, msg.message.encode("UTF-8"))
click.echo(merged)
if format == "json":
entries = [msg._asdict() for msg in messages]
click.echo(json.dumps({
"entries": entries,
}))
if __name__ == '__main__':
cli()
|
DerMitch/fritzbox-smarthome | fritzhome/__main__.py | switch_state | python | def switch_state(context, ain):
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("State for {} is: {}".format(ain,'ON' if actor.get_state() else 'OFF'))
else:
click.echo("Actor not found: {}".format(ain)) | Get an actor's power state | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/__main__.py#L191-L198 | null | # coding: utf-8
"""
FRITZ!Box SmartHome Client
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, division
import re
import time
import json
import socket
import click
from .fritz import FritzBox
@click.group()
@click.option('--host', default='169.254.1.1') # fritzbox "emergency" IP
@click.option('--username', default='smarthome')
@click.option('--password', default='smarthome')
@click.pass_context
def cli(context, host, username, password):
"""
FritzBox SmartHome Tool
\b
Provides the following functions:
- A easy to use library for querying SmartHome actors
- This CLI tool for testing
- A carbon client for pipeing data into graphite
"""
context.obj = FritzBox(host, username, password)
@cli.command()
@click.pass_context
def actors(context):
"""Display a list of actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
))
@cli.command()
@click.option('--features', type=bool, default=False, help="Show device features")
@click.pass_context
def energy(context, features):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
if actor.temperature is not None:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, {:.2f} °C".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100,
actor.temperature
))
else:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, offline".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100
))
if features:
click.echo(" Features: PowerMeter: {}, Temperatur: {}, Switch: {}".format(
actor.has_powermeter, actor.has_temperature, actor.has_switch
))
@cli.command()
@click.argument('server')
@click.option('--port', type=int, default=2003)
@click.option('--interval', type=int, default=10)
@click.option('--prefix', default="smarthome")
@click.pass_context
def graphite(context, server, port, interval, prefix):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval)
@cli.command(name="switch-on")
@click.argument('ain')
@click.pass_context
def switch_on(context, ain):
"""Switch an actor's power to ON"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} on".format(actor.name))
actor.switch_on()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-off")
@click.argument('ain')
@click.pass_context
def switch_off(context, ain):
"""Switch an actor's power to OFF"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} off".format(actor.name))
actor.switch_off()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-state")
@click.argument('ain')
@click.pass_context
@cli.command(name="switch-toggle")
@click.argument('ain')
@click.pass_context
def switch_toggle(context, ain):
"""Toggle an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
if actor.get_state():
actor.switch_off()
click.echo("State for {} is now OFF".format(ain))
else:
actor.switch_on()
click.echo("State for {} is now ON".format(ain))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command()
@click.option('--format', type=click.Choice(['plain', 'json']),
default='plain')
@click.pass_context
def logs(context, format):
"""Show system logs since last reboot"""
fritz = context.obj
fritz.login()
messages = fritz.get_logs()
if format == "plain":
for msg in messages:
merged = "{} {} {}".format(msg.date, msg.time, msg.message.encode("UTF-8"))
click.echo(merged)
if format == "json":
entries = [msg._asdict() for msg in messages]
click.echo(json.dumps({
"entries": entries,
}))
if __name__ == '__main__':
cli()
|
DerMitch/fritzbox-smarthome | fritzhome/__main__.py | switch_toggle | python | def switch_toggle(context, ain):
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
if actor.get_state():
actor.switch_off()
click.echo("State for {} is now OFF".format(ain))
else:
actor.switch_on()
click.echo("State for {} is now ON".format(ain))
else:
click.echo("Actor not found: {}".format(ain)) | Toggle an actor's power state | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/__main__.py#L204-L216 | null | # coding: utf-8
"""
FRITZ!Box SmartHome Client
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, division
import re
import time
import json
import socket
import click
from .fritz import FritzBox
@click.group()
@click.option('--host', default='169.254.1.1') # fritzbox "emergency" IP
@click.option('--username', default='smarthome')
@click.option('--password', default='smarthome')
@click.pass_context
def cli(context, host, username, password):
"""
FritzBox SmartHome Tool
\b
Provides the following functions:
- A easy to use library for querying SmartHome actors
- This CLI tool for testing
- A carbon client for pipeing data into graphite
"""
context.obj = FritzBox(host, username, password)
@cli.command()
@click.pass_context
def actors(context):
"""Display a list of actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
))
@cli.command()
@click.option('--features', type=bool, default=False, help="Show device features")
@click.pass_context
def energy(context, features):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
if actor.temperature is not None:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, {:.2f} °C".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100,
actor.temperature
))
else:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, offline".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100
))
if features:
click.echo(" Features: PowerMeter: {}, Temperatur: {}, Switch: {}".format(
actor.has_powermeter, actor.has_temperature, actor.has_switch
))
@cli.command()
@click.argument('server')
@click.option('--port', type=int, default=2003)
@click.option('--interval', type=int, default=10)
@click.option('--prefix', default="smarthome")
@click.pass_context
def graphite(context, server, port, interval, prefix):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval)
@cli.command(name="switch-on")
@click.argument('ain')
@click.pass_context
def switch_on(context, ain):
"""Switch an actor's power to ON"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} on".format(actor.name))
actor.switch_on()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-off")
@click.argument('ain')
@click.pass_context
def switch_off(context, ain):
"""Switch an actor's power to OFF"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} off".format(actor.name))
actor.switch_off()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-state")
@click.argument('ain')
@click.pass_context
def switch_state(context, ain):
"""Get an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("State for {} is: {}".format(ain,'ON' if actor.get_state() else 'OFF'))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-toggle")
@click.argument('ain')
@click.pass_context
@cli.command()
@click.option('--format', type=click.Choice(['plain', 'json']),
default='plain')
@click.pass_context
def logs(context, format):
"""Show system logs since last reboot"""
fritz = context.obj
fritz.login()
messages = fritz.get_logs()
if format == "plain":
for msg in messages:
merged = "{} {} {}".format(msg.date, msg.time, msg.message.encode("UTF-8"))
click.echo(merged)
if format == "json":
entries = [msg._asdict() for msg in messages]
click.echo(json.dumps({
"entries": entries,
}))
if __name__ == '__main__':
cli()
|
DerMitch/fritzbox-smarthome | fritzhome/__main__.py | logs | python | def logs(context, format):
fritz = context.obj
fritz.login()
messages = fritz.get_logs()
if format == "plain":
for msg in messages:
merged = "{} {} {}".format(msg.date, msg.time, msg.message.encode("UTF-8"))
click.echo(merged)
if format == "json":
entries = [msg._asdict() for msg in messages]
click.echo(json.dumps({
"entries": entries,
})) | Show system logs since last reboot | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/__main__.py#L223-L238 | null | # coding: utf-8
"""
FRITZ!Box SmartHome Client
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, division
import re
import time
import json
import socket
import click
from .fritz import FritzBox
@click.group()
@click.option('--host', default='169.254.1.1') # fritzbox "emergency" IP
@click.option('--username', default='smarthome')
@click.option('--password', default='smarthome')
@click.pass_context
def cli(context, host, username, password):
"""
FritzBox SmartHome Tool
\b
Provides the following functions:
- A easy to use library for querying SmartHome actors
- This CLI tool for testing
- A carbon client for pipeing data into graphite
"""
context.obj = FritzBox(host, username, password)
@cli.command()
@click.pass_context
def actors(context):
"""Display a list of actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
))
@cli.command()
@click.option('--features', type=bool, default=False, help="Show device features")
@click.pass_context
def energy(context, features):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
if actor.temperature is not None:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, {:.2f} °C".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100,
actor.temperature
))
else:
click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, offline".format(
actor.name.encode('utf-8'),
actor.actor_id,
(actor.get_power() or 0.0) / 1000,
(actor.get_energy() or 0.0) / 100
))
if features:
click.echo(" Features: PowerMeter: {}, Temperatur: {}, Switch: {}".format(
actor.has_powermeter, actor.has_temperature, actor.has_switch
))
@cli.command()
@click.argument('server')
@click.option('--port', type=int, default=2003)
@click.option('--interval', type=int, default=10)
@click.option('--prefix', default="smarthome")
@click.pass_context
def graphite(context, server, port, interval, prefix):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval)
@cli.command(name="switch-on")
@click.argument('ain')
@click.pass_context
def switch_on(context, ain):
"""Switch an actor's power to ON"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} on".format(actor.name))
actor.switch_on()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-off")
@click.argument('ain')
@click.pass_context
def switch_off(context, ain):
"""Switch an actor's power to OFF"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("Switching {} off".format(actor.name))
actor.switch_off()
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-state")
@click.argument('ain')
@click.pass_context
def switch_state(context, ain):
"""Get an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
click.echo("State for {} is: {}".format(ain,'ON' if actor.get_state() else 'OFF'))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command(name="switch-toggle")
@click.argument('ain')
@click.pass_context
def switch_toggle(context, ain):
"""Toggle an actor's power state"""
context.obj.login()
actor = context.obj.get_actor_by_ain(ain)
if actor:
if actor.get_state():
actor.switch_off()
click.echo("State for {} is now OFF".format(ain))
else:
actor.switch_on()
click.echo("State for {} is now ON".format(ain))
else:
click.echo("Actor not found: {}".format(ain))
@cli.command()
@click.option('--format', type=click.Choice(['plain', 'json']),
default='plain')
@click.pass_context
if __name__ == '__main__':
cli()
|
DerMitch/fritzbox-smarthome | fritzhome/actor.py | Actor.get_power | python | def get_power(self):
value = self.box.homeautoswitch("getswitchpower", self.actor_id)
return int(value) if value.isdigit() else None | Returns the current power usage in milliWatts.
Attention: Returns None if the value can't be queried or is unknown. | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/actor.py#L83-L89 | null | class Actor(object):
"""
Represents a single SmartHome actor.
You usally don't create that class yourself, use FritzBox.get_actors
instead.
"""
def __init__(self, fritzbox, device):
self.box = fritzbox
self.actor_id = device.attrib['identifier']
self.device_id = device.attrib['id']
self.name = device.find('name').text
self.fwversion = device.attrib['fwversion']
self.productname = device.attrib['productname']
self.manufacturer = device.attrib['manufacturer']
self.functionbitmask = int(device.attrib['functionbitmask'])
self.has_powermeter = self.functionbitmask & (1 << 7) > 0
self.has_temperature = self.functionbitmask & (1 << 8) > 0
self.has_switch = self.functionbitmask & (1 << 9) > 0
self.has_heating_controller = self.functionbitmask & (1 << 6) > 0
self.temperature = 0.0
if self.has_temperature:
if device.find("temperature").find("celsius").text is not None:
self.temperature = int(device.find("temperature").find("celsius").text) / 10
else:
logger.info("Actor " + self.name + " seems offline. Returning None as temperature.")
self.temperature = None
self.target_temperature = 0.0
self.target_temperature = 0.0
self.battery_low = True
if self.has_heating_controller:
hkr = device.find("hkr")
if hkr is not None:
for child in hkr:
if child.tag == 'tist':
self.temperature = self.__get_temp(child.text)
elif child.tag == 'tsoll':
self.target_temperature = self.__get_temp(child.text)
elif child.tag == 'batterylow':
self.battery_low = (child.text == '1')
def switch_on(self):
"""
Set the power switch to ON.
"""
return self.box.set_switch_on(self.actor_id)
def switch_off(self):
"""
Set the power switch to OFF.
"""
return self.box.set_switch_off(self.actor_id)
def get_state(self):
"""
Get the current switch state.
"""
return bool(
int(self.box.homeautoswitch("getswitchstate", self.actor_id))
)
def get_present(self):
"""
Check if the registered actor is currently present (reachable).
"""
return bool(
int(self.box.homeautoswitch("getswitchpresent", self.actor_id))
)
def get_energy(self):
"""
Returns the consumed energy since the start of the statistics in Wh.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("getswitchenergy", self.actor_id)
return int(value) if value.isdigit() else None
def get_temperature(self):
"""
Returns the current environment temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
#raise NotImplementedError("This should work according to the AVM docs, but don't...")
value = self.box.homeautoswitch("gettemperature", self.actor_id)
if value.isdigit():
self.temperature = float(value)/10
else:
self.temperature = None
return self.temperature
def __get_temp(self, value):
# Temperature is send from fritz.box a little weird
if value.isdigit():
value = float(value)
if value == 253:
return 0
elif value == 254:
return 30
else:
return value / 2
else:
return None
def get_target_temperature(self):
"""
Returns the actual target temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("gethkrtsoll", self.actor_id)
self.target_temperature = self.__get_temp(value)
return self.target_temperature
def set_temperature(self, temp):
"""
Sets the temperature in celcius
"""
# Temperature is send to fritz.box a little weird
param = 16 + ( ( temp - 8 ) * 2 )
if param < 16:
param = 253
logger.info("Actor " + self.name + ": Temperature control set to off")
elif param >= 56:
param = 254
logger.info("Actor " + self.name + ": Temperature control set to on")
else:
logger.info("Actor " + self.name + ": Temperature control set to " + str(temp))
return self.box.homeautoswitch("sethkrtsoll", self.actor_id, param)
def get_consumption(self, timerange="10"):
"""
Return the energy report for the device.
"""
return self.box.get_consumption(self.device_id, timerange)
def __repr__(self):
return u"<Actor {}>".format(self.name)
|
DerMitch/fritzbox-smarthome | fritzhome/actor.py | Actor.get_energy | python | def get_energy(self):
value = self.box.homeautoswitch("getswitchenergy", self.actor_id)
return int(value) if value.isdigit() else None | Returns the consumed energy since the start of the statistics in Wh.
Attention: Returns None if the value can't be queried or is unknown. | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/actor.py#L91-L97 | null | class Actor(object):
"""
Represents a single SmartHome actor.
You usally don't create that class yourself, use FritzBox.get_actors
instead.
"""
def __init__(self, fritzbox, device):
self.box = fritzbox
self.actor_id = device.attrib['identifier']
self.device_id = device.attrib['id']
self.name = device.find('name').text
self.fwversion = device.attrib['fwversion']
self.productname = device.attrib['productname']
self.manufacturer = device.attrib['manufacturer']
self.functionbitmask = int(device.attrib['functionbitmask'])
self.has_powermeter = self.functionbitmask & (1 << 7) > 0
self.has_temperature = self.functionbitmask & (1 << 8) > 0
self.has_switch = self.functionbitmask & (1 << 9) > 0
self.has_heating_controller = self.functionbitmask & (1 << 6) > 0
self.temperature = 0.0
if self.has_temperature:
if device.find("temperature").find("celsius").text is not None:
self.temperature = int(device.find("temperature").find("celsius").text) / 10
else:
logger.info("Actor " + self.name + " seems offline. Returning None as temperature.")
self.temperature = None
self.target_temperature = 0.0
self.target_temperature = 0.0
self.battery_low = True
if self.has_heating_controller:
hkr = device.find("hkr")
if hkr is not None:
for child in hkr:
if child.tag == 'tist':
self.temperature = self.__get_temp(child.text)
elif child.tag == 'tsoll':
self.target_temperature = self.__get_temp(child.text)
elif child.tag == 'batterylow':
self.battery_low = (child.text == '1')
def switch_on(self):
"""
Set the power switch to ON.
"""
return self.box.set_switch_on(self.actor_id)
def switch_off(self):
"""
Set the power switch to OFF.
"""
return self.box.set_switch_off(self.actor_id)
def get_state(self):
"""
Get the current switch state.
"""
return bool(
int(self.box.homeautoswitch("getswitchstate", self.actor_id))
)
def get_present(self):
"""
Check if the registered actor is currently present (reachable).
"""
return bool(
int(self.box.homeautoswitch("getswitchpresent", self.actor_id))
)
def get_power(self):
"""
Returns the current power usage in milliWatts.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("getswitchpower", self.actor_id)
return int(value) if value.isdigit() else None
def get_temperature(self):
"""
Returns the current environment temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
#raise NotImplementedError("This should work according to the AVM docs, but don't...")
value = self.box.homeautoswitch("gettemperature", self.actor_id)
if value.isdigit():
self.temperature = float(value)/10
else:
self.temperature = None
return self.temperature
def __get_temp(self, value):
# Temperature is send from fritz.box a little weird
if value.isdigit():
value = float(value)
if value == 253:
return 0
elif value == 254:
return 30
else:
return value / 2
else:
return None
def get_target_temperature(self):
"""
Returns the actual target temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("gethkrtsoll", self.actor_id)
self.target_temperature = self.__get_temp(value)
return self.target_temperature
def set_temperature(self, temp):
"""
Sets the temperature in celcius
"""
# Temperature is send to fritz.box a little weird
param = 16 + ( ( temp - 8 ) * 2 )
if param < 16:
param = 253
logger.info("Actor " + self.name + ": Temperature control set to off")
elif param >= 56:
param = 254
logger.info("Actor " + self.name + ": Temperature control set to on")
else:
logger.info("Actor " + self.name + ": Temperature control set to " + str(temp))
return self.box.homeautoswitch("sethkrtsoll", self.actor_id, param)
def get_consumption(self, timerange="10"):
"""
Return the energy report for the device.
"""
return self.box.get_consumption(self.device_id, timerange)
def __repr__(self):
return u"<Actor {}>".format(self.name)
|
DerMitch/fritzbox-smarthome | fritzhome/actor.py | Actor.get_temperature | python | def get_temperature(self):
#raise NotImplementedError("This should work according to the AVM docs, but don't...")
value = self.box.homeautoswitch("gettemperature", self.actor_id)
if value.isdigit():
self.temperature = float(value)/10
else:
self.temperature = None
return self.temperature | Returns the current environment temperature.
Attention: Returns None if the value can't be queried or is unknown. | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/actor.py#L99-L110 | null | class Actor(object):
"""
Represents a single SmartHome actor.
You usally don't create that class yourself, use FritzBox.get_actors
instead.
"""
def __init__(self, fritzbox, device):
self.box = fritzbox
self.actor_id = device.attrib['identifier']
self.device_id = device.attrib['id']
self.name = device.find('name').text
self.fwversion = device.attrib['fwversion']
self.productname = device.attrib['productname']
self.manufacturer = device.attrib['manufacturer']
self.functionbitmask = int(device.attrib['functionbitmask'])
self.has_powermeter = self.functionbitmask & (1 << 7) > 0
self.has_temperature = self.functionbitmask & (1 << 8) > 0
self.has_switch = self.functionbitmask & (1 << 9) > 0
self.has_heating_controller = self.functionbitmask & (1 << 6) > 0
self.temperature = 0.0
if self.has_temperature:
if device.find("temperature").find("celsius").text is not None:
self.temperature = int(device.find("temperature").find("celsius").text) / 10
else:
logger.info("Actor " + self.name + " seems offline. Returning None as temperature.")
self.temperature = None
self.target_temperature = 0.0
self.target_temperature = 0.0
self.battery_low = True
if self.has_heating_controller:
hkr = device.find("hkr")
if hkr is not None:
for child in hkr:
if child.tag == 'tist':
self.temperature = self.__get_temp(child.text)
elif child.tag == 'tsoll':
self.target_temperature = self.__get_temp(child.text)
elif child.tag == 'batterylow':
self.battery_low = (child.text == '1')
def switch_on(self):
"""
Set the power switch to ON.
"""
return self.box.set_switch_on(self.actor_id)
def switch_off(self):
"""
Set the power switch to OFF.
"""
return self.box.set_switch_off(self.actor_id)
def get_state(self):
"""
Get the current switch state.
"""
return bool(
int(self.box.homeautoswitch("getswitchstate", self.actor_id))
)
def get_present(self):
"""
Check if the registered actor is currently present (reachable).
"""
return bool(
int(self.box.homeautoswitch("getswitchpresent", self.actor_id))
)
def get_power(self):
"""
Returns the current power usage in milliWatts.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("getswitchpower", self.actor_id)
return int(value) if value.isdigit() else None
def get_energy(self):
"""
Returns the consumed energy since the start of the statistics in Wh.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("getswitchenergy", self.actor_id)
return int(value) if value.isdigit() else None
def __get_temp(self, value):
# Temperature is send from fritz.box a little weird
if value.isdigit():
value = float(value)
if value == 253:
return 0
elif value == 254:
return 30
else:
return value / 2
else:
return None
def get_target_temperature(self):
"""
Returns the actual target temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("gethkrtsoll", self.actor_id)
self.target_temperature = self.__get_temp(value)
return self.target_temperature
def set_temperature(self, temp):
"""
Sets the temperature in celcius
"""
# Temperature is send to fritz.box a little weird
param = 16 + ( ( temp - 8 ) * 2 )
if param < 16:
param = 253
logger.info("Actor " + self.name + ": Temperature control set to off")
elif param >= 56:
param = 254
logger.info("Actor " + self.name + ": Temperature control set to on")
else:
logger.info("Actor " + self.name + ": Temperature control set to " + str(temp))
return self.box.homeautoswitch("sethkrtsoll", self.actor_id, param)
def get_consumption(self, timerange="10"):
"""
Return the energy report for the device.
"""
return self.box.get_consumption(self.device_id, timerange)
def __repr__(self):
return u"<Actor {}>".format(self.name)
|
DerMitch/fritzbox-smarthome | fritzhome/actor.py | Actor.get_target_temperature | python | def get_target_temperature(self):
value = self.box.homeautoswitch("gethkrtsoll", self.actor_id)
self.target_temperature = self.__get_temp(value)
return self.target_temperature | Returns the actual target temperature.
Attention: Returns None if the value can't be queried or is unknown. | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/actor.py#L125-L132 | [
"def __get_temp(self, value):\n # Temperature is send from fritz.box a little weird\n if value.isdigit():\n value = float(value)\n if value == 253:\n return 0\n elif value == 254:\n return 30\n else:\n return value / 2\n else:\n return None\n"
] | class Actor(object):
"""
Represents a single SmartHome actor.
You usally don't create that class yourself, use FritzBox.get_actors
instead.
"""
def __init__(self, fritzbox, device):
self.box = fritzbox
self.actor_id = device.attrib['identifier']
self.device_id = device.attrib['id']
self.name = device.find('name').text
self.fwversion = device.attrib['fwversion']
self.productname = device.attrib['productname']
self.manufacturer = device.attrib['manufacturer']
self.functionbitmask = int(device.attrib['functionbitmask'])
self.has_powermeter = self.functionbitmask & (1 << 7) > 0
self.has_temperature = self.functionbitmask & (1 << 8) > 0
self.has_switch = self.functionbitmask & (1 << 9) > 0
self.has_heating_controller = self.functionbitmask & (1 << 6) > 0
self.temperature = 0.0
if self.has_temperature:
if device.find("temperature").find("celsius").text is not None:
self.temperature = int(device.find("temperature").find("celsius").text) / 10
else:
logger.info("Actor " + self.name + " seems offline. Returning None as temperature.")
self.temperature = None
self.target_temperature = 0.0
self.target_temperature = 0.0
self.battery_low = True
if self.has_heating_controller:
hkr = device.find("hkr")
if hkr is not None:
for child in hkr:
if child.tag == 'tist':
self.temperature = self.__get_temp(child.text)
elif child.tag == 'tsoll':
self.target_temperature = self.__get_temp(child.text)
elif child.tag == 'batterylow':
self.battery_low = (child.text == '1')
def switch_on(self):
"""
Set the power switch to ON.
"""
return self.box.set_switch_on(self.actor_id)
def switch_off(self):
"""
Set the power switch to OFF.
"""
return self.box.set_switch_off(self.actor_id)
def get_state(self):
"""
Get the current switch state.
"""
return bool(
int(self.box.homeautoswitch("getswitchstate", self.actor_id))
)
def get_present(self):
"""
Check if the registered actor is currently present (reachable).
"""
return bool(
int(self.box.homeautoswitch("getswitchpresent", self.actor_id))
)
def get_power(self):
"""
Returns the current power usage in milliWatts.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("getswitchpower", self.actor_id)
return int(value) if value.isdigit() else None
def get_energy(self):
"""
Returns the consumed energy since the start of the statistics in Wh.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("getswitchenergy", self.actor_id)
return int(value) if value.isdigit() else None
def get_temperature(self):
"""
Returns the current environment temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
#raise NotImplementedError("This should work according to the AVM docs, but don't...")
value = self.box.homeautoswitch("gettemperature", self.actor_id)
if value.isdigit():
self.temperature = float(value)/10
else:
self.temperature = None
return self.temperature
def __get_temp(self, value):
# Temperature is send from fritz.box a little weird
if value.isdigit():
value = float(value)
if value == 253:
return 0
elif value == 254:
return 30
else:
return value / 2
else:
return None
def set_temperature(self, temp):
"""
Sets the temperature in celcius
"""
# Temperature is send to fritz.box a little weird
param = 16 + ( ( temp - 8 ) * 2 )
if param < 16:
param = 253
logger.info("Actor " + self.name + ": Temperature control set to off")
elif param >= 56:
param = 254
logger.info("Actor " + self.name + ": Temperature control set to on")
else:
logger.info("Actor " + self.name + ": Temperature control set to " + str(temp))
return self.box.homeautoswitch("sethkrtsoll", self.actor_id, param)
def get_consumption(self, timerange="10"):
"""
Return the energy report for the device.
"""
return self.box.get_consumption(self.device_id, timerange)
def __repr__(self):
return u"<Actor {}>".format(self.name)
|
DerMitch/fritzbox-smarthome | fritzhome/actor.py | Actor.set_temperature | python | def set_temperature(self, temp):
# Temperature is send to fritz.box a little weird
param = 16 + ( ( temp - 8 ) * 2 )
if param < 16:
param = 253
logger.info("Actor " + self.name + ": Temperature control set to off")
elif param >= 56:
param = 254
logger.info("Actor " + self.name + ": Temperature control set to on")
else:
logger.info("Actor " + self.name + ": Temperature control set to " + str(temp))
return self.box.homeautoswitch("sethkrtsoll", self.actor_id, param) | Sets the temperature in celcius | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/actor.py#L134-L150 | null | class Actor(object):
"""
Represents a single SmartHome actor.
You usally don't create that class yourself, use FritzBox.get_actors
instead.
"""
def __init__(self, fritzbox, device):
self.box = fritzbox
self.actor_id = device.attrib['identifier']
self.device_id = device.attrib['id']
self.name = device.find('name').text
self.fwversion = device.attrib['fwversion']
self.productname = device.attrib['productname']
self.manufacturer = device.attrib['manufacturer']
self.functionbitmask = int(device.attrib['functionbitmask'])
self.has_powermeter = self.functionbitmask & (1 << 7) > 0
self.has_temperature = self.functionbitmask & (1 << 8) > 0
self.has_switch = self.functionbitmask & (1 << 9) > 0
self.has_heating_controller = self.functionbitmask & (1 << 6) > 0
self.temperature = 0.0
if self.has_temperature:
if device.find("temperature").find("celsius").text is not None:
self.temperature = int(device.find("temperature").find("celsius").text) / 10
else:
logger.info("Actor " + self.name + " seems offline. Returning None as temperature.")
self.temperature = None
self.target_temperature = 0.0
self.target_temperature = 0.0
self.battery_low = True
if self.has_heating_controller:
hkr = device.find("hkr")
if hkr is not None:
for child in hkr:
if child.tag == 'tist':
self.temperature = self.__get_temp(child.text)
elif child.tag == 'tsoll':
self.target_temperature = self.__get_temp(child.text)
elif child.tag == 'batterylow':
self.battery_low = (child.text == '1')
def switch_on(self):
"""
Set the power switch to ON.
"""
return self.box.set_switch_on(self.actor_id)
def switch_off(self):
"""
Set the power switch to OFF.
"""
return self.box.set_switch_off(self.actor_id)
def get_state(self):
"""
Get the current switch state.
"""
return bool(
int(self.box.homeautoswitch("getswitchstate", self.actor_id))
)
def get_present(self):
"""
Check if the registered actor is currently present (reachable).
"""
return bool(
int(self.box.homeautoswitch("getswitchpresent", self.actor_id))
)
def get_power(self):
"""
Returns the current power usage in milliWatts.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("getswitchpower", self.actor_id)
return int(value) if value.isdigit() else None
def get_energy(self):
"""
Returns the consumed energy since the start of the statistics in Wh.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("getswitchenergy", self.actor_id)
return int(value) if value.isdigit() else None
def get_temperature(self):
"""
Returns the current environment temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
#raise NotImplementedError("This should work according to the AVM docs, but don't...")
value = self.box.homeautoswitch("gettemperature", self.actor_id)
if value.isdigit():
self.temperature = float(value)/10
else:
self.temperature = None
return self.temperature
def __get_temp(self, value):
# Temperature is send from fritz.box a little weird
if value.isdigit():
value = float(value)
if value == 253:
return 0
elif value == 254:
return 30
else:
return value / 2
else:
return None
def get_target_temperature(self):
"""
Returns the actual target temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("gethkrtsoll", self.actor_id)
self.target_temperature = self.__get_temp(value)
return self.target_temperature
def get_consumption(self, timerange="10"):
"""
Return the energy report for the device.
"""
return self.box.get_consumption(self.device_id, timerange)
def __repr__(self):
return u"<Actor {}>".format(self.name)
|
mretegan/crispy | setup.py | main | python | def main():
if sys.version_info < (2, 7):
sys.exit('crispy requires at least Python 2.7')
elif sys.version_info[0] == 3 and sys.version_info < (3, 4):
sys.exit('crispy requires at least Python 3.4')
kwargs = dict(
name='crispy',
version=get_version(),
description='Core-Level Spectroscopy Simulations in Python',
long_description=get_readme(),
license='MIT',
author='Marius Retegan',
author_email='marius.retegan@esrf.eu',
url='https://github.com/mretegan/crispy',
download_url='https://github.com/mretegan/crispy/releases',
keywords='gui, spectroscopy, simulation, synchrotron, science',
install_requires=get_requirements(),
platforms=[
'MacOS :: MacOS X',
'Microsoft :: Windows',
'POSIX :: Linux',
],
packages=[
'crispy',
'crispy.gui',
'crispy.gui.uis',
'crispy.gui.icons',
'crispy.modules',
'crispy.modules.quanty',
'crispy.modules.orca',
'crispy.utils',
],
package_data={
'crispy.gui.uis': [
'*.ui',
'quanty/*.ui',
],
'crispy.gui.icons': [
'*.svg',
],
'crispy.modules.quanty': [
'parameters/*.json.gz',
'templates/*.lua',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: Qt',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Visualization',
]
)
# At the moment pip/setuptools doesn't play nice with shebang paths
# containing white spaces.
# See: https://github.com/pypa/pip/issues/2783
# https://github.com/xonsh/xonsh/issues/879
# The most straight forward workaround is to have a .bat script to run
# crispy on Windows.
if 'win32' in sys.platform:
kwargs['scripts'] = ['scripts/crispy.bat']
else:
kwargs['scripts'] = ['scripts/crispy']
setup(**kwargs) | The main entry point. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/setup.py#L68-L145 | [
"def get_version():\n from crispy import version\n return version.strictversion\n",
"def get_readme():\n _dir = os.path.dirname(os.path.abspath(__file__))\n long_description = ''\n with open(os.path.join(_dir, 'README.rst')) as f:\n for line in f:\n if 'main_window' not in line:\n long_description += line\n return long_description\n",
"def get_requirements():\n requirements = list()\n with open('requirements.txt') as fp:\n for line in fp:\n if line.startswith('#') or line == '\\n':\n continue\n line = line.strip('\\n')\n requirements.append(line)\n return requirements\n"
] | #!/usr/bin/env python
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import, division
__authors__ = ['Marius Retegan']
__license__ = 'MIT'
__date__ = '07/10/2018'
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_readme():
_dir = os.path.dirname(os.path.abspath(__file__))
long_description = ''
with open(os.path.join(_dir, 'README.rst')) as f:
for line in f:
if 'main_window' not in line:
long_description += line
return long_description
def get_version():
from crispy import version
return version.strictversion
def get_requirements():
requirements = list()
with open('requirements.txt') as fp:
for line in fp:
if line.startswith('#') or line == '\n':
continue
line = line.strip('\n')
requirements.append(line)
return requirements
if __name__ == '__main__':
main()
|
mretegan/crispy | crispy/gui/quanty.py | QuantySpectra.loadFromDisk | python | def loadFromDisk(self, calculation):
suffixes = {
'Isotropic': 'iso',
'Circular Dichroism (R-L)': 'cd',
'Right Polarized (R)': 'r',
'Left Polarized (L)': 'l',
'Linear Dichroism (V-H)': 'ld',
'Vertical Polarized (V)': 'v',
'Horizontal Polarized (H)': 'h',
}
self.raw = list()
for spectrumName in self.toPlot:
suffix = suffixes[spectrumName]
path = '{}_{}.spec'.format(calculation.baseName, suffix)
try:
data = np.loadtxt(path, skiprows=5)
except (OSError, IOError) as e:
raise e
rows, columns = data.shape
if calculation.experiment in ['XAS', 'XPS', 'XES']:
xMin = calculation.xMin
xMax = calculation.xMax
xNPoints = calculation.xNPoints
if calculation.experiment == 'XES':
x = np.linspace(xMin, xMax, xNPoints + 1)
x = x[::-1]
y = data[:, 2]
y = y / np.abs(y.max())
else:
x = np.linspace(xMin, xMax, xNPoints + 1)
y = data[:, 2::2].flatten()
spectrum = Spectrum1D(x, y)
spectrum.name = spectrumName
if len(suffix) > 2:
spectrum.shortName = suffix.title()
else:
spectrum.shortName = suffix.upper()
if calculation.experiment in ['XAS', ]:
spectrum.xLabel = 'Absorption Energy (eV)'
elif calculation.experiment in ['XPS', ]:
spectrum.xLabel = 'Binding Energy (eV)'
elif calculation.experiment in ['XES', ]:
spectrum.xLabel = 'Emission Energy (eV)'
spectrum.yLabel = 'Intensity (a.u.)'
self.broadenings = {'gaussian': (calculation.xGaussian, ), }
else:
xMin = calculation.xMin
xMax = calculation.xMax
xNPoints = calculation.xNPoints
yMin = calculation.yMin
yMax = calculation.yMax
yNPoints = calculation.yNPoints
x = np.linspace(xMin, xMax, xNPoints + 1)
y = np.linspace(yMin, yMax, yNPoints + 1)
z = data[:, 2::2]
spectrum = Spectrum2D(x, y, z)
spectrum.name = spectrumName
if len(suffix) > 2:
spectrum.shortName = suffix.title()
else:
spectrum.shortName = suffix.upper()
spectrum.xLabel = 'Incident Energy (eV)'
spectrum.yLabel = 'Energy Transfer (eV)'
self.broadenings = {'gaussian': (calculation.xGaussian,
calculation.yGaussian), }
self.raw.append(spectrum)
# Process the spectra once they where read from disk.
self.process() | Read the spectra from the files generated by Quanty and store them
as a list of spectum objects. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/quanty.py#L284-L372 | [
"def process(self):\n try:\n self.processed = copy.deepcopy(self.raw)\n except AttributeError:\n return\n\n for spectrum in self.processed:\n if self.broadenings:\n spectrum.broaden(self.broadenings)\n if self.scale != self._defaults['scale']:\n spectrum.scale(self.scale)\n if self.shift != self._defaults['shift']:\n spectrum.shift(self.shift)\n spectrum.normalize(self.normalization)\n"
] | class QuantySpectra(object):
_defaults = {
'scale': 1.0,
'shift': [0.0, 0.0],
'broadenings': dict(),
'normalization': 'None',
'toCalculateChecked': None,
'_toCalculateChecked': None,
'toPlotChecked': None,
'_toPlotChecked': None,
'toPlot': None,
}
def __init__(self):
self.__dict__.update(self._defaults)
self.aliases = {
'Isotropic': 'Isotropic',
'Circular Dichroism': 'Circular Dichroism (R-L)',
'Linear Dichroism': 'Linear Dichroism (V-H)',
}
@property
def toPlot(self):
spectraNames = list()
for spectrum in self.toCalculateChecked:
if spectrum == 'Isotropic':
spectraNames.append(self.aliases[spectrum])
if spectrum == 'Circular Dichroism':
spectraNames.append(self.aliases[spectrum])
spectraNames.append('Right Polarized (R)')
spectraNames.append('Left Polarized (L)')
elif spectrum == 'Linear Dichroism':
spectraNames.append(self.aliases[spectrum])
spectraNames.append('Vertical Polarized (V)')
spectraNames.append('Horizontal Polarized (H)')
return spectraNames
@property
def toCalculateChecked(self):
return self._toCalculateChecked
@toCalculateChecked.setter
def toCalculateChecked(self, values):
self._toCalculateChecked = values
spectraNames = list()
for spectrum in values:
if spectrum in (
'Isotropic', 'Circular Dichroism', 'Linear Dichroism'):
spectraNames.append(self.aliases[spectrum])
self.toPlotChecked = spectraNames
@property
def toPlotChecked(self):
return self._toPlotChecked
@toPlotChecked.setter
def toPlotChecked(self, values):
self._toPlotChecked = values
def process(self):
try:
self.processed = copy.deepcopy(self.raw)
except AttributeError:
return
for spectrum in self.processed:
if self.broadenings:
spectrum.broaden(self.broadenings)
if self.scale != self._defaults['scale']:
spectrum.scale(self.scale)
if self.shift != self._defaults['shift']:
spectrum.shift(self.shift)
spectrum.normalize(self.normalization)
|
mretegan/crispy | crispy/gui/quanty.py | QuantyDockWidget.populateWidget | python | def populateWidget(self):
self.elementComboBox.setItems(self.state._elements, self.state.element)
self.chargeComboBox.setItems(self.state._charges, self.state.charge)
self.symmetryComboBox.setItems(
self.state._symmetries, self.state.symmetry)
self.experimentComboBox.setItems(
self.state._experiments, self.state.experiment)
self.edgeComboBox.setItems(self.state._edges, self.state.edge)
self.temperatureLineEdit.setValue(self.state.temperature)
self.magneticFieldLineEdit.setValue(self.state.magneticField)
self.axesTabWidget.setTabText(0, str(self.state.xLabel))
self.xMinLineEdit.setValue(self.state.xMin)
self.xMaxLineEdit.setValue(self.state.xMax)
self.xNPointsLineEdit.setValue(self.state.xNPoints)
self.xLorentzianLineEdit.setList(self.state.xLorentzian)
self.xGaussianLineEdit.setValue(self.state.xGaussian)
self.k1LineEdit.setVector(self.state.k1)
self.eps11LineEdit.setVector(self.state.eps11)
self.eps12LineEdit.setVector(self.state.eps12)
if self.state.experiment in ['RIXS', ]:
if self.axesTabWidget.count() == 1:
tab = self.axesTabWidget.findChild(QWidget, 'yTab')
self.axesTabWidget.addTab(tab, tab.objectName())
self.axesTabWidget.setTabText(1, self.state.yLabel)
self.yMinLineEdit.setValue(self.state.yMin)
self.yMaxLineEdit.setValue(self.state.yMax)
self.yNPointsLineEdit.setValue(self.state.yNPoints)
self.yLorentzianLineEdit.setList(self.state.yLorentzian)
self.yGaussianLineEdit.setValue(self.state.yGaussian)
self.k2LineEdit.setVector(self.state.k2)
self.eps21LineEdit.setVector(self.state.eps21)
self.eps22LineEdit.setVector(self.state.eps22)
text = self.eps11Label.text()
text = re.sub('>[vσ]', '>σ', text)
self.eps11Label.setText(text)
text = self.eps12Label.text()
text = re.sub('>[hπ]', '>π', text)
self.eps12Label.setText(text)
else:
self.axesTabWidget.removeTab(1)
text = self.eps11Label.text()
text = re.sub('>[vσ]', '>v', text)
self.eps11Label.setText(text)
text = self.eps12Label.text()
text = re.sub('>[hπ]', '>h', text)
self.eps12Label.setText(text)
# Create the spectra selection model.
self.spectraModel = SpectraModel(parent=self)
self.spectraModel.setModelData(
self.state.spectra.toCalculate,
self.state.spectra.toCalculateChecked)
self.spectraModel.checkStateChanged.connect(
self.updateSpectraCheckState)
self.spectraListView.setModel(self.spectraModel)
self.spectraListView.selectionModel().setCurrentIndex(
self.spectraModel.index(0, 0), QItemSelectionModel.Select)
self.fkLineEdit.setValue(self.state.fk)
self.gkLineEdit.setValue(self.state.gk)
self.zetaLineEdit.setValue(self.state.zeta)
# Create the Hamiltonian model.
self.hamiltonianModel = HamiltonianModel(parent=self)
self.hamiltonianModel.setModelData(self.state.hamiltonianData)
self.hamiltonianModel.setNodesCheckState(self.state.hamiltonianState)
if self.syncParametersCheckBox.isChecked():
self.hamiltonianModel.setSyncState(True)
else:
self.hamiltonianModel.setSyncState(False)
self.hamiltonianModel.dataChanged.connect(self.updateHamiltonianData)
self.hamiltonianModel.itemCheckStateChanged.connect(
self.updateHamiltonianNodeCheckState)
# Assign the Hamiltonian model to the Hamiltonian terms view.
self.hamiltonianTermsView.setModel(self.hamiltonianModel)
self.hamiltonianTermsView.selectionModel().setCurrentIndex(
self.hamiltonianModel.index(0, 0), QItemSelectionModel.Select)
self.hamiltonianTermsView.selectionModel().selectionChanged.connect(
self.selectedHamiltonianTermChanged)
# Assign the Hamiltonian model to the Hamiltonian parameters view.
self.hamiltonianParametersView.setModel(self.hamiltonianModel)
self.hamiltonianParametersView.expandAll()
self.hamiltonianParametersView.resizeAllColumnsToContents()
self.hamiltonianParametersView.setColumnWidth(0, 130)
self.hamiltonianParametersView.setRootIndex(
self.hamiltonianTermsView.currentIndex())
self.nPsisLineEdit.setValue(self.state.nPsis)
self.nPsisAutoCheckBox.setChecked(self.state.nPsisAuto)
self.nConfigurationsLineEdit.setValue(self.state.nConfigurations)
self.nConfigurationsLineEdit.setEnabled(False)
name = '{}-Ligands Hybridization'.format(self.state.block)
for termName in self.state.hamiltonianData:
if name in termName:
termState = self.state.hamiltonianState[termName]
if termState == 0:
continue
else:
self.nConfigurationsLineEdit.setEnabled(True)
if not hasattr(self, 'resultsModel'):
# Create the results model.
self.resultsModel = ResultsModel(parent=self)
self.resultsModel.itemNameChanged.connect(
self.updateCalculationName)
self.resultsModel.itemCheckStateChanged.connect(
self.updatePlotWidget)
self.resultsModel.dataChanged.connect(self.updatePlotWidget)
self.resultsModel.dataChanged.connect(self.updateResultsView)
# Assign the results model to the results view.
self.resultsView.setModel(self.resultsModel)
self.resultsView.selectionModel().selectionChanged.connect(
self.selectedResultsChanged)
self.resultsView.resizeColumnsToContents()
self.resultsView.horizontalHeader().setSectionsMovable(False)
self.resultsView.horizontalHeader().setSectionsClickable(False)
if sys.platform == 'darwin':
self.resultsView.horizontalHeader().setMaximumHeight(17)
# Add a context menu to the view.
self.resultsView.setContextMenuPolicy(Qt.CustomContextMenu)
self.resultsView.customContextMenuRequested[QPoint].connect(
self.showResultsContextMenu)
if not hasattr(self, 'resultDetailsDialog'):
self.resultDetailsDialog = QuantyResultDetailsDialog(parent=self)
self.updateMainWindowTitle(self.state.baseName) | Populate the widget using data stored in the state
object. The order in which the individual widgets are populated
follows their arrangment.
The models are recreated every time the function is called.
This might seem to be an overkill, but in practice it is very fast.
Don't try to move the model creation outside this function; is not
worth the effort, and there is nothing to gain from it. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/quanty.py#L754-L899 | null | class QuantyDockWidget(QDockWidget):
def __init__(self, parent=None):
super(QuantyDockWidget, self).__init__(parent=parent)
# Load the external .ui file for the widget.
path = resourceFileName('uis:quanty/main.ui')
loadUi(path, baseinstance=self, package='crispy.gui')
# Load the settings from file.
config = Config()
self.settings = config.read()
# Set the state object
self.state = QuantyCalculation()
self.populateWidget()
self.activateWidget()
self.timeout = 4000
self.hamiltonianSplitter.setSizes((150, 300, 10))
def activateWidget(self):
self.elementComboBox.currentTextChanged.connect(self.resetState)
self.chargeComboBox.currentTextChanged.connect(self.resetState)
self.symmetryComboBox.currentTextChanged.connect(self.resetState)
self.experimentComboBox.currentTextChanged.connect(
self.resetState)
self.edgeComboBox.currentTextChanged.connect(self.resetState)
self.temperatureLineEdit.returnPressed.connect(
self.updateTemperature)
self.magneticFieldLineEdit.returnPressed.connect(
self.updateMagneticField)
self.xMinLineEdit.returnPressed.connect(self.updateXMin)
self.xMaxLineEdit.returnPressed.connect(self.updateXMax)
self.xNPointsLineEdit.returnPressed.connect(self.updateXNPoints)
self.xLorentzianLineEdit.returnPressed.connect(
self.updateXLorentzian)
self.xGaussianLineEdit.returnPressed.connect(self.updateXGaussian)
self.k1LineEdit.returnPressed.connect(self.updateIncidentWaveVector)
self.eps11LineEdit.returnPressed.connect(
self.updateIncidentPolarizationVectors)
self.yMinLineEdit.returnPressed.connect(self.updateYMin)
self.yMaxLineEdit.returnPressed.connect(self.updateYMax)
self.yNPointsLineEdit.returnPressed.connect(self.updateYNPoints)
self.yLorentzianLineEdit.returnPressed.connect(
self.updateYLorentzian)
self.yGaussianLineEdit.returnPressed.connect(self.updateYGaussian)
self.fkLineEdit.returnPressed.connect(self.updateScaleFactors)
self.gkLineEdit.returnPressed.connect(self.updateScaleFactors)
self.zetaLineEdit.returnPressed.connect(self.updateScaleFactors)
self.syncParametersCheckBox.toggled.connect(self.updateSyncParameters)
self.nPsisAutoCheckBox.toggled.connect(self.updateNPsisAuto)
self.nPsisLineEdit.returnPressed.connect(self.updateNPsis)
self.nConfigurationsLineEdit.returnPressed.connect(
self.updateConfigurations)
self.saveInputAsPushButton.clicked.connect(self.saveInputAs)
self.calculationPushButton.clicked.connect(self.runCalculation)
def enableWidget(self, flag=True, result=None):
self.elementComboBox.setEnabled(flag)
self.chargeComboBox.setEnabled(flag)
self.symmetryComboBox.setEnabled(flag)
self.experimentComboBox.setEnabled(flag)
self.edgeComboBox.setEnabled(flag)
self.temperatureLineEdit.setEnabled(flag)
self.magneticFieldLineEdit.setEnabled(flag)
self.xMinLineEdit.setEnabled(flag)
self.xMaxLineEdit.setEnabled(flag)
self.xNPointsLineEdit.setEnabled(flag)
self.xLorentzianLineEdit.setEnabled(flag)
self.xGaussianLineEdit.setEnabled(flag)
self.k1LineEdit.setEnabled(flag)
self.eps11LineEdit.setEnabled(flag)
self.yMinLineEdit.setEnabled(flag)
self.yMaxLineEdit.setEnabled(flag)
self.yNPointsLineEdit.setEnabled(flag)
self.yLorentzianLineEdit.setEnabled(flag)
self.yGaussianLineEdit.setEnabled(flag)
self.spectraListView.setEnabled(flag)
self.fkLineEdit.setEnabled(flag)
self.gkLineEdit.setEnabled(flag)
self.zetaLineEdit.setEnabled(flag)
self.syncParametersCheckBox.setEnabled(flag)
self.nPsisAutoCheckBox.setEnabled(flag)
if self.nPsisAutoCheckBox.isChecked():
self.nPsisLineEdit.setEnabled(False)
else:
self.nPsisLineEdit.setEnabled(flag)
self.hamiltonianTermsView.setEnabled(flag)
self.hamiltonianParametersView.setEnabled(flag)
self.resultsView.setEnabled(flag)
self.saveInputAsPushButton.setEnabled(flag)
if result is None or isinstance(result, QuantyCalculation):
self.nConfigurationsLineEdit.setEnabled(flag)
self.resultsView.setEnabled(flag)
self.calculationPushButton.setEnabled(True)
self.resultDetailsDialog.enableWidget(flag)
else:
self.nConfigurationsLineEdit.setEnabled(False)
self.calculationPushButton.setEnabled(flag)
self.resultsView.setEnabled(True)
self.resultDetailsDialog.enableWidget(False)
def updateTemperature(self):
temperature = self.temperatureLineEdit.getValue()
if temperature < 0:
message = 'The temperature cannot be negative.'
self.getStatusBar().showMessage(message, self.timeout)
self.temperatureLineEdit.setValue(self.state.temperature)
return
elif temperature == 0:
self.nPsisAutoCheckBox.setChecked(False)
self.updateNPsisAuto()
self.nPsisLineEdit.setValue(1)
self.updateNPsis()
self.state.temperature = temperature
def updateMagneticField(self):
magneticField = self.magneticFieldLineEdit.getValue()
TESLA_TO_EV = 5.788e-05
# Normalize the current incident vector.
k1 = np.array(self.state.k1)
k1 = k1 / np.linalg.norm(k1)
configurations = self.state.hamiltonianData['Magnetic Field']
for configuration in configurations:
parameters = configurations[configuration]
for i, parameter in enumerate(parameters):
value = float(magneticField * np.abs(k1[i]) * TESLA_TO_EV)
if abs(value) == 0.0:
value = 0.0
configurations[configuration][parameter] = value
self.hamiltonianModel.updateModelData(self.state.hamiltonianData)
self.state.magneticField = magneticField
def updateXMin(self):
xMin = self.xMinLineEdit.getValue()
if xMin > self.state.xMax:
message = ('The lower energy limit cannot be larger than '
'the upper limit.')
self.getStatusBar().showMessage(message, self.timeout)
self.xMinLineEdit.setValue(self.state.xMin)
return
self.state.xMin = xMin
def updateXMax(self):
xMax = self.xMaxLineEdit.getValue()
if xMax < self.state.xMin:
message = ('The upper energy limit cannot be smaller than '
'the lower limit.')
self.getStatusBar().showMessage(message, self.timeout)
self.xMaxLineEdit.setValue(self.state.xMax)
return
self.state.xMax = xMax
def updateXNPoints(self):
xNPoints = self.xNPointsLineEdit.getValue()
xMin = self.state.xMin
xMax = self.state.xMax
xLorentzianMin = float(self.state.xLorentzian[0])
xNPointsMin = int(np.floor((xMax - xMin) / xLorentzianMin))
if xNPoints < xNPointsMin:
message = ('The number of points must be greater than '
'{}.'.format(xNPointsMin))
self.getStatusBar().showMessage(message, self.timeout)
self.xNPointsLineEdit.setValue(self.state.xNPoints)
return
self.state.xNPoints = xNPoints
def updateXLorentzian(self):
try:
xLorentzian = self.xLorentzianLineEdit.getList()
except ValueError:
message = 'Invalid data for the Lorentzian brodening.'
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(self.state.xLorentzian)
return
# Do some validation of the input value.
if len(xLorentzian) > 3:
message = 'The broadening can have at most three elements.'
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(self.state.xLorentzian)
return
try:
xLorentzianMin = float(xLorentzian[0])
except IndexError:
pass
else:
if xLorentzianMin < 0.1:
message = 'The broadening cannot be smaller than 0.1.'
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(
self.state.xLorentzian)
return
try:
xLorentzianMax = float(xLorentzian[1])
except IndexError:
pass
else:
if xLorentzianMax < 0.1:
message = 'The broadening cannot be smaller than 0.1.'
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(
self.state.xLorentzian)
try:
xLorentzianPivotEnergy = float(xLorentzian[2])
except IndexError:
pass
else:
xMin = self.state.xMin
xMax = self.state.xMax
if not (xMin < xLorentzianPivotEnergy < xMax):
message = ('The transition point must lie between the upper '
'and lower energy limits.')
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(
self.state.xLorentzian)
return
self.state.xLorentzian = xLorentzian
def updateXGaussian(self):
xGaussian = self.xGaussianLineEdit.getValue()
if xGaussian < 0:
message = 'The broadening cannot be negative.'
self.getStatusBar().showMessage(message, self.timeout)
self.xGaussianLineEdit.setValue(self.state.xGaussian)
return
self.state.xGaussian = xGaussian
def updateIncidentWaveVector(self):
try:
k1 = self.k1LineEdit.getVector()
except ValueError:
message = 'Invalid data for the wave vector.'
self.getStatusBar().showMessage(message, self.timeout)
self.k1LineEdit.setVector(self.state.k1)
return
if np.all(np.array(k1) == 0):
message = 'The wave vector cannot be null.'
self.getStatusBar().showMessage(message, self.timeout)
self.k1LineEdit.setVector(self.state.k1)
return
# The k1 value should be fine; save it.
self.state.k1 = k1
# The polarization vector must be correct.
eps11 = self.eps11LineEdit.getVector()
# If the wave and polarization vectors are not perpendicular, select a
# new perpendicular vector for the polarization.
if np.dot(np.array(k1), np.array(eps11)) != 0:
if k1[2] != 0 or (-k1[0] - k1[1]) != 0:
eps11 = (k1[2], k1[2], -k1[0] - k1[1])
else:
eps11 = (-k1[2] - k1[1], k1[0], k1[0])
self.eps11LineEdit.setVector(eps11)
self.state.eps11 = eps11
# Generate a second, perpendicular, polarization vector to the plane
# defined by the wave vector and the first polarization vector.
eps12 = np.cross(np.array(eps11), np.array(k1))
eps12 = eps12.tolist()
self.eps12LineEdit.setVector(eps12)
self.state.eps12 = eps12
# Update the magnetic field.
self.updateMagneticField()
def updateIncidentPolarizationVectors(self):
try:
eps11 = self.eps11LineEdit.getVector()
except ValueError:
message = 'Invalid data for the polarization vector.'
self.getStatusBar().showMessage(message, self.timeout)
self.eps11LineEdit.setVector(self.state.eps11)
return
if np.all(np.array(eps11) == 0):
message = 'The polarization vector cannot be null.'
self.getStatusBar().showMessage(message, self.timeout)
self.eps11LineEdit.setVector(self.state.eps11)
return
k1 = self.state.k1
if np.dot(np.array(k1), np.array(eps11)) != 0:
message = ('The wave and polarization vectors need to be '
'perpendicular.')
self.getStatusBar().showMessage(message, self.timeout)
self.eps11LineEdit.setVector(self.state.eps11)
return
self.state.eps11 = eps11
# Generate a second, perpendicular, polarization vector to the plane
# defined by the wave vector and the first polarization vector.
eps12 = np.cross(np.array(eps11), np.array(k1))
eps12 = eps12.tolist()
self.eps12LineEdit.setVector(eps12)
self.state.eps12 = eps12
def updateYMin(self):
yMin = self.yMinLineEdit.getValue()
if yMin > self.state.yMax:
message = ('The lower energy limit cannot be larger than '
'the upper limit.')
self.getStatusBar().showMessage(message, self.timeout)
self.yMinLineEdit.setValue(self.state.yMin)
return
self.state.yMin = yMin
def updateYMax(self):
yMax = self.yMaxLineEdit.getValue()
if yMax < self.state.yMin:
message = ('The upper energy limit cannot be smaller than '
'the lower limit.')
self.getStatusBar().showMessage(message, self.timeout)
self.yMaxLineEdit.setValue(self.state.yMax)
return
self.state.yMax = yMax
def updateYNPoints(self):
yNPoints = self.yNPointsLineEdit.getValue()
yMin = self.state.yMin
yMax = self.state.yMax
yLorentzianMin = float(self.state.yLorentzian[0])
yNPointsMin = int(np.floor((yMax - yMin) / yLorentzianMin))
if yNPoints < yNPointsMin:
message = ('The number of points must be greater than '
'{}.'.format(yNPointsMin))
self.getStatusBar().showMessage(message, self.timeout)
self.yNPointsLineEdit.setValue(self.state.yNPoints)
return
self.state.yNPoints = yNPoints
def updateYLorentzian(self):
try:
yLorentzian = self.yLorentzianLineEdit.getList()
except ValueError:
message = 'Invalid data for the Lorentzian brodening.'
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(self.state.yLorentzian)
return
# Do some validation of the input value.
if len(yLorentzian) > 3:
message = 'The broadening can have at most three elements.'
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(self.state.yLorentzian)
return
try:
yLorentzianMin = float(yLorentzian[0])
except IndexError:
pass
else:
if yLorentzianMin < 0.1:
message = 'The broadening cannot be smaller than 0.1.'
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(
self.state.yLorentzian)
return
try:
yLorentzianMax = float(yLorentzian[1])
except IndexError:
pass
else:
if yLorentzianMax < 0.1:
message = 'The broadening cannot be smaller than 0.1.'
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(
self.state.yLorentzian)
try:
yLorentzianPivotEnergy = float(yLorentzian[2])
except IndexError:
pass
else:
yMin = self.state.yMin
yMax = self.state.yMax
if not (yMin < yLorentzianPivotEnergy < yMax):
message = ('The transition point must lie between the upper '
'and lower energy limits.')
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(
self.state.yLorentzian)
return
self.state.yLorentzian = list(map(float, yLorentzian))
def updateYGaussian(self):
yGaussian = self.yGaussianLineEdit.getValue()
if yGaussian < 0:
message = 'The broadening cannot be negative.'
self.getStatusBar().showMessage(message, self.timeout)
self.yGaussianLineEdit.setValue(self.state.yGaussian)
return
self.state.yGaussian = yGaussian
    def updateSpectraCheckState(self, checkedItems):
        """Store which spectra are checked (selected) for calculation."""
        self.state.spectra.toCalculateChecked = checkedItems
    def updateScaleFactors(self):
        """Validate the Fk/Gk/zeta scale factors and apply them.

        Rewrites the scaling of every Slater integral (F*, G*) and
        spin-orbit (ζ*) parameter of the Atomic and Hybridization terms,
        then pushes the updated Hamiltonian data to the model.
        """
        fk = self.fkLineEdit.getValue()
        gk = self.gkLineEdit.getValue()
        zeta = self.zetaLineEdit.getValue()
        if fk < 0 or gk < 0 or zeta < 0:
            message = 'The scale factors cannot be negative.'
            self.getStatusBar().showMessage(message, self.timeout)
            # Restore the previous, valid values in the line edits.
            self.fkLineEdit.setValue(self.state.fk)
            self.gkLineEdit.setValue(self.state.gk)
            self.zetaLineEdit.setValue(self.state.zeta)
            return
        self.state.fk = fk
        self.state.gk = gk
        self.state.zeta = zeta
        # TODO: This should be already updated to the most recent data.
        # self.state.hamiltonianData = self.hamiltonianModel.getModelData()
        terms = self.state.hamiltonianData
        for term in terms:
            if not ('Atomic' in term or 'Hybridization' in term):
                continue
            configurations = terms[term]
            for configuration in configurations:
                parameters = configurations[configuration]
                for parameter in parameters:
                    # Change the scale factors if the parameter has one.
                    # Parameters without a scaling are plain numbers and
                    # fail the tuple unpacking below.
                    try:
                        value, _ = parameters[parameter]
                    except TypeError:
                        continue
                    if parameter.startswith('F'):
                        terms[term][configuration][parameter] = [value, fk]
                    elif parameter.startswith('G'):
                        terms[term][configuration][parameter] = [value, gk]
                    elif parameter.startswith('ζ'):
                        terms[term][configuration][parameter] = [value, zeta]
        self.hamiltonianModel.updateModelData(self.state.hamiltonianData)
        # I have no idea why this is needed. Both views should update after
        # the above function call.
        self.hamiltonianTermsView.viewport().repaint()
        self.hamiltonianParametersView.viewport().repaint()
def updateNPsisAuto(self):
nPsisAuto = int(self.nPsisAutoCheckBox.isChecked())
if nPsisAuto:
self.nPsisLineEdit.setValue(self.state.nPsisMax)
self.nPsisLineEdit.setEnabled(False)
else:
self.nPsisLineEdit.setEnabled(True)
self.state.nPsisAuto = nPsisAuto
def updateNPsis(self):
nPsis = self.nPsisLineEdit.getValue()
if nPsis <= 0:
message = 'The number of states must be larger than zero.'
self.getStatusBar().showMessage(message, self.timeout)
self.nPsisLineEdit.setValue(self.state.nPsis)
return
if nPsis > self.state.nPsisMax:
message = 'The selected number of states exceeds the maximum.'
self.getStatusBar().showMessage(message, self.timeout)
self.nPsisLineEdit.setValue(self.state.nPsisMax)
nPsis = self.state.nPsisMax
self.state.nPsis = nPsis
    def updateSyncParameters(self, flag):
        """Enable or disable synchronized editing of Hamiltonian parameters."""
        self.hamiltonianModel.setSyncState(flag)
    def updateHamiltonianData(self):
        """Mirror the Hamiltonian model's data back into the state object."""
        self.state.hamiltonianData = self.hamiltonianModel.getModelData()
    def updateHamiltonianNodeCheckState(self, index, state):
        """React to a Hamiltonian term being checked or unchecked.

        Enforces mutual exclusion between LMCT and MLCT hybridizations,
        recomputes the maximum number of allowed configurations, and
        resets the configurations line edit for ligand-hybridization
        terms.

        :param index: Model index of the toggled term.
        :param state: New check state of the term (0 unchecked, 2 checked).
        """
        toggledTerm = index.data()
        states = self.hamiltonianModel.getNodesCheckState()
        # Allow only one type of hybridization with the ligands, either
        # LMCT or MLCT.
        for term in states:
            if 'LMCT' in term and 'MLCT' in toggledTerm:
                states[term] = 0
            elif 'MLCT' in term and 'LMCT' in toggledTerm:
                states[term] = 0
        self.state.hamiltonianState = states
        self.hamiltonianModel.setNodesCheckState(states)
        # Determine the maximum number of allowed configurations.
        if 'LMCT' in toggledTerm:
            if 'd' in self.state.block:
                self.state.nConfigurationsMax = 10 - self.state.nElectrons + 1
            elif 'f' in self.state.block:
                self.state.nConfigurationsMax = 14 - self.state.nElectrons + 1
        elif 'MLCT' in toggledTerm:
            self.state.nConfigurationsMax = self.state.nElectrons + 1
        term = '{}-Ligands Hybridization'.format(self.state.block)
        if term in index.data():
            # NOTE(review): a partially-checked state (1) would leave
            # nConfigurations unbound below — presumably the model only
            # emits 0 or 2 here; confirm against HamiltonianModel.
            if state == 0:
                nConfigurations = 1
                self.nConfigurationsLineEdit.setEnabled(False)
            elif state == 2:
                if self.state.nConfigurationsMax == 1:
                    nConfigurations = 1
                else:
                    nConfigurations = 2
                self.nConfigurationsLineEdit.setEnabled(True)
            self.nConfigurationsLineEdit.setValue(nConfigurations)
            self.state.nConfigurations = nConfigurations
def updateConfigurations(self, *args):
nConfigurations = self.nConfigurationsLineEdit.getValue()
if nConfigurations > self.state.nConfigurationsMax:
message = 'The maximum number of configurations is {}.'.format(
self.state.nConfigurationsMax)
self.getStatusBar().showMessage(message, self.timeout)
self.nConfigurationsLineEdit.setValue(
self.state.nConfigurationsMax)
nConfigurations = self.state.nConfigurationsMax
self.state.nConfigurations = nConfigurations
    def saveInput(self):
        """Write the Quanty input file for the current state to disk.

        Changes the working directory to the current path first.  I/O
        errors are reported in the status bar and then re-raised so that
        callers can abort.
        """
        # TODO: If the user changes a value in a widget without pressing Return
        # before running the calculation, the values are not updated.
        self.state.verbosity = self.getVerbosity()
        self.state.denseBorder = self.getDenseBorder()
        path = self.getCurrentPath()
        try:
            os.chdir(path)
        except OSError as e:
            message = ('The specified folder doesn\'t exist. Use the \'Save '
                       'Input As...\' button to save the input file to an '
                       'alternative location.')
            self.getStatusBar().showMessage(message, 2 * self.timeout)
            raise e
        # The folder might exist, but is not writable.
        try:
            self.state.saveInput()
        except (IOError, OSError) as e:
            message = 'Failed to write the Quanty input file.'
            self.getStatusBar().showMessage(message, self.timeout)
            raise e
def saveInputAs(self):
path, _ = QFileDialog.getSaveFileName(
self, 'Save Quanty Input',
os.path.join(self.getCurrentPath(), '{}.lua'.format(
self.state.baseName)), 'Quanty Input File (*.lua)')
if path:
basename = os.path.basename(path)
self.state.baseName, _ = os.path.splitext(basename)
self.setCurrentPath(path)
try:
self.saveInput()
except (IOError, OSError) as e:
return
self.updateMainWindowTitle(self.state.baseName)
def saveAllResultsAs(self):
path, _ = QFileDialog.getSaveFileName(
self, 'Save Results',
os.path.join(self.getCurrentPath(), '{}.pkl'.format(
'untitled')), 'Pickle File (*.pkl)')
if path:
self.setCurrentPath(path)
results = self.resultsModel.getAllItems()
results.reverse()
with open(path, 'wb') as p:
pickle.dump(results, p, pickle.HIGHEST_PROTOCOL)
def saveSelectedResultsAs(self):
path, _ = QFileDialog.getSaveFileName(
self, 'Save Results',
os.path.join(self.getCurrentPath(), '{}.pkl'.format(
'untitled')), 'Pickle File (*.pkl)')
if path:
self.setCurrentPath(path)
indexes = self.resultsView.selectedIndexes()
results = self.resultsModel.getSelectedItems(indexes)
results.reverse()
with open(path, 'wb') as p:
pickle.dump(results, p, pickle.HIGHEST_PROTOCOL)
    def resetState(self):
        """Create a fresh calculation from the current combo-box selection
        and repopulate the whole widget from it."""
        element = self.elementComboBox.currentText()
        charge = self.chargeComboBox.currentText()
        symmetry = self.symmetryComboBox.currentText()
        experiment = self.experimentComboBox.currentText()
        edge = self.edgeComboBox.currentText()
        self.state = QuantyCalculation(
            element=element, charge=charge, symmetry=symmetry,
            experiment=experiment, edge=edge)
        self.resultsView.selectionModel().clearSelection()
        self.populateWidget()
        self.updateMainWindowTitle(self.state.baseName)
        self.resultDetailsDialog.clear()
def removeSelectedCalculations(self):
indexes = self.resultsView.selectedIndexes()
if not indexes:
self.getPlotWidget().reset()
return
self.resultsModel.removeItems(indexes)
# self.resultsView.reset()
    def removeAllResults(self):
        """Clear the results model and reset the plot."""
        self.resultsModel.reset()
        self.getPlotWidget().reset()
def loadResults(self):
path, _ = QFileDialog.getOpenFileName(
self, 'Load Results',
self.getCurrentPath(), 'Pickle File (*.pkl)')
if path:
self.setCurrentPath(path)
with open(path, 'rb') as p:
self.resultsModel.appendItems(pickle.load(p))
self.updateMainWindowTitle(self.state.baseName)
self.quantyToolBox.setCurrentWidget(self.resultsPage)
def runCalculation(self):
path = self.getQuantyPath()
if path:
command = path
else:
message = ('The path to the Quanty executable is not set. '
'Please use the preferences menu to set it.')
self.getStatusBar().showMessage(message, 2 * self.timeout)
return
# Test the executable.
with open(os.devnull, 'w') as f:
try:
subprocess.call(command, stdout=f, stderr=f)
except OSError as e:
if e.errno == os.errno.ENOENT:
message = ('The Quanty executable is not working '
'properly. Is the PATH set correctly?')
self.getStatusBar().showMessage(message, 2 * self.timeout)
return
else:
raise e
# Write the input file to disk.
try:
self.saveInput()
except (IOError, OSError) as e:
return
# Disable the widget while the calculation is running.
self.enableWidget(False)
self.state.startingTime = datetime.datetime.now()
# Run Quanty using QProcess.
self.process = QProcess()
self.process.start(command, (self.state.baseName + '.lua', ))
message = (
'Running "Quanty {}" in {}.'.format(
self.state.baseName + '.lua', os.getcwd()))
self.getStatusBar().showMessage(message)
if sys.platform == 'win32' and self.process.waitForStarted():
self.updateCalculationPushButton()
else:
self.process.started.connect(self.updateCalculationPushButton)
self.process.readyReadStandardOutput.connect(self.handleOutputLogging)
self.process.finished.connect(self.processCalculation)
def updateCalculationPushButton(self, kind='stop'):
self.calculationPushButton.disconnect()
if kind == 'run':
icon = QIcon(resourceFileName('icons:play.svg'))
self.calculationPushButton.setIcon(icon)
self.calculationPushButton.setText('Run')
self.calculationPushButton.setToolTip('Run Quanty.')
self.calculationPushButton.clicked.connect(self.runCalculation)
elif kind == 'stop':
icon = QIcon(resourceFileName('icons:stop.svg'))
self.calculationPushButton.setIcon(icon)
self.calculationPushButton.setText('Stop')
self.calculationPushButton.setToolTip('Stop Quanty.')
self.calculationPushButton.clicked.connect(self.stopCalculation)
else:
pass
    def stopCalculation(self):
        """Forcefully terminate the running Quanty process."""
        self.process.kill()
    def processCalculation(self, *args):
        """Handle the Quanty process finishing, successfully or not.

        Reports the elapsed time (or the failure) in the status bar, loads
        the resulting spectra from disk, stores the finished calculation in
        the results model, switches to the results page, and optionally
        removes the generated files.
        """
        startingTime = self.state.startingTime
        # When did I finish?
        endingTime = datetime.datetime.now()
        self.state.endingTime = endingTime
        # Re-enable the widget when the calculation has finished.
        self.enableWidget(True)
        # Reset the calculation button.
        self.updateCalculationPushButton('run')
        # Evaluate the exit code and status of the process.
        exitStatus = self.process.exitStatus()
        exitCode = self.process.exitCode()
        if exitStatus == 0 and exitCode == 0:
            message = ('Quanty has finished successfully in ')
            delta = (endingTime - startingTime).total_seconds()
            hours, reminder = divmod(delta, 3600)
            minutes, seconds = divmod(reminder, 60)
            seconds = round(seconds, 2)
            if hours > 0:
                message += '{} hours {} minutes and {} seconds.'.format(
                    hours, minutes, seconds)
            elif minutes > 0:
                message += '{} minutes and {} seconds.'.format(
                    minutes, seconds)
            else:
                message += '{} seconds.'.format(seconds)
            self.getStatusBar().showMessage(message, self.timeout)
        elif exitStatus == 0 and exitCode == 1:
            # Quanty itself reported an error; surface its stderr.
            self.handleErrorLogging()
            message = (
                'Quanty has finished unsuccessfully. '
                'Check the logging window for more details.')
            self.getStatusBar().showMessage(message, self.timeout)
            return
        # exitCode is platform dependent; exitStatus is always 1.
        elif exitStatus == 1:
            message = 'Quanty was stopped.'
            self.getStatusBar().showMessage(message, self.timeout)
            return
        # Scroll to the bottom of the logger widget.
        scrollBar = self.getLoggerWidget().verticalScrollBar()
        scrollBar.setValue(scrollBar.maximum())
        # Load the spectra from disk.
        self.state.spectra.loadFromDisk(self.state)
        # If the calculated spectrum is an image, uncheck all the other
        # calculations. This way the current result can be displayed in the
        # plot widget.
        if self.state.experiment in ['RIXS', ]:
            self.resultsModel.uncheckAllItems()
        # Once all processing is done, store the state in the
        # results model. Upon finishing this, a signal is emitted by the
        # model which triggers some updates to be performed.
        self.state.isChecked = True
        self.resultsModel.appendItems(self.state)
        # If the "Hamiltonian Setup" page is currently selected, when the
        # current widget is set to the "Results Page", the former is not
        # displayed. To avoid this I switch first to the "General Setup" page.
        self.quantyToolBox.setCurrentWidget(self.generalPage)
        self.quantyToolBox.setCurrentWidget(self.resultsPage)
        self.resultsView.setFocus()
        # Remove files if requested.
        if self.doRemoveFiles():
            os.remove('{}.lua'.format(self.state.baseName))
            spectra = glob.glob('{}_*.spec'.format(self.state.baseName))
            for spectrum in spectra:
                os.remove(spectrum)
    def selectedHamiltonianTermChanged(self):
        """Show the parameters of the currently selected Hamiltonian term."""
        index = self.hamiltonianTermsView.currentIndex()
        self.hamiltonianParametersView.setRootIndex(index)
    def showResultsContextMenu(self, position):
        """Build and show the context menu of the results view.

        Actions operating on a selection are disabled when nothing is
        selected; actions operating on all results are disabled when the
        model is empty.

        :param position: Click position in results-view coordinates.
        """
        icon = QIcon(resourceFileName('icons:clipboard.svg'))
        self.showDetailsAction = QAction(
            icon, 'Show Details', self, triggered=self.showResultDetailsDialog)
        icon = QIcon(resourceFileName('icons:save.svg'))
        self.saveSelectedResultsAsAction = QAction(
            icon, 'Save Selected Results As...', self,
            triggered=self.saveSelectedResultsAs)
        self.saveAllResultsAsAction = QAction(
            icon, 'Save All Results As...', self,
            triggered=self.saveAllResultsAs)
        icon = QIcon(resourceFileName('icons:trash.svg'))
        self.removeSelectedResultsAction = QAction(
            icon, 'Remove Selected Results', self,
            triggered=self.removeSelectedCalculations)
        self.removeAllResultsAction = QAction(
            icon, 'Remove All Results', self, triggered=self.removeAllResults)
        icon = QIcon(resourceFileName('icons:folder-open.svg'))
        self.loadResultsAction = QAction(
            icon, 'Load Results', self, triggered=self.loadResults)
        self.resultsContextMenu = QMenu('Results Context Menu', self)
        self.resultsContextMenu.addAction(self.showDetailsAction)
        self.resultsContextMenu.addSeparator()
        self.resultsContextMenu.addAction(self.saveSelectedResultsAsAction)
        self.resultsContextMenu.addAction(self.removeSelectedResultsAction)
        self.resultsContextMenu.addSeparator()
        self.resultsContextMenu.addAction(self.saveAllResultsAsAction)
        self.resultsContextMenu.addAction(self.removeAllResultsAction)
        self.resultsContextMenu.addSeparator()
        self.resultsContextMenu.addAction(self.loadResultsAction)
        # Disable selection-dependent actions when nothing is selected.
        if not self.resultsView.selectedIndexes():
            self.removeSelectedResultsAction.setEnabled(False)
            self.saveSelectedResultsAsAction.setEnabled(False)
        # Disable model-wide actions when there are no results at all.
        if not self.resultsModel.modelData:
            self.showDetailsAction.setEnabled(False)
            self.saveAllResultsAsAction.setEnabled(False)
            self.removeAllResultsAction.setEnabled(False)
        self.resultsContextMenu.exec_(self.resultsView.mapToGlobal(position))
def updateResultsView(self, index):
"""
Update the selection to contain only the result specified by
the index. This should be the last index of the model. Finally updade
the context menu.
The selectionChanged signal is used to trigger the update of
the Quanty dock widget and result details dialog.
:param index: Index of the last item of the model.
:type index: QModelIndex
"""
flags = (QItemSelectionModel.Clear | QItemSelectionModel.Rows |
QItemSelectionModel.Select)
self.resultsView.selectionModel().select(index, flags)
self.resultsView.resizeColumnsToContents()
self.resultsView.setFocus()
def getLastSelectedResultsModelIndex(self):
rows = self.resultsView.selectionModel().selectedRows()
try:
index = rows[-1]
except IndexError:
index = None
return index
    def selectedResultsChanged(self):
        """Update the widget to reflect the newly selected result.

        Quanty calculations repopulate the whole widget; experimental data
        disable the input widgets; an empty selection resets the state.
        """
        indexes = self.resultsView.selectedIndexes()
        # Ignore multi-selections; only a single result drives the widget.
        if len(indexes) > 1:
            return
        index = self.getLastSelectedResultsModelIndex()
        if index is None:
            result = None
        else:
            result = self.resultsModel.getItem(index)
        if isinstance(result, QuantyCalculation):
            self.enableWidget(True, result)
            self.state = result
            self.populateWidget()
            self.resultDetailsDialog.populateWidget()
        elif isinstance(result, ExperimentalData):
            self.enableWidget(False, result)
            self.updateMainWindowTitle(result.baseName)
            self.resultDetailsDialog.clear()
        else:
            self.enableWidget(True, result)
            self.resetState()
def updateResultsModelData(self):
index = self.getLastSelectedResultsModelIndex()
if index is None:
return
self.resultsModel.updateItem(index, self.state)
self.resultsView.viewport().repaint()
    def updatePlotWidget(self):
        """Redraw all checked results in the plot widget.

        Updating the plotting widget should not require any information
        about the current state of the widget.
        """
        pw = self.getPlotWidget()
        pw.reset()
        results = self.resultsModel.getCheckedItems()
        for result in results:
            if isinstance(result, ExperimentalData):
                spectrum = result.spectra['Expt']
                spectrum.legend = '{}-{}'.format(result.index, 'Expt')
                spectrum.xLabel = 'X'
                spectrum.yLabel = 'Y'
                spectrum.plot(plotWidget=pw)
            else:
                # RIXS results are images and cannot be overlaid with
                # other checked results.
                if len(results) > 1 and result.experiment in ['RIXS', ]:
                    continue
                for spectrum in result.spectra.processed:
                    spectrum.legend = '{}-{}'.format(
                        result.index, spectrum.shortName)
                    if spectrum.name in result.spectra.toPlotChecked:
                        spectrum.plot(plotWidget=pw)
    def showResultDetailsDialog(self):
        """Show and bring to front the result details dialog."""
        self.resultDetailsDialog.show()
        self.resultDetailsDialog.raise_()
    def updateCalculationName(self, name):
        """Propagate a renamed calculation to the state, the window title
        and the details dialog."""
        self.state.baseName = name
        self.updateMainWindowTitle(name)
        self.resultDetailsDialog.updateTitle(name)
        # Experimental data items have no Quanty summary to refresh.
        if isinstance(self.state, QuantyCalculation):
            self.resultDetailsDialog.updateSummary()
def loadExperimentalData(self):
path, _ = QFileDialog.getOpenFileName(
self, 'Load Experimental Data',
self.getCurrentPath(), 'Data File (*.dat)')
if path:
result = ExperimentalData(path)
if result.spectra is not None:
self.resultsModel.appendItems([result])
else:
message = ('Failed to read experimental data. Please check '
'that the file is properly formatted.')
self.getStatusBar().showMessage(message, self.timeout)
def handleOutputLogging(self):
self.process.setReadChannel(QProcess.StandardOutput)
data = self.process.readAllStandardOutput().data()
data = data.decode('utf-8').rstrip()
self.getLoggerWidget().appendPlainText(data)
self.state.output = self.state.output + data
def handleErrorLogging(self):
self.process.setReadChannel(QProcess.StandardError)
data = self.process.readAllStandardError().data()
self.getLoggerWidget().appendPlainText(data.decode('utf-8'))
def updateMainWindowTitle(self, name=None):
if name is None:
title = 'Crispy'
else:
title = 'Crispy - {}'.format(name)
self.setMainWindowTitle(title)
    def setMainWindowTitle(self, title):
        """Set the window title of the parent (main) window."""
        self.parent().setWindowTitle(title)
    def getStatusBar(self):
        """Return the main window's status bar."""
        return self.parent().statusBar()
    def getPlotWidget(self):
        """Return the main window's plot widget."""
        return self.parent().plotWidget
    def getLoggerWidget(self):
        """Return the main window's logger widget."""
        return self.parent().loggerWidget
def setCurrentPath(self, path):
path = os.path.dirname(path)
self.settings.setValue('CurrentPath', path)
def getCurrentPath(self):
path = self.settings.value('CurrentPath')
if path is None:
path = os.path.expanduser('~')
return path
    def getQuantyPath(self):
        """Return the configured path to the Quanty executable."""
        return self.settings.value('Quanty/Path')
    def getVerbosity(self):
        """Return the configured Quanty verbosity setting."""
        return self.settings.value('Quanty/Verbosity')
    def getDenseBorder(self):
        """Return the configured Quanty dense-border setting."""
        return self.settings.value('Quanty/DenseBorder')
    def doRemoveFiles(self):
        """Return True when generated files should be removed after a run."""
        return self.settings.value('Quanty/RemoveFiles', True, type=bool)
|
mretegan/crispy | crispy/gui/quanty.py | QuantyDockWidget.updateResultsView | python | def updateResultsView(self, index):
flags = (QItemSelectionModel.Clear | QItemSelectionModel.Rows |
QItemSelectionModel.Select)
self.resultsView.selectionModel().select(index, flags)
self.resultsView.resizeColumnsToContents()
self.resultsView.setFocus() | Update the selection to contain only the result specified by
the index. This should be the last index of the model. Finally updade
the context menu.
The selectionChanged signal is used to trigger the update of
the Quanty dock widget and result details dialog.
:param index: Index of the last item of the model.
:type index: QModelIndex | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/quanty.py#L1774-L1791 | null | class QuantyDockWidget(QDockWidget):
def __init__(self, parent=None):
super(QuantyDockWidget, self).__init__(parent=parent)
# Load the external .ui file for the widget.
path = resourceFileName('uis:quanty/main.ui')
loadUi(path, baseinstance=self, package='crispy.gui')
# Load the settings from file.
config = Config()
self.settings = config.read()
# Set the state object
self.state = QuantyCalculation()
self.populateWidget()
self.activateWidget()
self.timeout = 4000
self.hamiltonianSplitter.setSizes((150, 300, 10))
    def populateWidget(self):
        """
        Populate the widget using data stored in the state
        object. The order in which the individual widgets are populated
        follows their arrangement.

        The models are recreated every time the function is called.
        This might seem to be an overkill, but in practice it is very fast.
        Don't try to move the model creation outside this function; is not
        worth the effort, and there is nothing to gain from it.
        """
        # General setup: element/charge/symmetry/experiment/edge selectors.
        self.elementComboBox.setItems(self.state._elements, self.state.element)
        self.chargeComboBox.setItems(self.state._charges, self.state.charge)
        self.symmetryComboBox.setItems(
            self.state._symmetries, self.state.symmetry)
        self.experimentComboBox.setItems(
            self.state._experiments, self.state.experiment)
        self.edgeComboBox.setItems(self.state._edges, self.state.edge)
        self.temperatureLineEdit.setValue(self.state.temperature)
        self.magneticFieldLineEdit.setValue(self.state.magneticField)
        # x-axis parameters.
        self.axesTabWidget.setTabText(0, str(self.state.xLabel))
        self.xMinLineEdit.setValue(self.state.xMin)
        self.xMaxLineEdit.setValue(self.state.xMax)
        self.xNPointsLineEdit.setValue(self.state.xNPoints)
        self.xLorentzianLineEdit.setList(self.state.xLorentzian)
        self.xGaussianLineEdit.setValue(self.state.xGaussian)
        self.k1LineEdit.setVector(self.state.k1)
        self.eps11LineEdit.setVector(self.state.eps11)
        self.eps12LineEdit.setVector(self.state.eps12)
        # The y axis (and a second set of wave/polarization vectors) only
        # exists for two-dimensional experiments such as RIXS.
        if self.state.experiment in ['RIXS', ]:
            if self.axesTabWidget.count() == 1:
                tab = self.axesTabWidget.findChild(QWidget, 'yTab')
                self.axesTabWidget.addTab(tab, tab.objectName())
            self.axesTabWidget.setTabText(1, self.state.yLabel)
            self.yMinLineEdit.setValue(self.state.yMin)
            self.yMaxLineEdit.setValue(self.state.yMax)
            self.yNPointsLineEdit.setValue(self.state.yNPoints)
            self.yLorentzianLineEdit.setList(self.state.yLorentzian)
            self.yGaussianLineEdit.setValue(self.state.yGaussian)
            self.k2LineEdit.setVector(self.state.k2)
            self.eps21LineEdit.setVector(self.state.eps21)
            self.eps22LineEdit.setVector(self.state.eps22)
            text = self.eps11Label.text()
            text = re.sub('>[vσ]', '>σ', text)
            self.eps11Label.setText(text)
            text = self.eps12Label.text()
            text = re.sub('>[hπ]', '>π', text)
            self.eps12Label.setText(text)
        else:
            self.axesTabWidget.removeTab(1)
            text = self.eps11Label.text()
            text = re.sub('>[vσ]', '>v', text)
            self.eps11Label.setText(text)
            text = self.eps12Label.text()
            text = re.sub('>[hπ]', '>h', text)
            self.eps12Label.setText(text)
        # Create the spectra selection model.
        self.spectraModel = SpectraModel(parent=self)
        self.spectraModel.setModelData(
            self.state.spectra.toCalculate,
            self.state.spectra.toCalculateChecked)
        self.spectraModel.checkStateChanged.connect(
            self.updateSpectraCheckState)
        self.spectraListView.setModel(self.spectraModel)
        self.spectraListView.selectionModel().setCurrentIndex(
            self.spectraModel.index(0, 0), QItemSelectionModel.Select)
        self.fkLineEdit.setValue(self.state.fk)
        self.gkLineEdit.setValue(self.state.gk)
        self.zetaLineEdit.setValue(self.state.zeta)
        # Create the Hamiltonian model.
        self.hamiltonianModel = HamiltonianModel(parent=self)
        self.hamiltonianModel.setModelData(self.state.hamiltonianData)
        self.hamiltonianModel.setNodesCheckState(self.state.hamiltonianState)
        if self.syncParametersCheckBox.isChecked():
            self.hamiltonianModel.setSyncState(True)
        else:
            self.hamiltonianModel.setSyncState(False)
        self.hamiltonianModel.dataChanged.connect(self.updateHamiltonianData)
        self.hamiltonianModel.itemCheckStateChanged.connect(
            self.updateHamiltonianNodeCheckState)
        # Assign the Hamiltonian model to the Hamiltonian terms view.
        self.hamiltonianTermsView.setModel(self.hamiltonianModel)
        self.hamiltonianTermsView.selectionModel().setCurrentIndex(
            self.hamiltonianModel.index(0, 0), QItemSelectionModel.Select)
        self.hamiltonianTermsView.selectionModel().selectionChanged.connect(
            self.selectedHamiltonianTermChanged)
        # Assign the Hamiltonian model to the Hamiltonian parameters view.
        self.hamiltonianParametersView.setModel(self.hamiltonianModel)
        self.hamiltonianParametersView.expandAll()
        self.hamiltonianParametersView.resizeAllColumnsToContents()
        self.hamiltonianParametersView.setColumnWidth(0, 130)
        self.hamiltonianParametersView.setRootIndex(
            self.hamiltonianTermsView.currentIndex())
        self.nPsisLineEdit.setValue(self.state.nPsis)
        self.nPsisAutoCheckBox.setChecked(self.state.nPsisAuto)
        self.nConfigurationsLineEdit.setValue(self.state.nConfigurations)
        self.nConfigurationsLineEdit.setEnabled(False)
        # Only enable the configurations edit when a ligand-hybridization
        # term is checked.
        name = '{}-Ligands Hybridization'.format(self.state.block)
        for termName in self.state.hamiltonianData:
            if name in termName:
                termState = self.state.hamiltonianState[termName]
                if termState == 0:
                    continue
                else:
                    self.nConfigurationsLineEdit.setEnabled(True)
        if not hasattr(self, 'resultsModel'):
            # Create the results model.
            self.resultsModel = ResultsModel(parent=self)
            self.resultsModel.itemNameChanged.connect(
                self.updateCalculationName)
            self.resultsModel.itemCheckStateChanged.connect(
                self.updatePlotWidget)
            self.resultsModel.dataChanged.connect(self.updatePlotWidget)
            self.resultsModel.dataChanged.connect(self.updateResultsView)
            # Assign the results model to the results view.
            self.resultsView.setModel(self.resultsModel)
            self.resultsView.selectionModel().selectionChanged.connect(
                self.selectedResultsChanged)
            self.resultsView.resizeColumnsToContents()
            self.resultsView.horizontalHeader().setSectionsMovable(False)
            self.resultsView.horizontalHeader().setSectionsClickable(False)
            if sys.platform == 'darwin':
                self.resultsView.horizontalHeader().setMaximumHeight(17)
            # Add a context menu to the view.
            self.resultsView.setContextMenuPolicy(Qt.CustomContextMenu)
            self.resultsView.customContextMenuRequested[QPoint].connect(
                self.showResultsContextMenu)
        if not hasattr(self, 'resultDetailsDialog'):
            self.resultDetailsDialog = QuantyResultDetailsDialog(parent=self)
        self.updateMainWindowTitle(self.state.baseName)
    def activateWidget(self):
        """Connect the input widgets' signals to their update slots."""
        # Changing any of the selectors recreates the whole state.
        self.elementComboBox.currentTextChanged.connect(self.resetState)
        self.chargeComboBox.currentTextChanged.connect(self.resetState)
        self.symmetryComboBox.currentTextChanged.connect(self.resetState)
        self.experimentComboBox.currentTextChanged.connect(
            self.resetState)
        self.edgeComboBox.currentTextChanged.connect(self.resetState)
        self.temperatureLineEdit.returnPressed.connect(
            self.updateTemperature)
        self.magneticFieldLineEdit.returnPressed.connect(
            self.updateMagneticField)
        self.xMinLineEdit.returnPressed.connect(self.updateXMin)
        self.xMaxLineEdit.returnPressed.connect(self.updateXMax)
        self.xNPointsLineEdit.returnPressed.connect(self.updateXNPoints)
        self.xLorentzianLineEdit.returnPressed.connect(
            self.updateXLorentzian)
        self.xGaussianLineEdit.returnPressed.connect(self.updateXGaussian)
        self.k1LineEdit.returnPressed.connect(self.updateIncidentWaveVector)
        self.eps11LineEdit.returnPressed.connect(
            self.updateIncidentPolarizationVectors)
        self.yMinLineEdit.returnPressed.connect(self.updateYMin)
        self.yMaxLineEdit.returnPressed.connect(self.updateYMax)
        self.yNPointsLineEdit.returnPressed.connect(self.updateYNPoints)
        self.yLorentzianLineEdit.returnPressed.connect(
            self.updateYLorentzian)
        self.yGaussianLineEdit.returnPressed.connect(self.updateYGaussian)
        self.fkLineEdit.returnPressed.connect(self.updateScaleFactors)
        self.gkLineEdit.returnPressed.connect(self.updateScaleFactors)
        self.zetaLineEdit.returnPressed.connect(self.updateScaleFactors)
        self.syncParametersCheckBox.toggled.connect(self.updateSyncParameters)
        self.nPsisAutoCheckBox.toggled.connect(self.updateNPsisAuto)
        self.nPsisLineEdit.returnPressed.connect(self.updateNPsis)
        self.nConfigurationsLineEdit.returnPressed.connect(
            self.updateConfigurations)
        self.saveInputAsPushButton.clicked.connect(self.saveInputAs)
        self.calculationPushButton.clicked.connect(self.runCalculation)
    def enableWidget(self, flag=True, result=None):
        """Enable or disable the input widgets.

        :param flag: Whether to enable (True) or disable (False) inputs.
        :param result: The currently selected result, if any; experimental
            data items get a slightly different enable pattern below.
        """
        self.elementComboBox.setEnabled(flag)
        self.chargeComboBox.setEnabled(flag)
        self.symmetryComboBox.setEnabled(flag)
        self.experimentComboBox.setEnabled(flag)
        self.edgeComboBox.setEnabled(flag)
        self.temperatureLineEdit.setEnabled(flag)
        self.magneticFieldLineEdit.setEnabled(flag)
        self.xMinLineEdit.setEnabled(flag)
        self.xMaxLineEdit.setEnabled(flag)
        self.xNPointsLineEdit.setEnabled(flag)
        self.xLorentzianLineEdit.setEnabled(flag)
        self.xGaussianLineEdit.setEnabled(flag)
        self.k1LineEdit.setEnabled(flag)
        self.eps11LineEdit.setEnabled(flag)
        self.yMinLineEdit.setEnabled(flag)
        self.yMaxLineEdit.setEnabled(flag)
        self.yNPointsLineEdit.setEnabled(flag)
        self.yLorentzianLineEdit.setEnabled(flag)
        self.yGaussianLineEdit.setEnabled(flag)
        self.spectraListView.setEnabled(flag)
        self.fkLineEdit.setEnabled(flag)
        self.gkLineEdit.setEnabled(flag)
        self.zetaLineEdit.setEnabled(flag)
        self.syncParametersCheckBox.setEnabled(flag)
        self.nPsisAutoCheckBox.setEnabled(flag)
        # In automatic mode the states line edit always stays locked.
        if self.nPsisAutoCheckBox.isChecked():
            self.nPsisLineEdit.setEnabled(False)
        else:
            self.nPsisLineEdit.setEnabled(flag)
        self.hamiltonianTermsView.setEnabled(flag)
        self.hamiltonianParametersView.setEnabled(flag)
        self.resultsView.setEnabled(flag)
        self.saveInputAsPushButton.setEnabled(flag)
        if result is None or isinstance(result, QuantyCalculation):
            self.nConfigurationsLineEdit.setEnabled(flag)
            self.resultsView.setEnabled(flag)
            self.calculationPushButton.setEnabled(True)
            self.resultDetailsDialog.enableWidget(flag)
        else:
            # Experimental data: keep the results view usable but lock
            # the calculation-specific widgets.
            self.nConfigurationsLineEdit.setEnabled(False)
            self.calculationPushButton.setEnabled(flag)
            self.resultsView.setEnabled(True)
            self.resultDetailsDialog.enableWidget(False)
def updateTemperature(self):
temperature = self.temperatureLineEdit.getValue()
if temperature < 0:
message = 'The temperature cannot be negative.'
self.getStatusBar().showMessage(message, self.timeout)
self.temperatureLineEdit.setValue(self.state.temperature)
return
elif temperature == 0:
self.nPsisAutoCheckBox.setChecked(False)
self.updateNPsisAuto()
self.nPsisLineEdit.setValue(1)
self.updateNPsis()
self.state.temperature = temperature
    def updateMagneticField(self):
        """Store the magnetic field and project it onto the wave vector.

        The field components (in eV) written into the 'Magnetic Field'
        Hamiltonian term are proportional to the components of the
        normalized incident wave vector.
        """
        magneticField = self.magneticFieldLineEdit.getValue()
        # Bohr magneton, ~5.788e-5 eV/T, used to convert Tesla to eV.
        TESLA_TO_EV = 5.788e-05
        # Normalize the current incident vector.
        k1 = np.array(self.state.k1)
        k1 = k1 / np.linalg.norm(k1)
        configurations = self.state.hamiltonianData['Magnetic Field']
        for configuration in configurations:
            parameters = configurations[configuration]
            for i, parameter in enumerate(parameters):
                value = float(magneticField * np.abs(k1[i]) * TESLA_TO_EV)
                # Avoid storing a negative zero.
                if abs(value) == 0.0:
                    value = 0.0
                configurations[configuration][parameter] = value
        self.hamiltonianModel.updateModelData(self.state.hamiltonianData)
        self.state.magneticField = magneticField
def updateXMin(self):
xMin = self.xMinLineEdit.getValue()
if xMin > self.state.xMax:
message = ('The lower energy limit cannot be larger than '
'the upper limit.')
self.getStatusBar().showMessage(message, self.timeout)
self.xMinLineEdit.setValue(self.state.xMin)
return
self.state.xMin = xMin
def updateXMax(self):
xMax = self.xMaxLineEdit.getValue()
if xMax < self.state.xMin:
message = ('The upper energy limit cannot be smaller than '
'the lower limit.')
self.getStatusBar().showMessage(message, self.timeout)
self.xMaxLineEdit.setValue(self.state.xMax)
return
self.state.xMax = xMax
    def updateXNPoints(self):
        """Validate the number of x-axis points and store it.

        The minimum number of points is derived from the energy range and
        the smallest Lorentzian broadening, so the energy step does not
        exceed that broadening.
        """
        xNPoints = self.xNPointsLineEdit.getValue()
        xMin = self.state.xMin
        xMax = self.state.xMax
        xLorentzianMin = float(self.state.xLorentzian[0])
        xNPointsMin = int(np.floor((xMax - xMin) / xLorentzianMin))
        if xNPoints < xNPointsMin:
            message = ('The number of points must be greater than '
                       '{}.'.format(xNPointsMin))
            self.getStatusBar().showMessage(message, self.timeout)
            self.xNPointsLineEdit.setValue(self.state.xNPoints)
            return
        self.state.xNPoints = xNPoints
def updateXLorentzian(self):
try:
xLorentzian = self.xLorentzianLineEdit.getList()
except ValueError:
message = 'Invalid data for the Lorentzian brodening.'
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(self.state.xLorentzian)
return
# Do some validation of the input value.
if len(xLorentzian) > 3:
message = 'The broadening can have at most three elements.'
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(self.state.xLorentzian)
return
try:
xLorentzianMin = float(xLorentzian[0])
except IndexError:
pass
else:
if xLorentzianMin < 0.1:
message = 'The broadening cannot be smaller than 0.1.'
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(
self.state.xLorentzian)
return
try:
xLorentzianMax = float(xLorentzian[1])
except IndexError:
pass
else:
if xLorentzianMax < 0.1:
message = 'The broadening cannot be smaller than 0.1.'
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(
self.state.xLorentzian)
try:
xLorentzianPivotEnergy = float(xLorentzian[2])
except IndexError:
pass
else:
xMin = self.state.xMin
xMax = self.state.xMax
if not (xMin < xLorentzianPivotEnergy < xMax):
message = ('The transition point must lie between the upper '
'and lower energy limits.')
self.getStatusBar().showMessage(message, self.timeout)
self.xLorentzianLineEdit.setList(
self.state.xLorentzian)
return
self.state.xLorentzian = xLorentzian
def updateXGaussian(self):
xGaussian = self.xGaussianLineEdit.getValue()
if xGaussian < 0:
message = 'The broadening cannot be negative.'
self.getStatusBar().showMessage(message, self.timeout)
self.xGaussianLineEdit.setValue(self.state.xGaussian)
return
self.state.xGaussian = xGaussian
    def updateIncidentWaveVector(self):
        """Validate the incident wave vector k1 and update everything
        that depends on it: the polarization vectors and the magnetic
        field projection.
        """
        try:
            k1 = self.k1LineEdit.getVector()
        except ValueError:
            message = 'Invalid data for the wave vector.'
            self.getStatusBar().showMessage(message, self.timeout)
            self.k1LineEdit.setVector(self.state.k1)
            return
        if np.all(np.array(k1) == 0):
            message = 'The wave vector cannot be null.'
            self.getStatusBar().showMessage(message, self.timeout)
            self.k1LineEdit.setVector(self.state.k1)
            return
        # The k1 value should be fine; save it.
        self.state.k1 = k1
        # The polarization vector must be correct.
        eps11 = self.eps11LineEdit.getVector()
        # If the wave and polarization vectors are not perpendicular, select a
        # new perpendicular vector for the polarization.
        if np.dot(np.array(k1), np.array(eps11)) != 0:
            # Both candidates below are perpendicular to k1 by construction;
            # the condition avoids picking a null vector.
            if k1[2] != 0 or (-k1[0] - k1[1]) != 0:
                eps11 = (k1[2], k1[2], -k1[0] - k1[1])
            else:
                eps11 = (-k1[2] - k1[1], k1[0], k1[0])
            self.eps11LineEdit.setVector(eps11)
        self.state.eps11 = eps11
        # Generate a second, perpendicular, polarization vector to the plane
        # defined by the wave vector and the first polarization vector.
        eps12 = np.cross(np.array(eps11), np.array(k1))
        eps12 = eps12.tolist()
        self.eps12LineEdit.setVector(eps12)
        self.state.eps12 = eps12
        # Update the magnetic field.
        self.updateMagneticField()
    def updateIncidentPolarizationVectors(self):
        """Validate the first polarization vector (must be non-null and
        perpendicular to k1) and recompute the second one from it.
        """
        try:
            eps11 = self.eps11LineEdit.getVector()
        except ValueError:
            message = 'Invalid data for the polarization vector.'
            self.getStatusBar().showMessage(message, self.timeout)
            self.eps11LineEdit.setVector(self.state.eps11)
            return
        if np.all(np.array(eps11) == 0):
            message = 'The polarization vector cannot be null.'
            self.getStatusBar().showMessage(message, self.timeout)
            self.eps11LineEdit.setVector(self.state.eps11)
            return
        k1 = self.state.k1
        if np.dot(np.array(k1), np.array(eps11)) != 0:
            message = ('The wave and polarization vectors need to be '
                       'perpendicular.')
            self.getStatusBar().showMessage(message, self.timeout)
            self.eps11LineEdit.setVector(self.state.eps11)
            return
        self.state.eps11 = eps11
        # Generate a second, perpendicular, polarization vector to the plane
        # defined by the wave vector and the first polarization vector.
        eps12 = np.cross(np.array(eps11), np.array(k1))
        eps12 = eps12.tolist()
        self.eps12LineEdit.setVector(eps12)
        self.state.eps12 = eps12
def updateYMin(self):
yMin = self.yMinLineEdit.getValue()
if yMin > self.state.yMax:
message = ('The lower energy limit cannot be larger than '
'the upper limit.')
self.getStatusBar().showMessage(message, self.timeout)
self.yMinLineEdit.setValue(self.state.yMin)
return
self.state.yMin = yMin
def updateYMax(self):
yMax = self.yMaxLineEdit.getValue()
if yMax < self.state.yMin:
message = ('The upper energy limit cannot be smaller than '
'the lower limit.')
self.getStatusBar().showMessage(message, self.timeout)
self.yMaxLineEdit.setValue(self.state.yMax)
return
self.state.yMax = yMax
    def updateYNPoints(self):
        """Validate the number of y-axis points and store it.

        Mirror of updateXNPoints for the second (RIXS) axis.
        """
        yNPoints = self.yNPointsLineEdit.getValue()
        yMin = self.state.yMin
        yMax = self.state.yMax
        yLorentzianMin = float(self.state.yLorentzian[0])
        yNPointsMin = int(np.floor((yMax - yMin) / yLorentzianMin))
        if yNPoints < yNPointsMin:
            message = ('The number of points must be greater than '
                       '{}.'.format(yNPointsMin))
            self.getStatusBar().showMessage(message, self.timeout)
            self.yNPointsLineEdit.setValue(self.state.yNPoints)
            return
        self.state.yNPoints = yNPoints
def updateYLorentzian(self):
try:
yLorentzian = self.yLorentzianLineEdit.getList()
except ValueError:
message = 'Invalid data for the Lorentzian brodening.'
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(self.state.yLorentzian)
return
# Do some validation of the input value.
if len(yLorentzian) > 3:
message = 'The broadening can have at most three elements.'
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(self.state.yLorentzian)
return
try:
yLorentzianMin = float(yLorentzian[0])
except IndexError:
pass
else:
if yLorentzianMin < 0.1:
message = 'The broadening cannot be smaller than 0.1.'
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(
self.state.yLorentzian)
return
try:
yLorentzianMax = float(yLorentzian[1])
except IndexError:
pass
else:
if yLorentzianMax < 0.1:
message = 'The broadening cannot be smaller than 0.1.'
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(
self.state.yLorentzian)
try:
yLorentzianPivotEnergy = float(yLorentzian[2])
except IndexError:
pass
else:
yMin = self.state.yMin
yMax = self.state.yMax
if not (yMin < yLorentzianPivotEnergy < yMax):
message = ('The transition point must lie between the upper '
'and lower energy limits.')
self.getStatusBar().showMessage(message, self.timeout)
self.yLorentzianLineEdit.setList(
self.state.yLorentzian)
return
self.state.yLorentzian = list(map(float, yLorentzian))
def updateYGaussian(self):
yGaussian = self.yGaussianLineEdit.getValue()
if yGaussian < 0:
message = 'The broadening cannot be negative.'
self.getStatusBar().showMessage(message, self.timeout)
self.yGaussianLineEdit.setValue(self.state.yGaussian)
return
self.state.yGaussian = yGaussian
    def updateSpectraCheckState(self, checkedItems):
        # Persist which spectra were ticked for calculation in the list view.
        self.state.spectra.toCalculateChecked = checkedItems
    def updateScaleFactors(self):
        """Apply the Fk, Gk, and ζ scale factors to every scalable
        parameter of the Atomic and Hybridization Hamiltonian terms.

        Parameters stored as [value, scale] pairs get their scale factor
        replaced according to the parameter-name prefix (F → fk, G → gk,
        ζ → zeta); scalar parameters are skipped.
        """
        fk = self.fkLineEdit.getValue()
        gk = self.gkLineEdit.getValue()
        zeta = self.zetaLineEdit.getValue()
        if fk < 0 or gk < 0 or zeta < 0:
            message = 'The scale factors cannot be negative.'
            self.getStatusBar().showMessage(message, self.timeout)
            self.fkLineEdit.setValue(self.state.fk)
            self.gkLineEdit.setValue(self.state.gk)
            self.zetaLineEdit.setValue(self.state.zeta)
            return
        self.state.fk = fk
        self.state.gk = gk
        self.state.zeta = zeta
        # TODO: This should be already updated to the most recent data.
        # self.state.hamiltonianData = self.hamiltonianModel.getModelData()
        terms = self.state.hamiltonianData
        for term in terms:
            if not ('Atomic' in term or 'Hybridization' in term):
                continue
            configurations = terms[term]
            for configuration in configurations:
                parameters = configurations[configuration]
                for parameter in parameters:
                    # Change the scale factors if the parameter has one.
                    try:
                        value, _ = parameters[parameter]
                    except TypeError:
                        # Scalar parameter, i.e. no scale factor attached.
                        continue
                    if parameter.startswith('F'):
                        terms[term][configuration][parameter] = [value, fk]
                    elif parameter.startswith('G'):
                        terms[term][configuration][parameter] = [value, gk]
                    elif parameter.startswith('ζ'):
                        terms[term][configuration][parameter] = [value, zeta]
        self.hamiltonianModel.updateModelData(self.state.hamiltonianData)
        # I have no idea why this is needed. Both views should update after
        # the above function call.
        self.hamiltonianTermsView.viewport().repaint()
        self.hamiltonianParametersView.viewport().repaint()
def updateNPsisAuto(self):
nPsisAuto = int(self.nPsisAutoCheckBox.isChecked())
if nPsisAuto:
self.nPsisLineEdit.setValue(self.state.nPsisMax)
self.nPsisLineEdit.setEnabled(False)
else:
self.nPsisLineEdit.setEnabled(True)
self.state.nPsisAuto = nPsisAuto
def updateNPsis(self):
nPsis = self.nPsisLineEdit.getValue()
if nPsis <= 0:
message = 'The number of states must be larger than zero.'
self.getStatusBar().showMessage(message, self.timeout)
self.nPsisLineEdit.setValue(self.state.nPsis)
return
if nPsis > self.state.nPsisMax:
message = 'The selected number of states exceeds the maximum.'
self.getStatusBar().showMessage(message, self.timeout)
self.nPsisLineEdit.setValue(self.state.nPsisMax)
nPsis = self.state.nPsisMax
self.state.nPsis = nPsis
    def updateSyncParameters(self, flag):
        # Propagate the "synchronize parameters" checkbox to the model.
        self.hamiltonianModel.setSyncState(flag)
    def updateHamiltonianData(self):
        # Pull the (possibly user-edited) parameters back from the model.
        self.state.hamiltonianData = self.hamiltonianModel.getModelData()
    def updateHamiltonianNodeCheckState(self, index, state):
        """Keep Hamiltonian term check states consistent after a toggle.

        Only one type of ligand hybridization (LMCT or MLCT) may be active
        at a time; toggling a hybridization term also updates the maximum
        and current number of configurations.
        """
        toggledTerm = index.data()
        states = self.hamiltonianModel.getNodesCheckState()
        # Allow only one type of hybridization with the ligands, either
        # LMCT or MLCT.
        for term in states:
            if 'LMCT' in term and 'MLCT' in toggledTerm:
                states[term] = 0
            elif 'MLCT' in term and 'LMCT' in toggledTerm:
                states[term] = 0
        self.state.hamiltonianState = states
        self.hamiltonianModel.setNodesCheckState(states)
        # Determine the maximum number of allowed configurations.
        if 'LMCT' in toggledTerm:
            if 'd' in self.state.block:
                self.state.nConfigurationsMax = 10 - self.state.nElectrons + 1
            elif 'f' in self.state.block:
                self.state.nConfigurationsMax = 14 - self.state.nElectrons + 1
        elif 'MLCT' in toggledTerm:
            self.state.nConfigurationsMax = self.state.nElectrons + 1
        term = '{}-Ligands Hybridization'.format(self.state.block)
        if term in index.data():
            # NOTE(review): only the unchecked (0) and checked (2) states
            # are handled; a partially-checked state (1) would leave
            # nConfigurations unbound below — confirm tristate is never
            # used for these nodes.
            if state == 0:
                nConfigurations = 1
                self.nConfigurationsLineEdit.setEnabled(False)
            elif state == 2:
                if self.state.nConfigurationsMax == 1:
                    nConfigurations = 1
                else:
                    nConfigurations = 2
                self.nConfigurationsLineEdit.setEnabled(True)
            self.nConfigurationsLineEdit.setValue(nConfigurations)
            self.state.nConfigurations = nConfigurations
def updateConfigurations(self, *args):
nConfigurations = self.nConfigurationsLineEdit.getValue()
if nConfigurations > self.state.nConfigurationsMax:
message = 'The maximum number of configurations is {}.'.format(
self.state.nConfigurationsMax)
self.getStatusBar().showMessage(message, self.timeout)
self.nConfigurationsLineEdit.setValue(
self.state.nConfigurationsMax)
nConfigurations = self.state.nConfigurationsMax
self.state.nConfigurations = nConfigurations
    def saveInput(self):
        """Write the Quanty input file to the current folder.

        Shows a status-bar message and re-raises on failure (missing or
        non-writable folder), so callers can simply abort on exception.
        """
        # TODO: If the user changes a value in a widget without pressing Return
        # before running the calculation, the values are not updated.
        self.state.verbosity = self.getVerbosity()
        self.state.denseBorder = self.getDenseBorder()
        path = self.getCurrentPath()
        try:
            os.chdir(path)
        except OSError as e:
            message = ('The specified folder doesn\'t exist. Use the \'Save '
                       'Input As...\' button to save the input file to an '
                       'alternative location.')
            self.getStatusBar().showMessage(message, 2 * self.timeout)
            raise e
        # The folder might exist, but is not writable.
        try:
            self.state.saveInput()
        except (IOError, OSError) as e:
            message = 'Failed to write the Quanty input file.'
            self.getStatusBar().showMessage(message, self.timeout)
            raise e
    def saveInputAs(self):
        """Ask for a file name, then write the Quanty input file there."""
        path, _ = QFileDialog.getSaveFileName(
            self, 'Save Quanty Input',
            os.path.join(self.getCurrentPath(), '{}.lua'.format(
                self.state.baseName)), 'Quanty Input File (*.lua)')
        if path:
            basename = os.path.basename(path)
            self.state.baseName, _ = os.path.splitext(basename)
            self.setCurrentPath(path)
            try:
                self.saveInput()
            except (IOError, OSError) as e:
                # saveInput has already reported the error; just abort.
                return
            self.updateMainWindowTitle(self.state.baseName)
def saveAllResultsAs(self):
path, _ = QFileDialog.getSaveFileName(
self, 'Save Results',
os.path.join(self.getCurrentPath(), '{}.pkl'.format(
'untitled')), 'Pickle File (*.pkl)')
if path:
self.setCurrentPath(path)
results = self.resultsModel.getAllItems()
results.reverse()
with open(path, 'wb') as p:
pickle.dump(results, p, pickle.HIGHEST_PROTOCOL)
    def saveSelectedResultsAs(self):
        """Prompt for a path and pickle the selected results, oldest first."""
        path, _ = QFileDialog.getSaveFileName(
            self, 'Save Results',
            os.path.join(self.getCurrentPath(), '{}.pkl'.format(
                'untitled')), 'Pickle File (*.pkl)')
        if path:
            self.setCurrentPath(path)
            indexes = self.resultsView.selectedIndexes()
            results = self.resultsModel.getSelectedItems(indexes)
            results.reverse()
            with open(path, 'wb') as p:
                pickle.dump(results, p, pickle.HIGHEST_PROTOCOL)
    def resetState(self):
        """Create a fresh calculation from the current combo-box values
        and repopulate the widget."""
        element = self.elementComboBox.currentText()
        charge = self.chargeComboBox.currentText()
        symmetry = self.symmetryComboBox.currentText()
        experiment = self.experimentComboBox.currentText()
        edge = self.edgeComboBox.currentText()
        self.state = QuantyCalculation(
            element=element, charge=charge, symmetry=symmetry,
            experiment=experiment, edge=edge)
        self.resultsView.selectionModel().clearSelection()
        self.populateWidget()
        self.updateMainWindowTitle(self.state.baseName)
        self.resultDetailsDialog.clear()
def removeSelectedCalculations(self):
indexes = self.resultsView.selectedIndexes()
if not indexes:
self.getPlotWidget().reset()
return
self.resultsModel.removeItems(indexes)
# self.resultsView.reset()
    def removeAllResults(self):
        # Clear both the results model and the plot.
        self.resultsModel.reset()
        self.getPlotWidget().reset()
    def loadResults(self):
        """Load pickled results from disk and switch to the results page."""
        path, _ = QFileDialog.getOpenFileName(
            self, 'Load Results',
            self.getCurrentPath(), 'Pickle File (*.pkl)')
        if path:
            self.setCurrentPath(path)
            with open(path, 'rb') as p:
                # NOTE(review): unpickling executes arbitrary code; only
                # open .pkl files from trusted sources.
                self.resultsModel.appendItems(pickle.load(p))
            self.updateMainWindowTitle(self.state.baseName)
            self.quantyToolBox.setCurrentWidget(self.resultsPage)
def runCalculation(self):
path = self.getQuantyPath()
if path:
command = path
else:
message = ('The path to the Quanty executable is not set. '
'Please use the preferences menu to set it.')
self.getStatusBar().showMessage(message, 2 * self.timeout)
return
# Test the executable.
with open(os.devnull, 'w') as f:
try:
subprocess.call(command, stdout=f, stderr=f)
except OSError as e:
if e.errno == os.errno.ENOENT:
message = ('The Quanty executable is not working '
'properly. Is the PATH set correctly?')
self.getStatusBar().showMessage(message, 2 * self.timeout)
return
else:
raise e
# Write the input file to disk.
try:
self.saveInput()
except (IOError, OSError) as e:
return
# Disable the widget while the calculation is running.
self.enableWidget(False)
self.state.startingTime = datetime.datetime.now()
# Run Quanty using QProcess.
self.process = QProcess()
self.process.start(command, (self.state.baseName + '.lua', ))
message = (
'Running "Quanty {}" in {}.'.format(
self.state.baseName + '.lua', os.getcwd()))
self.getStatusBar().showMessage(message)
if sys.platform == 'win32' and self.process.waitForStarted():
self.updateCalculationPushButton()
else:
self.process.started.connect(self.updateCalculationPushButton)
self.process.readyReadStandardOutput.connect(self.handleOutputLogging)
self.process.finished.connect(self.processCalculation)
    def updateCalculationPushButton(self, kind='stop'):
        """Switch the calculation button between 'Run' and 'Stop' modes."""
        # Drop previous connections so the button triggers a single slot.
        self.calculationPushButton.disconnect()
        if kind == 'run':
            icon = QIcon(resourceFileName('icons:play.svg'))
            self.calculationPushButton.setIcon(icon)
            self.calculationPushButton.setText('Run')
            self.calculationPushButton.setToolTip('Run Quanty.')
            self.calculationPushButton.clicked.connect(self.runCalculation)
        elif kind == 'stop':
            icon = QIcon(resourceFileName('icons:stop.svg'))
            self.calculationPushButton.setIcon(icon)
            self.calculationPushButton.setText('Stop')
            self.calculationPushButton.setToolTip('Stop Quanty.')
            self.calculationPushButton.clicked.connect(self.stopCalculation)
        else:
            pass
    def stopCalculation(self):
        # Kill the running Quanty process; processCalculation() then reports
        # it as stopped via the process's exit status.
        self.process.kill()
def processCalculation(self, *args):
startingTime = self.state.startingTime
# When did I finish?
endingTime = datetime.datetime.now()
self.state.endingTime = endingTime
# Re-enable the widget when the calculation has finished.
self.enableWidget(True)
# Reset the calculation button.
self.updateCalculationPushButton('run')
# Evaluate the exit code and status of the process.
exitStatus = self.process.exitStatus()
exitCode = self.process.exitCode()
if exitStatus == 0 and exitCode == 0:
message = ('Quanty has finished successfully in ')
delta = (endingTime - startingTime).total_seconds()
hours, reminder = divmod(delta, 3600)
minutes, seconds = divmod(reminder, 60)
seconds = round(seconds, 2)
if hours > 0:
message += '{} hours {} minutes and {} seconds.'.format(
hours, minutes, seconds)
elif minutes > 0:
message += '{} minutes and {} seconds.'.format(
minutes, seconds)
else:
message += '{} seconds.'.format(seconds)
self.getStatusBar().showMessage(message, self.timeout)
elif exitStatus == 0 and exitCode == 1:
self.handleErrorLogging()
message = (
'Quanty has finished unsuccessfully. '
'Check the logging window for more details.')
self.getStatusBar().showMessage(message, self.timeout)
return
# exitCode is platform dependent; exitStatus is always 1.
elif exitStatus == 1:
message = 'Quanty was stopped.'
self.getStatusBar().showMessage(message, self.timeout)
return
# Scroll to the bottom of the logger widget.
scrollBar = self.getLoggerWidget().verticalScrollBar()
scrollBar.setValue(scrollBar.maximum())
# Load the spectra from disk.
self.state.spectra.loadFromDisk(self.state)
# If the calculated spectrum is an image, uncheck all the other
# calculations. This way the current result can be disaplyed in the
# plot widget.
if self.state.experiment in ['RIXS', ]:
self.resultsModel.uncheckAllItems()
# Once all processing is done, store the state in the
# results model. Upon finishing this, a signal is emitted by the
# model which triggers some updates to be performed.
self.state.isChecked = True
self.resultsModel.appendItems(self.state)
# If the "Hamiltonian Setup" page is currently selected, when the
# current widget is set to the "Results Page", the former is not
# displayed. To avoid this I switch first to the "General Setup" page.
self.quantyToolBox.setCurrentWidget(self.generalPage)
self.quantyToolBox.setCurrentWidget(self.resultsPage)
self.resultsView.setFocus()
# Remove files if requested.
if self.doRemoveFiles():
os.remove('{}.lua'.format(self.state.baseName))
spectra = glob.glob('{}_*.spec'.format(self.state.baseName))
for spectrum in spectra:
os.remove(spectrum)
    def selectedHamiltonianTermChanged(self):
        # Show the parameters of the term selected in the terms view.
        index = self.hamiltonianTermsView.currentIndex()
        self.hamiltonianParametersView.setRootIndex(index)
    def showResultsContextMenu(self, position):
        """Build and show the context menu of the results view.

        Selection-dependent actions are disabled when nothing is
        selected; model-wide actions are disabled when the model is
        empty.
        """
        icon = QIcon(resourceFileName('icons:clipboard.svg'))
        self.showDetailsAction = QAction(
            icon, 'Show Details', self, triggered=self.showResultDetailsDialog)
        icon = QIcon(resourceFileName('icons:save.svg'))
        self.saveSelectedResultsAsAction = QAction(
            icon, 'Save Selected Results As...', self,
            triggered=self.saveSelectedResultsAs)
        self.saveAllResultsAsAction = QAction(
            icon, 'Save All Results As...', self,
            triggered=self.saveAllResultsAs)
        icon = QIcon(resourceFileName('icons:trash.svg'))
        self.removeSelectedResultsAction = QAction(
            icon, 'Remove Selected Results', self,
            triggered=self.removeSelectedCalculations)
        self.removeAllResultsAction = QAction(
            icon, 'Remove All Results', self, triggered=self.removeAllResults)
        icon = QIcon(resourceFileName('icons:folder-open.svg'))
        self.loadResultsAction = QAction(
            icon, 'Load Results', self, triggered=self.loadResults)
        self.resultsContextMenu = QMenu('Results Context Menu', self)
        self.resultsContextMenu.addAction(self.showDetailsAction)
        self.resultsContextMenu.addSeparator()
        self.resultsContextMenu.addAction(self.saveSelectedResultsAsAction)
        self.resultsContextMenu.addAction(self.removeSelectedResultsAction)
        self.resultsContextMenu.addSeparator()
        self.resultsContextMenu.addAction(self.saveAllResultsAsAction)
        self.resultsContextMenu.addAction(self.removeAllResultsAction)
        self.resultsContextMenu.addSeparator()
        self.resultsContextMenu.addAction(self.loadResultsAction)
        if not self.resultsView.selectedIndexes():
            self.removeSelectedResultsAction.setEnabled(False)
            self.saveSelectedResultsAsAction.setEnabled(False)
        if not self.resultsModel.modelData:
            self.showDetailsAction.setEnabled(False)
            self.saveAllResultsAsAction.setEnabled(False)
            self.removeAllResultsAction.setEnabled(False)
        self.resultsContextMenu.exec_(self.resultsView.mapToGlobal(position))
def getLastSelectedResultsModelIndex(self):
rows = self.resultsView.selectionModel().selectedRows()
try:
index = rows[-1]
except IndexError:
index = None
return index
    def selectedResultsChanged(self):
        """React to a change of selection in the results view by loading
        the selected result into the widget."""
        indexes = self.resultsView.selectedIndexes()
        # NOTE(review): selectedIndexes() returns one index per cell;
        # confirm this early return behaves as intended for multi-column
        # single-row selections.
        if len(indexes) > 1:
            return
        index = self.getLastSelectedResultsModelIndex()
        if index is None:
            result = None
        else:
            result = self.resultsModel.getItem(index)
        if isinstance(result, QuantyCalculation):
            self.enableWidget(True, result)
            self.state = result
            self.populateWidget()
            self.resultDetailsDialog.populateWidget()
        elif isinstance(result, ExperimentalData):
            # Experimental data cannot be edited; lock the input widgets.
            self.enableWidget(False, result)
            self.updateMainWindowTitle(result.baseName)
            self.resultDetailsDialog.clear()
        else:
            self.enableWidget(True, result)
            self.resetState()
    def updateResultsModelData(self):
        """Push the current state back into the last selected result."""
        index = self.getLastSelectedResultsModelIndex()
        if index is None:
            return
        self.resultsModel.updateItem(index, self.state)
        self.resultsView.viewport().repaint()
    def updatePlotWidget(self):
        """Redraw all checked results in the plot widget.

        Updating the plotting widget should not require any information
        about the current state of the widget.
        """
        pw = self.getPlotWidget()
        pw.reset()
        results = self.resultsModel.getCheckedItems()
        for result in results:
            if isinstance(result, ExperimentalData):
                spectrum = result.spectra['Expt']
                spectrum.legend = '{}-{}'.format(result.index, 'Expt')
                spectrum.xLabel = 'X'
                spectrum.yLabel = 'Y'
                spectrum.plot(plotWidget=pw)
            else:
                # Only one RIXS result (an image) can be displayed at a time.
                if len(results) > 1 and result.experiment in ['RIXS', ]:
                    continue
                for spectrum in result.spectra.processed:
                    spectrum.legend = '{}-{}'.format(
                        result.index, spectrum.shortName)
                    if spectrum.name in result.spectra.toPlotChecked:
                        spectrum.plot(plotWidget=pw)
    def showResultDetailsDialog(self):
        """Show the result details dialog and bring it to the front."""
        self.resultDetailsDialog.show()
        self.resultDetailsDialog.raise_()
    def updateCalculationName(self, name):
        """Propagate a renamed result to the state, window title, and
        details dialog."""
        self.state.baseName = name
        self.updateMainWindowTitle(name)
        self.resultDetailsDialog.updateTitle(name)
        if isinstance(self.state, QuantyCalculation):
            self.resultDetailsDialog.updateSummary()
    def loadExperimentalData(self):
        """Load an experimental spectrum from a .dat file into the
        results model."""
        path, _ = QFileDialog.getOpenFileName(
            self, 'Load Experimental Data',
            self.getCurrentPath(), 'Data File (*.dat)')
        if path:
            result = ExperimentalData(path)
            if result.spectra is not None:
                self.resultsModel.appendItems([result])
            else:
                message = ('Failed to read experimental data. Please check '
                           'that the file is properly formatted.')
                self.getStatusBar().showMessage(message, self.timeout)
def handleOutputLogging(self):
self.process.setReadChannel(QProcess.StandardOutput)
data = self.process.readAllStandardOutput().data()
data = data.decode('utf-8').rstrip()
self.getLoggerWidget().appendPlainText(data)
self.state.output = self.state.output + data
    def handleErrorLogging(self):
        """Append Quanty's stderr output to the logger widget."""
        self.process.setReadChannel(QProcess.StandardError)
        data = self.process.readAllStandardError().data()
        self.getLoggerWidget().appendPlainText(data.decode('utf-8'))
def updateMainWindowTitle(self, name=None):
if name is None:
title = 'Crispy'
else:
title = 'Crispy - {}'.format(name)
self.setMainWindowTitle(title)
    def setMainWindowTitle(self, title):
        # The dock widget's parent is the application's main window.
        self.parent().setWindowTitle(title)
    def getStatusBar(self):
        # Convenience accessor for the main window's status bar.
        return self.parent().statusBar()
    def getPlotWidget(self):
        # Convenience accessor for the main window's plot widget.
        return self.parent().plotWidget
    def getLoggerWidget(self):
        # Convenience accessor for the main window's logging widget.
        return self.parent().loggerWidget
    def setCurrentPath(self, path):
        # Remember the directory (not the file itself) for future dialogs.
        path = os.path.dirname(path)
        self.settings.setValue('CurrentPath', path)
def getCurrentPath(self):
path = self.settings.value('CurrentPath')
if path is None:
path = os.path.expanduser('~')
return path
    def getQuantyPath(self):
        # Path to the Quanty executable, set in the preferences dialog.
        return self.settings.value('Quanty/Path')
    def getVerbosity(self):
        # Verbosity setting written into the Quanty input file.
        return self.settings.value('Quanty/Verbosity')
    def getDenseBorder(self):
        # Quanty 'DenseBorder' option read from the application settings.
        return self.settings.value('Quanty/DenseBorder')
    def doRemoveFiles(self):
        # Whether generated .lua/.spec files are deleted after a run.
        return self.settings.value('Quanty/RemoveFiles', True, type=bool)
|
mretegan/crispy | crispy/gui/quanty.py | QuantyDockWidget.updatePlotWidget | python | def updatePlotWidget(self):
pw = self.getPlotWidget()
pw.reset()
results = self.resultsModel.getCheckedItems()
for result in results:
if isinstance(result, ExperimentalData):
spectrum = result.spectra['Expt']
spectrum.legend = '{}-{}'.format(result.index, 'Expt')
spectrum.xLabel = 'X'
spectrum.yLabel = 'Y'
spectrum.plot(plotWidget=pw)
else:
if len(results) > 1 and result.experiment in ['RIXS', ]:
continue
for spectrum in result.spectra.processed:
spectrum.legend = '{}-{}'.format(
result.index, spectrum.shortName)
if spectrum.name in result.spectra.toPlotChecked:
spectrum.plot(plotWidget=pw) | Updating the plotting widget should not require any information
about the current state of the widget. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/quanty.py#L1832-L1854 | null | class QuantyDockWidget(QDockWidget):
    def __init__(self, parent=None):
        """Set up the dock widget: load the UI, read the settings, and
        populate it with a default calculation."""
        super(QuantyDockWidget, self).__init__(parent=parent)
        # Load the external .ui file for the widget.
        path = resourceFileName('uis:quanty/main.ui')
        loadUi(path, baseinstance=self, package='crispy.gui')
        # Load the settings from file.
        config = Config()
        self.settings = config.read()
        # Set the state object
        self.state = QuantyCalculation()
        self.populateWidget()
        self.activateWidget()
        # Duration (ms) of transient status-bar messages.
        self.timeout = 4000
        self.hamiltonianSplitter.setSizes((150, 300, 10))
    def populateWidget(self):
        """
        Populate the widget using data stored in the state
        object. The order in which the individual widgets are populated
        follows their arrangement.
        The models are recreated every time the function is called.
        This might seem to be an overkill, but in practice it is very fast.
        Don't try to move the model creation outside this function; is not
        worth the effort, and there is nothing to gain from it.
        """
        self.elementComboBox.setItems(self.state._elements, self.state.element)
        self.chargeComboBox.setItems(self.state._charges, self.state.charge)
        self.symmetryComboBox.setItems(
            self.state._symmetries, self.state.symmetry)
        self.experimentComboBox.setItems(
            self.state._experiments, self.state.experiment)
        self.edgeComboBox.setItems(self.state._edges, self.state.edge)
        self.temperatureLineEdit.setValue(self.state.temperature)
        self.magneticFieldLineEdit.setValue(self.state.magneticField)
        # X axis widgets.
        self.axesTabWidget.setTabText(0, str(self.state.xLabel))
        self.xMinLineEdit.setValue(self.state.xMin)
        self.xMaxLineEdit.setValue(self.state.xMax)
        self.xNPointsLineEdit.setValue(self.state.xNPoints)
        self.xLorentzianLineEdit.setList(self.state.xLorentzian)
        self.xGaussianLineEdit.setValue(self.state.xGaussian)
        self.k1LineEdit.setVector(self.state.k1)
        self.eps11LineEdit.setVector(self.state.eps11)
        self.eps12LineEdit.setVector(self.state.eps12)
        # The second (y) axis exists only for RIXS; show or hide its tab and
        # adjust the polarization labels accordingly.
        if self.state.experiment in ['RIXS', ]:
            if self.axesTabWidget.count() == 1:
                tab = self.axesTabWidget.findChild(QWidget, 'yTab')
                self.axesTabWidget.addTab(tab, tab.objectName())
            self.axesTabWidget.setTabText(1, self.state.yLabel)
            self.yMinLineEdit.setValue(self.state.yMin)
            self.yMaxLineEdit.setValue(self.state.yMax)
            self.yNPointsLineEdit.setValue(self.state.yNPoints)
            self.yLorentzianLineEdit.setList(self.state.yLorentzian)
            self.yGaussianLineEdit.setValue(self.state.yGaussian)
            self.k2LineEdit.setVector(self.state.k2)
            self.eps21LineEdit.setVector(self.state.eps21)
            self.eps22LineEdit.setVector(self.state.eps22)
            text = self.eps11Label.text()
            text = re.sub('>[vσ]', '>σ', text)
            self.eps11Label.setText(text)
            text = self.eps12Label.text()
            text = re.sub('>[hπ]', '>π', text)
            self.eps12Label.setText(text)
        else:
            self.axesTabWidget.removeTab(1)
            text = self.eps11Label.text()
            text = re.sub('>[vσ]', '>v', text)
            self.eps11Label.setText(text)
            text = self.eps12Label.text()
            text = re.sub('>[hπ]', '>h', text)
            self.eps12Label.setText(text)
        # Create the spectra selection model.
        self.spectraModel = SpectraModel(parent=self)
        self.spectraModel.setModelData(
            self.state.spectra.toCalculate,
            self.state.spectra.toCalculateChecked)
        self.spectraModel.checkStateChanged.connect(
            self.updateSpectraCheckState)
        self.spectraListView.setModel(self.spectraModel)
        self.spectraListView.selectionModel().setCurrentIndex(
            self.spectraModel.index(0, 0), QItemSelectionModel.Select)
        self.fkLineEdit.setValue(self.state.fk)
        self.gkLineEdit.setValue(self.state.gk)
        self.zetaLineEdit.setValue(self.state.zeta)
        # Create the Hamiltonian model.
        self.hamiltonianModel = HamiltonianModel(parent=self)
        self.hamiltonianModel.setModelData(self.state.hamiltonianData)
        self.hamiltonianModel.setNodesCheckState(self.state.hamiltonianState)
        if self.syncParametersCheckBox.isChecked():
            self.hamiltonianModel.setSyncState(True)
        else:
            self.hamiltonianModel.setSyncState(False)
        self.hamiltonianModel.dataChanged.connect(self.updateHamiltonianData)
        self.hamiltonianModel.itemCheckStateChanged.connect(
            self.updateHamiltonianNodeCheckState)
        # Assign the Hamiltonian model to the Hamiltonian terms view.
        self.hamiltonianTermsView.setModel(self.hamiltonianModel)
        self.hamiltonianTermsView.selectionModel().setCurrentIndex(
            self.hamiltonianModel.index(0, 0), QItemSelectionModel.Select)
        self.hamiltonianTermsView.selectionModel().selectionChanged.connect(
            self.selectedHamiltonianTermChanged)
        # Assign the Hamiltonian model to the Hamiltonian parameters view.
        self.hamiltonianParametersView.setModel(self.hamiltonianModel)
        self.hamiltonianParametersView.expandAll()
        self.hamiltonianParametersView.resizeAllColumnsToContents()
        self.hamiltonianParametersView.setColumnWidth(0, 130)
        self.hamiltonianParametersView.setRootIndex(
            self.hamiltonianTermsView.currentIndex())
        self.nPsisLineEdit.setValue(self.state.nPsis)
        self.nPsisAutoCheckBox.setChecked(self.state.nPsisAuto)
        self.nConfigurationsLineEdit.setValue(self.state.nConfigurations)
        # Enable the configurations edit only when a ligand-hybridization
        # term is checked.
        self.nConfigurationsLineEdit.setEnabled(False)
        name = '{}-Ligands Hybridization'.format(self.state.block)
        for termName in self.state.hamiltonianData:
            if name in termName:
                termState = self.state.hamiltonianState[termName]
                if termState == 0:
                    continue
                else:
                    self.nConfigurationsLineEdit.setEnabled(True)
        if not hasattr(self, 'resultsModel'):
            # Create the results model.
            self.resultsModel = ResultsModel(parent=self)
            self.resultsModel.itemNameChanged.connect(
                self.updateCalculationName)
            self.resultsModel.itemCheckStateChanged.connect(
                self.updatePlotWidget)
            self.resultsModel.dataChanged.connect(self.updatePlotWidget)
            self.resultsModel.dataChanged.connect(self.updateResultsView)
            # Assign the results model to the results view.
            self.resultsView.setModel(self.resultsModel)
            self.resultsView.selectionModel().selectionChanged.connect(
                self.selectedResultsChanged)
            self.resultsView.resizeColumnsToContents()
            self.resultsView.horizontalHeader().setSectionsMovable(False)
            self.resultsView.horizontalHeader().setSectionsClickable(False)
            if sys.platform == 'darwin':
                self.resultsView.horizontalHeader().setMaximumHeight(17)
            # Add a context menu to the view.
            self.resultsView.setContextMenuPolicy(Qt.CustomContextMenu)
            self.resultsView.customContextMenuRequested[QPoint].connect(
                self.showResultsContextMenu)
        if not hasattr(self, 'resultDetailsDialog'):
            self.resultDetailsDialog = QuantyResultDetailsDialog(parent=self)
        self.updateMainWindowTitle(self.state.baseName)
def activateWidget(self):
    """Connect the widget's signals to their handler slots.

    Called once the widget becomes active; every editable control is
    wired either to a validator/updater or to :meth:`resetState`.
    """
    # Changing any of these combo boxes invalidates the current state.
    self.elementComboBox.currentTextChanged.connect(self.resetState)
    self.chargeComboBox.currentTextChanged.connect(self.resetState)
    self.symmetryComboBox.currentTextChanged.connect(self.resetState)
    self.experimentComboBox.currentTextChanged.connect(
        self.resetState)
    self.edgeComboBox.currentTextChanged.connect(self.resetState)
    # Line edits are validated/committed when the user presses Return.
    self.temperatureLineEdit.returnPressed.connect(
        self.updateTemperature)
    self.magneticFieldLineEdit.returnPressed.connect(
        self.updateMagneticField)
    self.xMinLineEdit.returnPressed.connect(self.updateXMin)
    self.xMaxLineEdit.returnPressed.connect(self.updateXMax)
    self.xNPointsLineEdit.returnPressed.connect(self.updateXNPoints)
    self.xLorentzianLineEdit.returnPressed.connect(
        self.updateXLorentzian)
    self.xGaussianLineEdit.returnPressed.connect(self.updateXGaussian)
    self.k1LineEdit.returnPressed.connect(self.updateIncidentWaveVector)
    self.eps11LineEdit.returnPressed.connect(
        self.updateIncidentPolarizationVectors)
    self.yMinLineEdit.returnPressed.connect(self.updateYMin)
    self.yMaxLineEdit.returnPressed.connect(self.updateYMax)
    self.yNPointsLineEdit.returnPressed.connect(self.updateYNPoints)
    self.yLorentzianLineEdit.returnPressed.connect(
        self.updateYLorentzian)
    self.yGaussianLineEdit.returnPressed.connect(self.updateYGaussian)
    self.fkLineEdit.returnPressed.connect(self.updateScaleFactors)
    self.gkLineEdit.returnPressed.connect(self.updateScaleFactors)
    self.zetaLineEdit.returnPressed.connect(self.updateScaleFactors)
    self.syncParametersCheckBox.toggled.connect(self.updateSyncParameters)
    self.nPsisAutoCheckBox.toggled.connect(self.updateNPsisAuto)
    self.nPsisLineEdit.returnPressed.connect(self.updateNPsis)
    self.nConfigurationsLineEdit.returnPressed.connect(
        self.updateConfigurations)
    self.saveInputAsPushButton.clicked.connect(self.saveInputAs)
    self.calculationPushButton.clicked.connect(self.runCalculation)
def enableWidget(self, flag=True, result=None):
    """Enable or disable the input controls.

    :param flag: True to enable the controls, False to disable them.
    :param result: The currently selected result. For a
        QuantyCalculation (or None) all controls follow *flag*; for any
        other result type (e.g. experimental data) only the results view
        stays usable.
    """
    self.elementComboBox.setEnabled(flag)
    self.chargeComboBox.setEnabled(flag)
    self.symmetryComboBox.setEnabled(flag)
    self.experimentComboBox.setEnabled(flag)
    self.edgeComboBox.setEnabled(flag)
    self.temperatureLineEdit.setEnabled(flag)
    self.magneticFieldLineEdit.setEnabled(flag)
    self.xMinLineEdit.setEnabled(flag)
    self.xMaxLineEdit.setEnabled(flag)
    self.xNPointsLineEdit.setEnabled(flag)
    self.xLorentzianLineEdit.setEnabled(flag)
    self.xGaussianLineEdit.setEnabled(flag)
    self.k1LineEdit.setEnabled(flag)
    self.eps11LineEdit.setEnabled(flag)
    self.yMinLineEdit.setEnabled(flag)
    self.yMaxLineEdit.setEnabled(flag)
    self.yNPointsLineEdit.setEnabled(flag)
    self.yLorentzianLineEdit.setEnabled(flag)
    self.yGaussianLineEdit.setEnabled(flag)
    self.spectraListView.setEnabled(flag)
    self.fkLineEdit.setEnabled(flag)
    self.gkLineEdit.setEnabled(flag)
    self.zetaLineEdit.setEnabled(flag)
    self.syncParametersCheckBox.setEnabled(flag)
    self.nPsisAutoCheckBox.setEnabled(flag)
    # The manual nPsis entry stays disabled while auto mode is on.
    if self.nPsisAutoCheckBox.isChecked():
        self.nPsisLineEdit.setEnabled(False)
    else:
        self.nPsisLineEdit.setEnabled(flag)
    self.hamiltonianTermsView.setEnabled(flag)
    self.hamiltonianParametersView.setEnabled(flag)
    self.resultsView.setEnabled(flag)
    self.saveInputAsPushButton.setEnabled(flag)
    if result is None or isinstance(result, QuantyCalculation):
        self.nConfigurationsLineEdit.setEnabled(flag)
        self.resultsView.setEnabled(flag)
        self.calculationPushButton.setEnabled(True)
        self.resultDetailsDialog.enableWidget(flag)
    else:
        # Non-calculation results (e.g. experimental data) cannot be
        # edited, but the results view must stay interactive.
        self.nConfigurationsLineEdit.setEnabled(False)
        self.calculationPushButton.setEnabled(flag)
        self.resultsView.setEnabled(True)
        self.resultDetailsDialog.enableWidget(False)
def updateTemperature(self):
    """Validate and store the temperature entered by the user.

    Negative values are rejected and the widget is reset. At 0 K only
    the ground state is occupied, so the number of states is forced to
    one and auto mode is switched off.
    """
    temperature = self.temperatureLineEdit.getValue()
    if temperature < 0:
        message = 'The temperature cannot be negative.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.temperatureLineEdit.setValue(self.state.temperature)
        return
    elif temperature == 0:
        self.nPsisAutoCheckBox.setChecked(False)
        self.updateNPsisAuto()
        self.nPsisLineEdit.setValue(1)
        self.updateNPsis()
    self.state.temperature = temperature
def updateMagneticField(self):
    """Project the magnetic field onto the incident wave vector and
    update the corresponding Hamiltonian parameters.

    The field (in tesla) is converted to eV and distributed over the
    Cartesian components according to the normalized wave vector k1.
    """
    magneticField = self.magneticFieldLineEdit.getValue()
    # Conversion factor from tesla to eV (Bohr magneton in eV/T).
    TESLA_TO_EV = 5.788e-05
    # Normalize the current incident vector.
    k1 = np.array(self.state.k1)
    k1 = k1 / np.linalg.norm(k1)
    configurations = self.state.hamiltonianData['Magnetic Field']
    for configuration in configurations:
        parameters = configurations[configuration]
        for i, parameter in enumerate(parameters):
            value = float(magneticField * np.abs(k1[i]) * TESLA_TO_EV)
            # Avoid storing a negative zero.
            if abs(value) == 0.0:
                value = 0.0
            configurations[configuration][parameter] = value
    self.hamiltonianModel.updateModelData(self.state.hamiltonianData)
    self.state.magneticField = magneticField
def updateXMin(self):
    """Validate and store the lower x-axis energy limit."""
    value = self.xMinLineEdit.getValue()
    if value > self.state.xMax:
        self.getStatusBar().showMessage(
            'The lower energy limit cannot be larger than '
            'the upper limit.', self.timeout)
        # Restore the previous, valid value in the widget.
        self.xMinLineEdit.setValue(self.state.xMin)
        return
    self.state.xMin = value
def updateXMax(self):
    """Validate and store the upper x-axis energy limit."""
    value = self.xMaxLineEdit.getValue()
    if value < self.state.xMin:
        self.getStatusBar().showMessage(
            'The upper energy limit cannot be smaller than '
            'the lower limit.', self.timeout)
        # Restore the previous, valid value in the widget.
        self.xMaxLineEdit.setValue(self.state.xMax)
        return
    self.state.xMax = value
def updateXNPoints(self):
    """Validate and store the number of points on the x axis.

    The minimum number of points is derived from the energy range and
    the smallest Lorentzian broadening, so that the grid resolves the
    narrowest feature.
    """
    xNPoints = self.xNPointsLineEdit.getValue()
    xMin = self.state.xMin
    xMax = self.state.xMax
    xLorentzianMin = float(self.state.xLorentzian[0])
    xNPointsMin = int(np.floor((xMax - xMin) / xLorentzianMin))
    if xNPoints < xNPointsMin:
        message = ('The number of points must be greater than '
                   '{}.'.format(xNPointsMin))
        self.getStatusBar().showMessage(message, self.timeout)
        self.xNPointsLineEdit.setValue(self.state.xNPoints)
        return
    self.state.xNPoints = xNPoints
def updateXLorentzian(self):
    """Validate the x-axis Lorentzian broadening entered by the user.

    The broadening is a list of at most three values: a minimum and an
    optional maximum broadening (both at least 0.1) and an optional
    pivot energy that must lie strictly between the energy limits. On
    invalid input the widget is reset and nothing is stored.
    """
    try:
        xLorentzian = self.xLorentzianLineEdit.getList()
    except ValueError:
        message = 'Invalid data for the Lorentzian brodening.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.xLorentzianLineEdit.setList(self.state.xLorentzian)
        return
    # Do some validation of the input value.
    if len(xLorentzian) > 3:
        message = 'The broadening can have at most three elements.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.xLorentzianLineEdit.setList(self.state.xLorentzian)
        return
    try:
        xLorentzianMin = float(xLorentzian[0])
    except IndexError:
        pass
    else:
        if xLorentzianMin < 0.1:
            message = 'The broadening cannot be smaller than 0.1.'
            self.getStatusBar().showMessage(message, self.timeout)
            self.xLorentzianLineEdit.setList(
                self.state.xLorentzian)
            return
    try:
        xLorentzianMax = float(xLorentzian[1])
    except IndexError:
        pass
    else:
        if xLorentzianMax < 0.1:
            message = 'The broadening cannot be smaller than 0.1.'
            self.getStatusBar().showMessage(message, self.timeout)
            self.xLorentzianLineEdit.setList(
                self.state.xLorentzian)
            # BUG FIX: this branch previously fell through and stored
            # the invalid value; reject it like the other branches.
            return
    try:
        xLorentzianPivotEnergy = float(xLorentzian[2])
    except IndexError:
        pass
    else:
        xMin = self.state.xMin
        xMax = self.state.xMax
        if not (xMin < xLorentzianPivotEnergy < xMax):
            message = ('The transition point must lie between the upper '
                       'and lower energy limits.')
            self.getStatusBar().showMessage(message, self.timeout)
            self.xLorentzianLineEdit.setList(
                self.state.xLorentzian)
            return
    # Store floats, for consistency with updateYLorentzian.
    self.state.xLorentzian = list(map(float, xLorentzian))
def updateXGaussian(self):
    """Validate and store the x-axis Gaussian broadening."""
    value = self.xGaussianLineEdit.getValue()
    if value < 0:
        self.getStatusBar().showMessage(
            'The broadening cannot be negative.', self.timeout)
        # Restore the previous, valid value in the widget.
        self.xGaussianLineEdit.setValue(self.state.xGaussian)
        return
    self.state.xGaussian = value
def updateIncidentWaveVector(self):
    """Validate and store the incident wave vector k1.

    A non-null vector is required. The first polarization vector is
    forced to be perpendicular to k1, and a second polarization vector
    is derived from their cross product. The magnetic field projection
    is updated afterwards since it depends on k1.
    """
    try:
        k1 = self.k1LineEdit.getVector()
    except ValueError:
        message = 'Invalid data for the wave vector.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.k1LineEdit.setVector(self.state.k1)
        return
    if np.all(np.array(k1) == 0):
        message = 'The wave vector cannot be null.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.k1LineEdit.setVector(self.state.k1)
        return
    # The k1 value should be fine; save it.
    self.state.k1 = k1
    # The polarization vector must be correct.
    eps11 = self.eps11LineEdit.getVector()
    # If the wave and polarization vectors are not perpendicular, select a
    # new perpendicular vector for the polarization.
    if np.dot(np.array(k1), np.array(eps11)) != 0:
        if k1[2] != 0 or (-k1[0] - k1[1]) != 0:
            eps11 = (k1[2], k1[2], -k1[0] - k1[1])
        else:
            eps11 = (-k1[2] - k1[1], k1[0], k1[0])
        self.eps11LineEdit.setVector(eps11)
    self.state.eps11 = eps11
    # Generate a second, perpendicular, polarization vector to the plane
    # defined by the wave vector and the first polarization vector.
    eps12 = np.cross(np.array(eps11), np.array(k1))
    eps12 = eps12.tolist()
    self.eps12LineEdit.setVector(eps12)
    self.state.eps12 = eps12
    # Update the magnetic field.
    self.updateMagneticField()
def updateIncidentPolarizationVectors(self):
    """Validate and store the first incident polarization vector.

    The vector must be non-null and perpendicular to the incident wave
    vector. The second polarization vector is recomputed as the cross
    product of the first polarization vector and the wave vector.
    """
    try:
        eps11 = self.eps11LineEdit.getVector()
    except ValueError:
        message = 'Invalid data for the polarization vector.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.eps11LineEdit.setVector(self.state.eps11)
        return
    if np.all(np.array(eps11) == 0):
        message = 'The polarization vector cannot be null.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.eps11LineEdit.setVector(self.state.eps11)
        return
    k1 = self.state.k1
    if np.dot(np.array(k1), np.array(eps11)) != 0:
        message = ('The wave and polarization vectors need to be '
                   'perpendicular.')
        self.getStatusBar().showMessage(message, self.timeout)
        self.eps11LineEdit.setVector(self.state.eps11)
        return
    self.state.eps11 = eps11
    # Generate a second, perpendicular, polarization vector to the plane
    # defined by the wave vector and the first polarization vector.
    eps12 = np.cross(np.array(eps11), np.array(k1))
    eps12 = eps12.tolist()
    self.eps12LineEdit.setVector(eps12)
    self.state.eps12 = eps12
def updateYMin(self):
    """Validate and store the lower y-axis energy limit."""
    value = self.yMinLineEdit.getValue()
    if value > self.state.yMax:
        self.getStatusBar().showMessage(
            'The lower energy limit cannot be larger than '
            'the upper limit.', self.timeout)
        # Restore the previous, valid value in the widget.
        self.yMinLineEdit.setValue(self.state.yMin)
        return
    self.state.yMin = value
def updateYMax(self):
    """Validate and store the upper y-axis energy limit."""
    value = self.yMaxLineEdit.getValue()
    if value < self.state.yMin:
        self.getStatusBar().showMessage(
            'The upper energy limit cannot be smaller than '
            'the lower limit.', self.timeout)
        # Restore the previous, valid value in the widget.
        self.yMaxLineEdit.setValue(self.state.yMax)
        return
    self.state.yMax = value
def updateYNPoints(self):
    """Validate and store the number of points on the y axis.

    The minimum number of points is derived from the energy range and
    the smallest Lorentzian broadening.
    """
    yNPoints = self.yNPointsLineEdit.getValue()
    yMin = self.state.yMin
    yMax = self.state.yMax
    yLorentzianMin = float(self.state.yLorentzian[0])
    yNPointsMin = int(np.floor((yMax - yMin) / yLorentzianMin))
    if yNPoints < yNPointsMin:
        message = ('The number of points must be greater than '
                   '{}.'.format(yNPointsMin))
        self.getStatusBar().showMessage(message, self.timeout)
        self.yNPointsLineEdit.setValue(self.state.yNPoints)
        return
    self.state.yNPoints = yNPoints
def updateYLorentzian(self):
    """Validate the y-axis Lorentzian broadening entered by the user.

    The broadening is a list of at most three values: a minimum and an
    optional maximum broadening (both at least 0.1) and an optional
    pivot energy that must lie strictly between the energy limits. On
    invalid input the widget is reset and nothing is stored.
    """
    try:
        yLorentzian = self.yLorentzianLineEdit.getList()
    except ValueError:
        message = 'Invalid data for the Lorentzian brodening.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.yLorentzianLineEdit.setList(self.state.yLorentzian)
        return
    # Do some validation of the input value.
    if len(yLorentzian) > 3:
        message = 'The broadening can have at most three elements.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.yLorentzianLineEdit.setList(self.state.yLorentzian)
        return
    try:
        yLorentzianMin = float(yLorentzian[0])
    except IndexError:
        pass
    else:
        if yLorentzianMin < 0.1:
            message = 'The broadening cannot be smaller than 0.1.'
            self.getStatusBar().showMessage(message, self.timeout)
            self.yLorentzianLineEdit.setList(
                self.state.yLorentzian)
            return
    try:
        yLorentzianMax = float(yLorentzian[1])
    except IndexError:
        pass
    else:
        if yLorentzianMax < 0.1:
            message = 'The broadening cannot be smaller than 0.1.'
            self.getStatusBar().showMessage(message, self.timeout)
            self.yLorentzianLineEdit.setList(
                self.state.yLorentzian)
            # BUG FIX: this branch previously fell through and stored
            # the invalid value; reject it like the other branches.
            return
    try:
        yLorentzianPivotEnergy = float(yLorentzian[2])
    except IndexError:
        pass
    else:
        yMin = self.state.yMin
        yMax = self.state.yMax
        if not (yMin < yLorentzianPivotEnergy < yMax):
            message = ('The transition point must lie between the upper '
                       'and lower energy limits.')
            self.getStatusBar().showMessage(message, self.timeout)
            self.yLorentzianLineEdit.setList(
                self.state.yLorentzian)
            return
    self.state.yLorentzian = list(map(float, yLorentzian))
def updateYGaussian(self):
    """Validate and store the y-axis Gaussian broadening."""
    value = self.yGaussianLineEdit.getValue()
    if value < 0:
        self.getStatusBar().showMessage(
            'The broadening cannot be negative.', self.timeout)
        # Restore the previous, valid value in the widget.
        self.yGaussianLineEdit.setValue(self.state.yGaussian)
        return
    self.state.yGaussian = value
def updateSpectraCheckState(self, checkedItems):
    """Store which spectra the user selected for calculation."""
    self.state.spectra.toCalculateChecked = checkedItems
def updateScaleFactors(self):
    """Validate the Fk/Gk/zeta scale factors and apply them to the
    atomic and hybridization Hamiltonian parameters.

    Parameters starting with 'F' get the fk factor, 'G' the gk factor,
    and 'ζ' the zeta factor. Negative factors are rejected.
    """
    fk = self.fkLineEdit.getValue()
    gk = self.gkLineEdit.getValue()
    zeta = self.zetaLineEdit.getValue()
    if fk < 0 or gk < 0 or zeta < 0:
        message = 'The scale factors cannot be negative.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.fkLineEdit.setValue(self.state.fk)
        self.gkLineEdit.setValue(self.state.gk)
        self.zetaLineEdit.setValue(self.state.zeta)
        return
    self.state.fk = fk
    self.state.gk = gk
    self.state.zeta = zeta
    # TODO: This should be already updated to the most recent data.
    # self.state.hamiltonianData = self.hamiltonianModel.getModelData()
    terms = self.state.hamiltonianData
    for term in terms:
        if not ('Atomic' in term or 'Hybridization' in term):
            continue
        configurations = terms[term]
        for configuration in configurations:
            parameters = configurations[configuration]
            for parameter in parameters:
                # Change the scale factors if the parameter has one.
                try:
                    value, _ = parameters[parameter]
                except TypeError:
                    # The parameter is a bare value without a scale
                    # factor; leave it untouched.
                    continue
                if parameter.startswith('F'):
                    terms[term][configuration][parameter] = [value, fk]
                elif parameter.startswith('G'):
                    terms[term][configuration][parameter] = [value, gk]
                elif parameter.startswith('ζ'):
                    terms[term][configuration][parameter] = [value, zeta]
    self.hamiltonianModel.updateModelData(self.state.hamiltonianData)
    # I have no idea why this is needed. Both views should update after
    # the above function call.
    self.hamiltonianTermsView.viewport().repaint()
    self.hamiltonianParametersView.viewport().repaint()
def updateNPsisAuto(self):
    """Toggle automatic selection of the number of states.

    In auto mode the maximum number of states is used and manual entry
    is disabled.
    """
    nPsisAuto = int(self.nPsisAutoCheckBox.isChecked())
    if nPsisAuto:
        self.nPsisLineEdit.setValue(self.state.nPsisMax)
        self.nPsisLineEdit.setEnabled(False)
    else:
        self.nPsisLineEdit.setEnabled(True)
    self.state.nPsisAuto = nPsisAuto
def updateNPsis(self):
    """Validate and store the number of states to calculate.

    The value must be positive and is clamped to the maximum supported
    by the current configuration.
    """
    nPsis = self.nPsisLineEdit.getValue()
    if nPsis <= 0:
        message = 'The number of states must be larger than zero.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.nPsisLineEdit.setValue(self.state.nPsis)
        return
    if nPsis > self.state.nPsisMax:
        message = 'The selected number of states exceeds the maximum.'
        self.getStatusBar().showMessage(message, self.timeout)
        self.nPsisLineEdit.setValue(self.state.nPsisMax)
        nPsis = self.state.nPsisMax
    self.state.nPsis = nPsis
def updateSyncParameters(self, flag):
    """Enable or disable synchronized editing of Hamiltonian parameters."""
    self.hamiltonianModel.setSyncState(flag)
def updateHamiltonianData(self):
    """Pull the latest Hamiltonian parameters from the model into the state."""
    self.state.hamiltonianData = self.hamiltonianModel.getModelData()
def updateHamiltonianNodeCheckState(self, index, state):
    """React to a Hamiltonian term being checked or unchecked.

    Enforces mutual exclusion between LMCT and MLCT ligand
    hybridizations, recomputes the maximum number of configurations,
    and adjusts the configurations line edit for the ligand
    hybridization term.

    NOTE(review): *state* is assumed to be Qt.Unchecked (0) or
    Qt.Checked (2); a partially-checked value would leave
    nConfigurations unset below — confirm with the model.
    """
    toggledTerm = index.data()
    states = self.hamiltonianModel.getNodesCheckState()
    # Allow only one type of hybridization with the ligands, either
    # LMCT or MLCT.
    for term in states:
        if 'LMCT' in term and 'MLCT' in toggledTerm:
            states[term] = 0
        elif 'MLCT' in term and 'LMCT' in toggledTerm:
            states[term] = 0
    self.state.hamiltonianState = states
    self.hamiltonianModel.setNodesCheckState(states)
    # Determine the maximum number of allowed configurations.
    if 'LMCT' in toggledTerm:
        if 'd' in self.state.block:
            self.state.nConfigurationsMax = 10 - self.state.nElectrons + 1
        elif 'f' in self.state.block:
            self.state.nConfigurationsMax = 14 - self.state.nElectrons + 1
    elif 'MLCT' in toggledTerm:
        self.state.nConfigurationsMax = self.state.nElectrons + 1
    term = '{}-Ligands Hybridization'.format(self.state.block)
    if term in index.data():
        if state == 0:
            nConfigurations = 1
            self.nConfigurationsLineEdit.setEnabled(False)
        elif state == 2:
            if self.state.nConfigurationsMax == 1:
                nConfigurations = 1
            else:
                nConfigurations = 2
            self.nConfigurationsLineEdit.setEnabled(True)
        self.nConfigurationsLineEdit.setValue(nConfigurations)
        self.state.nConfigurations = nConfigurations
def updateConfigurations(self, *args):
    """Validate and store the number of configurations, clamping it to
    the current maximum."""
    nConfigurations = self.nConfigurationsLineEdit.getValue()
    if nConfigurations > self.state.nConfigurationsMax:
        message = 'The maximum number of configurations is {}.'.format(
            self.state.nConfigurationsMax)
        self.getStatusBar().showMessage(message, self.timeout)
        self.nConfigurationsLineEdit.setValue(
            self.state.nConfigurationsMax)
        nConfigurations = self.state.nConfigurationsMax
    self.state.nConfigurations = nConfigurations
def saveInput(self):
    """Write the Quanty input file for the current state to disk.

    Changes to the working folder stored in the settings before
    writing. On failure a message is shown in the status bar and the
    exception is re-raised so callers can abort the calculation.

    :raises OSError: If the folder does not exist or the file cannot
        be written.
    """
    # TODO: If the user changes a value in a widget without pressing Return
    # before running the calculation, the values are not updated.
    self.state.verbosity = self.getVerbosity()
    self.state.denseBorder = self.getDenseBorder()
    path = self.getCurrentPath()
    try:
        os.chdir(path)
    except OSError:
        message = ('The specified folder doesn\'t exist. Use the \'Save '
                   'Input As...\' button to save the input file to an '
                   'alternative location.')
        self.getStatusBar().showMessage(message, 2 * self.timeout)
        # Bare raise preserves the original traceback. IOError is an
        # alias of OSError in Python 3, so a single clause suffices.
        raise
    # The folder might exist, but is not writable.
    try:
        self.state.saveInput()
    except OSError:
        message = 'Failed to write the Quanty input file.'
        self.getStatusBar().showMessage(message, self.timeout)
        raise
def saveInputAs(self):
    """Ask the user for a file name and save the Quanty input there.

    Updates the calculation's base name, the stored current path, and
    the main window title on success.
    """
    path, _ = QFileDialog.getSaveFileName(
        self, 'Save Quanty Input',
        os.path.join(self.getCurrentPath(), '{}.lua'.format(
            self.state.baseName)), 'Quanty Input File (*.lua)')
    if path:
        basename = os.path.basename(path)
        self.state.baseName, _ = os.path.splitext(basename)
        self.setCurrentPath(path)
        try:
            self.saveInput()
        # IOError is an alias of OSError in Python 3; the exception
        # object was unused, so it is no longer bound.
        except OSError:
            # saveInput already reported the error in the status bar.
            return
        self.updateMainWindowTitle(self.state.baseName)
def saveAllResultsAs(self):
    """Save every result in the model to a user-chosen pickle file."""
    path, _ = QFileDialog.getSaveFileName(
        self, 'Save Results',
        os.path.join(self.getCurrentPath(), '{}.pkl'.format(
            'untitled')), 'Pickle File (*.pkl)')
    if path:
        self.setCurrentPath(path)
        results = self.resultsModel.getAllItems()
        # Store oldest first so loading reproduces the original order.
        results.reverse()
        with open(path, 'wb') as p:
            pickle.dump(results, p, pickle.HIGHEST_PROTOCOL)
def saveSelectedResultsAs(self):
    """Save the currently selected results to a user-chosen pickle file."""
    path, _ = QFileDialog.getSaveFileName(
        self, 'Save Results',
        os.path.join(self.getCurrentPath(), '{}.pkl'.format(
            'untitled')), 'Pickle File (*.pkl)')
    if path:
        self.setCurrentPath(path)
        indexes = self.resultsView.selectedIndexes()
        results = self.resultsModel.getSelectedItems(indexes)
        # Store oldest first so loading reproduces the original order.
        results.reverse()
        with open(path, 'wb') as p:
            pickle.dump(results, p, pickle.HIGHEST_PROTOCOL)
def resetState(self):
    """Create a fresh calculation from the current combo-box selection
    and repopulate the widget."""
    element = self.elementComboBox.currentText()
    charge = self.chargeComboBox.currentText()
    symmetry = self.symmetryComboBox.currentText()
    experiment = self.experimentComboBox.currentText()
    edge = self.edgeComboBox.currentText()
    self.state = QuantyCalculation(
        element=element, charge=charge, symmetry=symmetry,
        experiment=experiment, edge=edge)
    self.resultsView.selectionModel().clearSelection()
    self.populateWidget()
    self.updateMainWindowTitle(self.state.baseName)
    self.resultDetailsDialog.clear()
def removeSelectedCalculations(self):
    """Remove the selected results; with no selection, just clear the plot."""
    indexes = self.resultsView.selectedIndexes()
    if not indexes:
        self.getPlotWidget().reset()
        return
    self.resultsModel.removeItems(indexes)
    # self.resultsView.reset()
def removeAllResults(self):
    """Remove every result from the model and clear the plot."""
    self.resultsModel.reset()
    self.getPlotWidget().reset()
def loadResults(self):
    """Load previously pickled results and append them to the model."""
    path, _ = QFileDialog.getOpenFileName(
        self, 'Load Results',
        self.getCurrentPath(), 'Pickle File (*.pkl)')
    if path:
        self.setCurrentPath(path)
        with open(path, 'rb') as p:
            self.resultsModel.appendItems(pickle.load(p))
        self.updateMainWindowTitle(self.state.baseName)
        # Bring the results page to the front.
        self.quantyToolBox.setCurrentWidget(self.resultsPage)
def runCalculation(self):
    """Launch Quanty on the current input file using QProcess.

    Validates the executable path, writes the input file, starts the
    process, and wires up the output/finished handlers.
    """
    path = self.getQuantyPath()
    if path:
        command = path
    else:
        message = ('The path to the Quanty executable is not set. '
                   'Please use the preferences menu to set it.')
        self.getStatusBar().showMessage(message, 2 * self.timeout)
        return
    # Test the executable.
    with open(os.devnull, 'w') as f:
        try:
            subprocess.call(command, stdout=f, stderr=f)
        except FileNotFoundError:
            # BUG FIX: the previous check compared e.errno against
            # os.errno.ENOENT, but the os.errno attribute was removed
            # in Python 3.7. FileNotFoundError covers exactly the
            # ENOENT case; any other OSError still propagates.
            message = ('The Quanty executable is not working '
                       'properly. Is the PATH set correctly?')
            self.getStatusBar().showMessage(message, 2 * self.timeout)
            return
    # Write the input file to disk.
    try:
        self.saveInput()
    except OSError:
        # saveInput already reported the error in the status bar.
        return
    # Disable the widget while the calculation is running.
    self.enableWidget(False)
    self.state.startingTime = datetime.datetime.now()
    # Run Quanty using QProcess.
    self.process = QProcess()
    self.process.start(command, (self.state.baseName + '.lua', ))
    message = (
        'Running "Quanty {}" in {}.'.format(
            self.state.baseName + '.lua', os.getcwd()))
    self.getStatusBar().showMessage(message)
    if sys.platform == 'win32' and self.process.waitForStarted():
        self.updateCalculationPushButton()
    else:
        self.process.started.connect(self.updateCalculationPushButton)
    self.process.readyReadStandardOutput.connect(self.handleOutputLogging)
    self.process.finished.connect(self.processCalculation)
def updateCalculationPushButton(self, kind='stop'):
    """Switch the calculation button between 'run' and 'stop' modes.

    :param kind: 'run' wires the button to start a calculation, 'stop'
        wires it to kill the running process. Other values leave the
        button disconnected.
    """
    # Drop any previous connection before re-wiring the button.
    self.calculationPushButton.disconnect()
    if kind == 'run':
        icon = QIcon(resourceFileName('icons:play.svg'))
        self.calculationPushButton.setIcon(icon)
        self.calculationPushButton.setText('Run')
        self.calculationPushButton.setToolTip('Run Quanty.')
        self.calculationPushButton.clicked.connect(self.runCalculation)
    elif kind == 'stop':
        icon = QIcon(resourceFileName('icons:stop.svg'))
        self.calculationPushButton.setIcon(icon)
        self.calculationPushButton.setText('Stop')
        self.calculationPushButton.setToolTip('Stop Quanty.')
        self.calculationPushButton.clicked.connect(self.stopCalculation)
    else:
        pass
def stopCalculation(self):
    """Kill the running Quanty process."""
    self.process.kill()
def processCalculation(self, *args):
    """Handle the termination of the Quanty process.

    Reports the outcome in the status bar, loads the calculated
    spectra from disk, appends the finished calculation to the results
    model, and optionally removes the temporary files.
    """
    startingTime = self.state.startingTime
    # When did I finish?
    endingTime = datetime.datetime.now()
    self.state.endingTime = endingTime
    # Re-enable the widget when the calculation has finished.
    self.enableWidget(True)
    # Reset the calculation button.
    self.updateCalculationPushButton('run')
    # Evaluate the exit code and status of the process.
    exitStatus = self.process.exitStatus()
    exitCode = self.process.exitCode()
    if exitStatus == 0 and exitCode == 0:
        message = ('Quanty has finished successfully in ')
        delta = (endingTime - startingTime).total_seconds()
        hours, reminder = divmod(delta, 3600)
        minutes, seconds = divmod(reminder, 60)
        seconds = round(seconds, 2)
        if hours > 0:
            message += '{} hours {} minutes and {} seconds.'.format(
                hours, minutes, seconds)
        elif minutes > 0:
            message += '{} minutes and {} seconds.'.format(
                minutes, seconds)
        else:
            message += '{} seconds.'.format(seconds)
        self.getStatusBar().showMessage(message, self.timeout)
    elif exitStatus == 0 and exitCode == 1:
        self.handleErrorLogging()
        message = (
            'Quanty has finished unsuccessfully. '
            'Check the logging window for more details.')
        self.getStatusBar().showMessage(message, self.timeout)
        return
    # exitCode is platform dependent; exitStatus is always 1.
    elif exitStatus == 1:
        message = 'Quanty was stopped.'
        self.getStatusBar().showMessage(message, self.timeout)
        return
    # Scroll to the bottom of the logger widget.
    scrollBar = self.getLoggerWidget().verticalScrollBar()
    scrollBar.setValue(scrollBar.maximum())
    # Load the spectra from disk.
    self.state.spectra.loadFromDisk(self.state)
    # If the calculated spectrum is an image, uncheck all the other
    # calculations. This way the current result can be displayed in the
    # plot widget.
    if self.state.experiment in ['RIXS', ]:
        self.resultsModel.uncheckAllItems()
    # Once all processing is done, store the state in the
    # results model. Upon finishing this, a signal is emitted by the
    # model which triggers some updates to be performed.
    self.state.isChecked = True
    self.resultsModel.appendItems(self.state)
    # If the "Hamiltonian Setup" page is currently selected, when the
    # current widget is set to the "Results Page", the former is not
    # displayed. To avoid this I switch first to the "General Setup" page.
    self.quantyToolBox.setCurrentWidget(self.generalPage)
    self.quantyToolBox.setCurrentWidget(self.resultsPage)
    self.resultsView.setFocus()
    # Remove files if requested.
    if self.doRemoveFiles():
        os.remove('{}.lua'.format(self.state.baseName))
        spectra = glob.glob('{}_*.spec'.format(self.state.baseName))
        for spectrum in spectra:
            os.remove(spectrum)
def selectedHamiltonianTermChanged(self):
    """Show the parameters of the newly selected Hamiltonian term."""
    index = self.hamiltonianTermsView.currentIndex()
    self.hamiltonianParametersView.setRootIndex(index)
def showResultsContextMenu(self, position):
    """Build and show the context menu of the results view.

    Actions that do not apply to the current selection or to an empty
    model are disabled before the menu is displayed.

    :param position: Position of the request in view coordinates.
    """
    icon = QIcon(resourceFileName('icons:clipboard.svg'))
    self.showDetailsAction = QAction(
        icon, 'Show Details', self, triggered=self.showResultDetailsDialog)
    icon = QIcon(resourceFileName('icons:save.svg'))
    self.saveSelectedResultsAsAction = QAction(
        icon, 'Save Selected Results As...', self,
        triggered=self.saveSelectedResultsAs)
    self.saveAllResultsAsAction = QAction(
        icon, 'Save All Results As...', self,
        triggered=self.saveAllResultsAs)
    icon = QIcon(resourceFileName('icons:trash.svg'))
    self.removeSelectedResultsAction = QAction(
        icon, 'Remove Selected Results', self,
        triggered=self.removeSelectedCalculations)
    self.removeAllResultsAction = QAction(
        icon, 'Remove All Results', self, triggered=self.removeAllResults)
    icon = QIcon(resourceFileName('icons:folder-open.svg'))
    self.loadResultsAction = QAction(
        icon, 'Load Results', self, triggered=self.loadResults)
    self.resultsContextMenu = QMenu('Results Context Menu', self)
    self.resultsContextMenu.addAction(self.showDetailsAction)
    self.resultsContextMenu.addSeparator()
    self.resultsContextMenu.addAction(self.saveSelectedResultsAsAction)
    self.resultsContextMenu.addAction(self.removeSelectedResultsAction)
    self.resultsContextMenu.addSeparator()
    self.resultsContextMenu.addAction(self.saveAllResultsAsAction)
    self.resultsContextMenu.addAction(self.removeAllResultsAction)
    self.resultsContextMenu.addSeparator()
    self.resultsContextMenu.addAction(self.loadResultsAction)
    # Selection-dependent actions.
    if not self.resultsView.selectedIndexes():
        self.removeSelectedResultsAction.setEnabled(False)
        self.saveSelectedResultsAsAction.setEnabled(False)
    # Model-dependent actions.
    if not self.resultsModel.modelData:
        self.showDetailsAction.setEnabled(False)
        self.saveAllResultsAsAction.setEnabled(False)
        self.removeAllResultsAction.setEnabled(False)
    self.resultsContextMenu.exec_(self.resultsView.mapToGlobal(position))
def updateResultsView(self, index):
    """
    Update the selection to contain only the result specified by
    the index. This should be the last index of the model. Finally update
    the context menu.

    The selectionChanged signal is used to trigger the update of
    the Quanty dock widget and result details dialog.

    :param index: Index of the last item of the model.
    :type index: QModelIndex
    """
    flags = (QItemSelectionModel.Clear | QItemSelectionModel.Rows |
             QItemSelectionModel.Select)
    self.resultsView.selectionModel().select(index, flags)
    self.resultsView.resizeColumnsToContents()
    self.resultsView.setFocus()
def getLastSelectedResultsModelIndex(self):
    """Return the index of the last selected row, or None if nothing
    is selected."""
    rows = self.resultsView.selectionModel().selectedRows()
    return rows[-1] if rows else None
def selectedResultsChanged(self):
    """Update the widget when the selection in the results view changes.

    A QuantyCalculation becomes the editable state; experimental data
    only updates the title; an empty selection resets the widget.
    """
    indexes = self.resultsView.selectedIndexes()
    # With a multi-row selection keep the current state untouched.
    if len(indexes) > 1:
        return
    index = self.getLastSelectedResultsModelIndex()
    if index is None:
        result = None
    else:
        result = self.resultsModel.getItem(index)
    if isinstance(result, QuantyCalculation):
        self.enableWidget(True, result)
        self.state = result
        self.populateWidget()
        self.resultDetailsDialog.populateWidget()
    elif isinstance(result, ExperimentalData):
        self.enableWidget(False, result)
        self.updateMainWindowTitle(result.baseName)
        self.resultDetailsDialog.clear()
    else:
        self.enableWidget(True, result)
        self.resetState()
def updateResultsModelData(self):
    """Write the current state back into the selected results item."""
    index = self.getLastSelectedResultsModelIndex()
    if index is None:
        return
    self.resultsModel.updateItem(index, self.state)
    self.resultsView.viewport().repaint()
def showResultDetailsDialog(self):
    """Show and raise the result details dialog."""
    self.resultDetailsDialog.show()
    self.resultDetailsDialog.raise_()
def updateCalculationName(self, name):
    """Propagate a renamed calculation to the title bar and dialog."""
    self.state.baseName = name
    self.updateMainWindowTitle(name)
    self.resultDetailsDialog.updateTitle(name)
    # Only calculations have a summary to refresh.
    if isinstance(self.state, QuantyCalculation):
        self.resultDetailsDialog.updateSummary()
def loadExperimentalData(self):
    """Load an experimental data file and append it to the results model."""
    path, _ = QFileDialog.getOpenFileName(
        self, 'Load Experimental Data',
        self.getCurrentPath(), 'Data File (*.dat)')
    if path:
        result = ExperimentalData(path)
        # A None spectra attribute signals a parsing failure.
        if result.spectra is not None:
            self.resultsModel.appendItems([result])
        else:
            message = ('Failed to read experimental data. Please check '
                       'that the file is properly formatted.')
            self.getStatusBar().showMessage(message, self.timeout)
def handleOutputLogging(self):
    """Forward Quanty's standard output to the logger and the state."""
    self.process.setReadChannel(QProcess.StandardOutput)
    raw = self.process.readAllStandardOutput().data()
    text = raw.decode('utf-8').rstrip()
    self.getLoggerWidget().appendPlainText(text)
    # Keep a copy of the full output on the calculation object.
    self.state.output = self.state.output + text
def handleErrorLogging(self):
    """Forward Quanty's standard error stream to the logger widget."""
    self.process.setReadChannel(QProcess.StandardError)
    raw = self.process.readAllStandardError().data()
    self.getLoggerWidget().appendPlainText(raw.decode('utf-8'))
def updateMainWindowTitle(self, name=None):
    """Set the main window title, optionally including *name*."""
    title = 'Crispy' if name is None else 'Crispy - {}'.format(name)
    self.setMainWindowTitle(title)
def setMainWindowTitle(self, title):
    """Set the window title on the parent main window."""
    self.parent().setWindowTitle(title)
def getStatusBar(self):
    """Return the status bar of the parent main window."""
    return self.parent().statusBar()
def getPlotWidget(self):
    """Return the plot widget of the parent main window."""
    return self.parent().plotWidget
def getLoggerWidget(self):
    """Return the logger widget of the parent main window."""
    return self.parent().loggerWidget
def setCurrentPath(self, path):
    """Remember the folder of *path* as the current working location."""
    # Only the containing folder is stored, not the file itself.
    path = os.path.dirname(path)
    self.settings.setValue('CurrentPath', path)
def getCurrentPath(self):
    """Return the last used folder, defaulting to the home directory."""
    path = self.settings.value('CurrentPath')
    return os.path.expanduser('~') if path is None else path
def getQuantyPath(self):
    """Return the configured path of the Quanty executable."""
    return self.settings.value('Quanty/Path')
def getVerbosity(self):
    """Return the configured Quanty verbosity setting."""
    return self.settings.value('Quanty/Verbosity')
def getDenseBorder(self):
    """Return the configured Quanty dense-border setting."""
    return self.settings.value('Quanty/DenseBorder')
def doRemoveFiles(self):
    """Return True if temporary files should be removed after a run."""
    return self.settings.value('Quanty/RemoveFiles', True, type=bool)
|
mretegan/crispy | crispy/gui/models.py | HamiltonianItem.row | python | def row(self):
if self.parent is not None:
children = self.parent.getChildren()
# The index method of the list object.
return children.index(self)
else:
return 0 | Return the row of the child. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/models.py#L254-L261 | null | class HamiltonianItem(object):
"""Class implementing a tree item to be used in a tree model."""
def __init__(self, parent=None, itemData=None):
self.parent = parent
self.itemData = itemData
self.children = []
self.checkState = None
if parent is not None:
parent.appendChild(self)
def appendChild(self, item):
"""Append a child to the parent item."""
self.children.append(item)
def getChildren(self):
return self.children
def child(self, row):
"""Return the child at a given row."""
return self.children[row]
def childCount(self):
return len(self.children)
def columnCount(self):
return len(self.itemData)
def getItemData(self, column):
"""Return the data for a given column."""
try:
return self.itemData[column]
except IndexError:
return str()
def setItemData(self, column, value):
"""Set the data at a given column."""
try:
self.itemData[column] = value
except IndexError:
pass
def getCheckState(self):
return self.checkState
def setCheckState(self, checkState):
self.checkState = checkState
|
mretegan/crispy | crispy/gui/models.py | HamiltonianModel.index | python | def index(self, row, column, parent=QModelIndex()):
if parent is not None and not parent.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parent)
childItem = parentItem.child(row)
if childItem:
index = self.createIndex(row, column, childItem)
else:
index = QModelIndex()
return index | Return the index of the item in the model specified by the
given row, column, and parent index. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/models.py#L307-L323 | null | class HamiltonianModel(QAbstractItemModel):
"""Class implementing the Hamiltonian tree model. It subclasses
QAbstractItemModel and thus implements: index(), parent(),
rowCount(), columnCount(), and data().
To enable editing, the class implements setData() and reimplements
flags() to ensure that an editable item is returned. headerData() is
also reimplemented to control the way the header is presented.
"""
itemCheckStateChanged = pyqtSignal(QModelIndex, Qt.CheckState)
def __init__(self, parent=None):
super(HamiltonianModel, self).__init__(parent)
self.header = ['Parameter', 'Value', 'Scale Factor']
self.modelData = odict()
def parent(self, index):
"""Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex, would avoid
confusion about what parent actually is - an index or an item.
"""
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex
def siblings(self, index):
item = self.item(index)
parentIndex = self.parent(index)
parentItem = self.item(parentIndex)
siblingIndices = list()
for child in parentItem.children:
if child is item:
continue
else:
row = child.row()
siblingIndex = self.index(row, 0, parentIndex)
siblingIndices.append(siblingIndex)
return siblingIndices
def rowCount(self, parentIndex):
"""Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
"""
if parentIndex.column() > 0:
return 0
if not parentIndex.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parentIndex)
return parentItem.childCount()
def columnCount(self, parentIndex):
"""Return the number of columns. The index of the parent is
required, but not used, as in this implementation it defaults
for all items to the length of the header.
"""
return len(self.header)
def data(self, index, role):
"""Return role specific data for the item referred by
index.column()."""
if not index.isValid():
return
item = self.item(index)
column = index.column()
value = item.getItemData(column)
if role == Qt.DisplayRole:
try:
if column == 1:
# Display small values using scientific notation.
if abs(float(value)) < 1e-3 and float(value) != 0.0:
return '{0:8.1e}'.format(value)
else:
return '{0:8.3f}'.format(value)
else:
return '{0:8.2f}'.format(value)
except ValueError:
return value
elif role == Qt.EditRole:
try:
value = float(value)
if abs(value) < 1e-3 and value != 0.0:
return str('{0:8.1e}'.format(value))
else:
return str('{0:8.3f}'.format(value))
except ValueError:
return str(value)
elif role == Qt.CheckStateRole:
if item.parent == self.rootItem and column == 0:
return item.getCheckState()
elif role == Qt.TextAlignmentRole:
if column > 0:
return Qt.AlignRight
def setData(self, index, value, role):
"""Set the role data for the item at index to value."""
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True
def setSyncState(self, flag):
self.sync = flag
def flags(self, index):
"""Return the active flags for the given index. Add editable
flag to items other than the first column.
"""
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags
def headerData(self, section, orientation, role):
"""Return the data for the given role and section in the header
with the specified orientation.
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.header[section]
def item(self, index):
if index is None or not index.isValid():
return self.rootItem
return index.internalPointer()
def setModelData(self, modelData, parentItem=None):
if parentItem is None:
self.rootItem = HamiltonianItem(None, self.header)
parentItem = self.rootItem
if isinstance(modelData, dict):
for key, value in modelData.items():
if isinstance(value, dict):
item = HamiltonianItem(parentItem, [key])
self.setModelData(value, item)
elif isinstance(value, float):
item = HamiltonianItem(parentItem, [key, value])
elif isinstance(value, list):
item = HamiltonianItem(
parentItem, [key, value[0], value[1]])
else:
raise TypeError
def setHeaderData(self, header):
self.header = header
def updateModelData(self, modelData, parentIndex=None):
parentItem = self.item(parentIndex)
if parentItem.childCount():
for child in parentItem.children:
key = child.itemData[0]
childData = modelData[key]
childIndex = self.index(child.row(), 0, parentIndex)
self.updateModelData(childData, childIndex)
else:
if isinstance(modelData, float):
parentItem.setItemData(1, modelData)
elif isinstance(modelData, list):
value, scaling = modelData
parentItem.setItemData(1, value)
parentItem.setItemData(2, scaling)
else:
raise TypeError
self.dataChanged.emit(parentIndex, parentIndex)
return True
def _getModelData(self, modelData, parentItem=None):
"""Return the data contained in the model."""
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1)
def getModelData(self):
modelData = odict()
self._getModelData(modelData)
return modelData
def setNodesCheckState(self, checkState, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
children = parentItem.getChildren()
for child in children:
childName = child.itemData[0]
try:
child.setCheckState(checkState[childName])
except KeyError:
pass
def getNodesCheckState(self, parentItem=None):
"""Return the check state (disabled, tristate, enable) of all items
belonging to a parent.
"""
if parentItem is None:
parentItem = self.rootItem
checkStates = odict()
children = parentItem.getChildren()
for child in children:
checkStates[child.itemData[0]] = child.getCheckState()
return checkStates
def reset(self):
self.beginResetModel()
self.rootItem = None
self.endResetModel()
|
mretegan/crispy | crispy/gui/models.py | HamiltonianModel.parent | python | def parent(self, index):
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex | Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex, would avoid
confusion about what parent actually is - an index or an item. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/models.py#L325-L339 | null | class HamiltonianModel(QAbstractItemModel):
"""Class implementing the Hamiltonian tree model. It subclasses
QAbstractItemModel and thus implements: index(), parent(),
rowCount(), columnCount(), and data().
To enable editing, the class implements setData() and reimplements
flags() to ensure that an editable item is returned. headerData() is
also reimplemented to control the way the header is presented.
"""
itemCheckStateChanged = pyqtSignal(QModelIndex, Qt.CheckState)
def __init__(self, parent=None):
super(HamiltonianModel, self).__init__(parent)
self.header = ['Parameter', 'Value', 'Scale Factor']
self.modelData = odict()
def index(self, row, column, parent=QModelIndex()):
"""Return the index of the item in the model specified by the
given row, column, and parent index.
"""
if parent is not None and not parent.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parent)
childItem = parentItem.child(row)
if childItem:
index = self.createIndex(row, column, childItem)
else:
index = QModelIndex()
return index
def siblings(self, index):
item = self.item(index)
parentIndex = self.parent(index)
parentItem = self.item(parentIndex)
siblingIndices = list()
for child in parentItem.children:
if child is item:
continue
else:
row = child.row()
siblingIndex = self.index(row, 0, parentIndex)
siblingIndices.append(siblingIndex)
return siblingIndices
def rowCount(self, parentIndex):
"""Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
"""
if parentIndex.column() > 0:
return 0
if not parentIndex.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parentIndex)
return parentItem.childCount()
def columnCount(self, parentIndex):
"""Return the number of columns. The index of the parent is
required, but not used, as in this implementation it defaults
for all items to the length of the header.
"""
return len(self.header)
def data(self, index, role):
"""Return role specific data for the item referred by
index.column()."""
if not index.isValid():
return
item = self.item(index)
column = index.column()
value = item.getItemData(column)
if role == Qt.DisplayRole:
try:
if column == 1:
# Display small values using scientific notation.
if abs(float(value)) < 1e-3 and float(value) != 0.0:
return '{0:8.1e}'.format(value)
else:
return '{0:8.3f}'.format(value)
else:
return '{0:8.2f}'.format(value)
except ValueError:
return value
elif role == Qt.EditRole:
try:
value = float(value)
if abs(value) < 1e-3 and value != 0.0:
return str('{0:8.1e}'.format(value))
else:
return str('{0:8.3f}'.format(value))
except ValueError:
return str(value)
elif role == Qt.CheckStateRole:
if item.parent == self.rootItem and column == 0:
return item.getCheckState()
elif role == Qt.TextAlignmentRole:
if column > 0:
return Qt.AlignRight
def setData(self, index, value, role):
"""Set the role data for the item at index to value."""
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True
def setSyncState(self, flag):
self.sync = flag
def flags(self, index):
"""Return the active flags for the given index. Add editable
flag to items other than the first column.
"""
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags
def headerData(self, section, orientation, role):
"""Return the data for the given role and section in the header
with the specified orientation.
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.header[section]
def item(self, index):
if index is None or not index.isValid():
return self.rootItem
return index.internalPointer()
def setModelData(self, modelData, parentItem=None):
if parentItem is None:
self.rootItem = HamiltonianItem(None, self.header)
parentItem = self.rootItem
if isinstance(modelData, dict):
for key, value in modelData.items():
if isinstance(value, dict):
item = HamiltonianItem(parentItem, [key])
self.setModelData(value, item)
elif isinstance(value, float):
item = HamiltonianItem(parentItem, [key, value])
elif isinstance(value, list):
item = HamiltonianItem(
parentItem, [key, value[0], value[1]])
else:
raise TypeError
def setHeaderData(self, header):
self.header = header
def updateModelData(self, modelData, parentIndex=None):
parentItem = self.item(parentIndex)
if parentItem.childCount():
for child in parentItem.children:
key = child.itemData[0]
childData = modelData[key]
childIndex = self.index(child.row(), 0, parentIndex)
self.updateModelData(childData, childIndex)
else:
if isinstance(modelData, float):
parentItem.setItemData(1, modelData)
elif isinstance(modelData, list):
value, scaling = modelData
parentItem.setItemData(1, value)
parentItem.setItemData(2, scaling)
else:
raise TypeError
self.dataChanged.emit(parentIndex, parentIndex)
return True
def _getModelData(self, modelData, parentItem=None):
"""Return the data contained in the model."""
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1)
def getModelData(self):
modelData = odict()
self._getModelData(modelData)
return modelData
def setNodesCheckState(self, checkState, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
children = parentItem.getChildren()
for child in children:
childName = child.itemData[0]
try:
child.setCheckState(checkState[childName])
except KeyError:
pass
def getNodesCheckState(self, parentItem=None):
"""Return the check state (disabled, tristate, enable) of all items
belonging to a parent.
"""
if parentItem is None:
parentItem = self.rootItem
checkStates = odict()
children = parentItem.getChildren()
for child in children:
checkStates[child.itemData[0]] = child.getCheckState()
return checkStates
def reset(self):
self.beginResetModel()
self.rootItem = None
self.endResetModel()
|
mretegan/crispy | crispy/gui/models.py | HamiltonianModel.rowCount | python | def rowCount(self, parentIndex):
if parentIndex.column() > 0:
return 0
if not parentIndex.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parentIndex)
return parentItem.childCount() | Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/models.py#L358-L373 | null | class HamiltonianModel(QAbstractItemModel):
"""Class implementing the Hamiltonian tree model. It subclasses
QAbstractItemModel and thus implements: index(), parent(),
rowCount(), columnCount(), and data().
To enable editing, the class implements setData() and reimplements
flags() to ensure that an editable item is returned. headerData() is
also reimplemented to control the way the header is presented.
"""
itemCheckStateChanged = pyqtSignal(QModelIndex, Qt.CheckState)
def __init__(self, parent=None):
super(HamiltonianModel, self).__init__(parent)
self.header = ['Parameter', 'Value', 'Scale Factor']
self.modelData = odict()
def index(self, row, column, parent=QModelIndex()):
"""Return the index of the item in the model specified by the
given row, column, and parent index.
"""
if parent is not None and not parent.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parent)
childItem = parentItem.child(row)
if childItem:
index = self.createIndex(row, column, childItem)
else:
index = QModelIndex()
return index
def parent(self, index):
"""Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex, would avoid
confusion about what parent actually is - an index or an item.
"""
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex
def siblings(self, index):
item = self.item(index)
parentIndex = self.parent(index)
parentItem = self.item(parentIndex)
siblingIndices = list()
for child in parentItem.children:
if child is item:
continue
else:
row = child.row()
siblingIndex = self.index(row, 0, parentIndex)
siblingIndices.append(siblingIndex)
return siblingIndices
def columnCount(self, parentIndex):
"""Return the number of columns. The index of the parent is
required, but not used, as in this implementation it defaults
for all items to the length of the header.
"""
return len(self.header)
def data(self, index, role):
"""Return role specific data for the item referred by
index.column()."""
if not index.isValid():
return
item = self.item(index)
column = index.column()
value = item.getItemData(column)
if role == Qt.DisplayRole:
try:
if column == 1:
# Display small values using scientific notation.
if abs(float(value)) < 1e-3 and float(value) != 0.0:
return '{0:8.1e}'.format(value)
else:
return '{0:8.3f}'.format(value)
else:
return '{0:8.2f}'.format(value)
except ValueError:
return value
elif role == Qt.EditRole:
try:
value = float(value)
if abs(value) < 1e-3 and value != 0.0:
return str('{0:8.1e}'.format(value))
else:
return str('{0:8.3f}'.format(value))
except ValueError:
return str(value)
elif role == Qt.CheckStateRole:
if item.parent == self.rootItem and column == 0:
return item.getCheckState()
elif role == Qt.TextAlignmentRole:
if column > 0:
return Qt.AlignRight
def setData(self, index, value, role):
"""Set the role data for the item at index to value."""
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True
def setSyncState(self, flag):
self.sync = flag
def flags(self, index):
"""Return the active flags for the given index. Add editable
flag to items other than the first column.
"""
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags
def headerData(self, section, orientation, role):
"""Return the data for the given role and section in the header
with the specified orientation.
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.header[section]
def item(self, index):
if index is None or not index.isValid():
return self.rootItem
return index.internalPointer()
def setModelData(self, modelData, parentItem=None):
if parentItem is None:
self.rootItem = HamiltonianItem(None, self.header)
parentItem = self.rootItem
if isinstance(modelData, dict):
for key, value in modelData.items():
if isinstance(value, dict):
item = HamiltonianItem(parentItem, [key])
self.setModelData(value, item)
elif isinstance(value, float):
item = HamiltonianItem(parentItem, [key, value])
elif isinstance(value, list):
item = HamiltonianItem(
parentItem, [key, value[0], value[1]])
else:
raise TypeError
def setHeaderData(self, header):
self.header = header
def updateModelData(self, modelData, parentIndex=None):
parentItem = self.item(parentIndex)
if parentItem.childCount():
for child in parentItem.children:
key = child.itemData[0]
childData = modelData[key]
childIndex = self.index(child.row(), 0, parentIndex)
self.updateModelData(childData, childIndex)
else:
if isinstance(modelData, float):
parentItem.setItemData(1, modelData)
elif isinstance(modelData, list):
value, scaling = modelData
parentItem.setItemData(1, value)
parentItem.setItemData(2, scaling)
else:
raise TypeError
self.dataChanged.emit(parentIndex, parentIndex)
return True
def _getModelData(self, modelData, parentItem=None):
"""Return the data contained in the model."""
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1)
def getModelData(self):
modelData = odict()
self._getModelData(modelData)
return modelData
def setNodesCheckState(self, checkState, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
children = parentItem.getChildren()
for child in children:
childName = child.itemData[0]
try:
child.setCheckState(checkState[childName])
except KeyError:
pass
def getNodesCheckState(self, parentItem=None):
"""Return the check state (disabled, tristate, enable) of all items
belonging to a parent.
"""
if parentItem is None:
parentItem = self.rootItem
checkStates = odict()
children = parentItem.getChildren()
for child in children:
checkStates[child.itemData[0]] = child.getCheckState()
return checkStates
def reset(self):
self.beginResetModel()
self.rootItem = None
self.endResetModel()
|
mretegan/crispy | crispy/gui/models.py | HamiltonianModel.data | python | def data(self, index, role):
if not index.isValid():
return
item = self.item(index)
column = index.column()
value = item.getItemData(column)
if role == Qt.DisplayRole:
try:
if column == 1:
# Display small values using scientific notation.
if abs(float(value)) < 1e-3 and float(value) != 0.0:
return '{0:8.1e}'.format(value)
else:
return '{0:8.3f}'.format(value)
else:
return '{0:8.2f}'.format(value)
except ValueError:
return value
elif role == Qt.EditRole:
try:
value = float(value)
if abs(value) < 1e-3 and value != 0.0:
return str('{0:8.1e}'.format(value))
else:
return str('{0:8.3f}'.format(value))
except ValueError:
return str(value)
elif role == Qt.CheckStateRole:
if item.parent == self.rootItem and column == 0:
return item.getCheckState()
elif role == Qt.TextAlignmentRole:
if column > 0:
return Qt.AlignRight | Return role specific data for the item referred by
index.column(). | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/models.py#L382-L418 | null | class HamiltonianModel(QAbstractItemModel):
"""Class implementing the Hamiltonian tree model. It subclasses
QAbstractItemModel and thus implements: index(), parent(),
rowCount(), columnCount(), and data().
To enable editing, the class implements setData() and reimplements
flags() to ensure that an editable item is returned. headerData() is
also reimplemented to control the way the header is presented.
"""
itemCheckStateChanged = pyqtSignal(QModelIndex, Qt.CheckState)
def __init__(self, parent=None):
super(HamiltonianModel, self).__init__(parent)
self.header = ['Parameter', 'Value', 'Scale Factor']
self.modelData = odict()
def index(self, row, column, parent=QModelIndex()):
"""Return the index of the item in the model specified by the
given row, column, and parent index.
"""
if parent is not None and not parent.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parent)
childItem = parentItem.child(row)
if childItem:
index = self.createIndex(row, column, childItem)
else:
index = QModelIndex()
return index
def parent(self, index):
"""Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex, would avoid
confusion about what parent actually is - an index or an item.
"""
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex
def siblings(self, index):
item = self.item(index)
parentIndex = self.parent(index)
parentItem = self.item(parentIndex)
siblingIndices = list()
for child in parentItem.children:
if child is item:
continue
else:
row = child.row()
siblingIndex = self.index(row, 0, parentIndex)
siblingIndices.append(siblingIndex)
return siblingIndices
def rowCount(self, parentIndex):
"""Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
"""
if parentIndex.column() > 0:
return 0
if not parentIndex.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parentIndex)
return parentItem.childCount()
def columnCount(self, parentIndex):
"""Return the number of columns. The index of the parent is
required, but not used, as in this implementation it defaults
for all items to the length of the header.
"""
return len(self.header)
def setData(self, index, value, role):
"""Set the role data for the item at index to value."""
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True
def setSyncState(self, flag):
self.sync = flag
def flags(self, index):
"""Return the active flags for the given index. Add editable
flag to items other than the first column.
"""
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags
def headerData(self, section, orientation, role):
"""Return the data for the given role and section in the header
with the specified orientation.
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.header[section]
def item(self, index):
if index is None or not index.isValid():
return self.rootItem
return index.internalPointer()
def setModelData(self, modelData, parentItem=None):
if parentItem is None:
self.rootItem = HamiltonianItem(None, self.header)
parentItem = self.rootItem
if isinstance(modelData, dict):
for key, value in modelData.items():
if isinstance(value, dict):
item = HamiltonianItem(parentItem, [key])
self.setModelData(value, item)
elif isinstance(value, float):
item = HamiltonianItem(parentItem, [key, value])
elif isinstance(value, list):
item = HamiltonianItem(
parentItem, [key, value[0], value[1]])
else:
raise TypeError
def setHeaderData(self, header):
self.header = header
def updateModelData(self, modelData, parentIndex=None):
parentItem = self.item(parentIndex)
if parentItem.childCount():
for child in parentItem.children:
key = child.itemData[0]
childData = modelData[key]
childIndex = self.index(child.row(), 0, parentIndex)
self.updateModelData(childData, childIndex)
else:
if isinstance(modelData, float):
parentItem.setItemData(1, modelData)
elif isinstance(modelData, list):
value, scaling = modelData
parentItem.setItemData(1, value)
parentItem.setItemData(2, scaling)
else:
raise TypeError
self.dataChanged.emit(parentIndex, parentIndex)
return True
def _getModelData(self, modelData, parentItem=None):
"""Return the data contained in the model."""
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1)
def getModelData(self):
modelData = odict()
self._getModelData(modelData)
return modelData
def setNodesCheckState(self, checkState, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
children = parentItem.getChildren()
for child in children:
childName = child.itemData[0]
try:
child.setCheckState(checkState[childName])
except KeyError:
pass
def getNodesCheckState(self, parentItem=None):
"""Return the check state (disabled, tristate, enable) of all items
belonging to a parent.
"""
if parentItem is None:
parentItem = self.rootItem
checkStates = odict()
children = parentItem.getChildren()
for child in children:
checkStates[child.itemData[0]] = child.getCheckState()
return checkStates
def reset(self):
self.beginResetModel()
self.rootItem = None
self.endResetModel()
|
mretegan/crispy | crispy/gui/models.py | HamiltonianModel.setData | python | def setData(self, index, value, role):
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True | Set the role data for the item at index to value. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/models.py#L420-L459 | null | class HamiltonianModel(QAbstractItemModel):
"""Class implementing the Hamiltonian tree model. It subclasses
QAbstractItemModel and thus implements: index(), parent(),
rowCount(), columnCount(), and data().
To enable editing, the class implements setData() and reimplements
flags() to ensure that an editable item is returned. headerData() is
also reimplemented to control the way the header is presented.
"""
itemCheckStateChanged = pyqtSignal(QModelIndex, Qt.CheckState)
def __init__(self, parent=None):
super(HamiltonianModel, self).__init__(parent)
self.header = ['Parameter', 'Value', 'Scale Factor']
self.modelData = odict()
def index(self, row, column, parent=QModelIndex()):
"""Return the index of the item in the model specified by the
given row, column, and parent index.
"""
if parent is not None and not parent.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parent)
childItem = parentItem.child(row)
if childItem:
index = self.createIndex(row, column, childItem)
else:
index = QModelIndex()
return index
def parent(self, index):
"""Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex, would avoid
confusion about what parent actually is - an index or an item.
"""
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex
def siblings(self, index):
item = self.item(index)
parentIndex = self.parent(index)
parentItem = self.item(parentIndex)
siblingIndices = list()
for child in parentItem.children:
if child is item:
continue
else:
row = child.row()
siblingIndex = self.index(row, 0, parentIndex)
siblingIndices.append(siblingIndex)
return siblingIndices
def rowCount(self, parentIndex):
"""Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
"""
if parentIndex.column() > 0:
return 0
if not parentIndex.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parentIndex)
return parentItem.childCount()
def columnCount(self, parentIndex):
"""Return the number of columns. The index of the parent is
required, but not used, as in this implementation it defaults
for all items to the length of the header.
"""
return len(self.header)
def data(self, index, role):
"""Return role specific data for the item referred by
index.column()."""
if not index.isValid():
return
item = self.item(index)
column = index.column()
value = item.getItemData(column)
if role == Qt.DisplayRole:
try:
if column == 1:
# Display small values using scientific notation.
if abs(float(value)) < 1e-3 and float(value) != 0.0:
return '{0:8.1e}'.format(value)
else:
return '{0:8.3f}'.format(value)
else:
return '{0:8.2f}'.format(value)
except ValueError:
return value
elif role == Qt.EditRole:
try:
value = float(value)
if abs(value) < 1e-3 and value != 0.0:
return str('{0:8.1e}'.format(value))
else:
return str('{0:8.3f}'.format(value))
except ValueError:
return str(value)
elif role == Qt.CheckStateRole:
if item.parent == self.rootItem and column == 0:
return item.getCheckState()
elif role == Qt.TextAlignmentRole:
if column > 0:
return Qt.AlignRight
def setSyncState(self, flag):
self.sync = flag
def flags(self, index):
"""Return the active flags for the given index. Add editable
flag to items other than the first column.
"""
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags
def headerData(self, section, orientation, role):
"""Return the data for the given role and section in the header
with the specified orientation.
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.header[section]
def item(self, index):
if index is None or not index.isValid():
return self.rootItem
return index.internalPointer()
def setModelData(self, modelData, parentItem=None):
if parentItem is None:
self.rootItem = HamiltonianItem(None, self.header)
parentItem = self.rootItem
if isinstance(modelData, dict):
for key, value in modelData.items():
if isinstance(value, dict):
item = HamiltonianItem(parentItem, [key])
self.setModelData(value, item)
elif isinstance(value, float):
item = HamiltonianItem(parentItem, [key, value])
elif isinstance(value, list):
item = HamiltonianItem(
parentItem, [key, value[0], value[1]])
else:
raise TypeError
def setHeaderData(self, header):
self.header = header
def updateModelData(self, modelData, parentIndex=None):
parentItem = self.item(parentIndex)
if parentItem.childCount():
for child in parentItem.children:
key = child.itemData[0]
childData = modelData[key]
childIndex = self.index(child.row(), 0, parentIndex)
self.updateModelData(childData, childIndex)
else:
if isinstance(modelData, float):
parentItem.setItemData(1, modelData)
elif isinstance(modelData, list):
value, scaling = modelData
parentItem.setItemData(1, value)
parentItem.setItemData(2, scaling)
else:
raise TypeError
self.dataChanged.emit(parentIndex, parentIndex)
return True
def _getModelData(self, modelData, parentItem=None):
"""Return the data contained in the model."""
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1)
def getModelData(self):
modelData = odict()
self._getModelData(modelData)
return modelData
def setNodesCheckState(self, checkState, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
children = parentItem.getChildren()
for child in children:
childName = child.itemData[0]
try:
child.setCheckState(checkState[childName])
except KeyError:
pass
def getNodesCheckState(self, parentItem=None):
"""Return the check state (disabled, tristate, enable) of all items
belonging to a parent.
"""
if parentItem is None:
parentItem = self.rootItem
checkStates = odict()
children = parentItem.getChildren()
for child in children:
checkStates[child.itemData[0]] = child.getCheckState()
return checkStates
def reset(self):
self.beginResetModel()
self.rootItem = None
self.endResetModel()
|
mretegan/crispy | crispy/gui/models.py | HamiltonianModel.flags | python | def flags(self, index):
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags | Return the active flags for the given index. Add editable
flag to items other than the first column. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/models.py#L464-L477 | null | class HamiltonianModel(QAbstractItemModel):
"""Class implementing the Hamiltonian tree model. It subclasses
QAbstractItemModel and thus implements: index(), parent(),
rowCount(), columnCount(), and data().
To enable editing, the class implements setData() and reimplements
flags() to ensure that an editable item is returned. headerData() is
also reimplemented to control the way the header is presented.
"""
itemCheckStateChanged = pyqtSignal(QModelIndex, Qt.CheckState)
def __init__(self, parent=None):
super(HamiltonianModel, self).__init__(parent)
self.header = ['Parameter', 'Value', 'Scale Factor']
self.modelData = odict()
def index(self, row, column, parent=QModelIndex()):
"""Return the index of the item in the model specified by the
given row, column, and parent index.
"""
if parent is not None and not parent.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parent)
childItem = parentItem.child(row)
if childItem:
index = self.createIndex(row, column, childItem)
else:
index = QModelIndex()
return index
def parent(self, index):
"""Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex, would avoid
confusion about what parent actually is - an index or an item.
"""
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex
def siblings(self, index):
item = self.item(index)
parentIndex = self.parent(index)
parentItem = self.item(parentIndex)
siblingIndices = list()
for child in parentItem.children:
if child is item:
continue
else:
row = child.row()
siblingIndex = self.index(row, 0, parentIndex)
siblingIndices.append(siblingIndex)
return siblingIndices
def rowCount(self, parentIndex):
"""Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
"""
if parentIndex.column() > 0:
return 0
if not parentIndex.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parentIndex)
return parentItem.childCount()
def columnCount(self, parentIndex):
"""Return the number of columns. The index of the parent is
required, but not used, as in this implementation it defaults
for all items to the length of the header.
"""
return len(self.header)
def data(self, index, role):
"""Return role specific data for the item referred by
index.column()."""
if not index.isValid():
return
item = self.item(index)
column = index.column()
value = item.getItemData(column)
if role == Qt.DisplayRole:
try:
if column == 1:
# Display small values using scientific notation.
if abs(float(value)) < 1e-3 and float(value) != 0.0:
return '{0:8.1e}'.format(value)
else:
return '{0:8.3f}'.format(value)
else:
return '{0:8.2f}'.format(value)
except ValueError:
return value
elif role == Qt.EditRole:
try:
value = float(value)
if abs(value) < 1e-3 and value != 0.0:
return str('{0:8.1e}'.format(value))
else:
return str('{0:8.3f}'.format(value))
except ValueError:
return str(value)
elif role == Qt.CheckStateRole:
if item.parent == self.rootItem and column == 0:
return item.getCheckState()
elif role == Qt.TextAlignmentRole:
if column > 0:
return Qt.AlignRight
def setData(self, index, value, role):
"""Set the role data for the item at index to value."""
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True
def setSyncState(self, flag):
self.sync = flag
def headerData(self, section, orientation, role):
"""Return the data for the given role and section in the header
with the specified orientation.
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.header[section]
def item(self, index):
if index is None or not index.isValid():
return self.rootItem
return index.internalPointer()
def setModelData(self, modelData, parentItem=None):
if parentItem is None:
self.rootItem = HamiltonianItem(None, self.header)
parentItem = self.rootItem
if isinstance(modelData, dict):
for key, value in modelData.items():
if isinstance(value, dict):
item = HamiltonianItem(parentItem, [key])
self.setModelData(value, item)
elif isinstance(value, float):
item = HamiltonianItem(parentItem, [key, value])
elif isinstance(value, list):
item = HamiltonianItem(
parentItem, [key, value[0], value[1]])
else:
raise TypeError
def setHeaderData(self, header):
self.header = header
def updateModelData(self, modelData, parentIndex=None):
parentItem = self.item(parentIndex)
if parentItem.childCount():
for child in parentItem.children:
key = child.itemData[0]
childData = modelData[key]
childIndex = self.index(child.row(), 0, parentIndex)
self.updateModelData(childData, childIndex)
else:
if isinstance(modelData, float):
parentItem.setItemData(1, modelData)
elif isinstance(modelData, list):
value, scaling = modelData
parentItem.setItemData(1, value)
parentItem.setItemData(2, scaling)
else:
raise TypeError
self.dataChanged.emit(parentIndex, parentIndex)
return True
def _getModelData(self, modelData, parentItem=None):
"""Return the data contained in the model."""
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1)
def getModelData(self):
modelData = odict()
self._getModelData(modelData)
return modelData
def setNodesCheckState(self, checkState, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
children = parentItem.getChildren()
for child in children:
childName = child.itemData[0]
try:
child.setCheckState(checkState[childName])
except KeyError:
pass
def getNodesCheckState(self, parentItem=None):
"""Return the check state (disabled, tristate, enable) of all items
belonging to a parent.
"""
if parentItem is None:
parentItem = self.rootItem
checkStates = odict()
children = parentItem.getChildren()
for child in children:
checkStates[child.itemData[0]] = child.getCheckState()
return checkStates
def reset(self):
self.beginResetModel()
self.rootItem = None
self.endResetModel()
|
mretegan/crispy | crispy/gui/models.py | HamiltonianModel._getModelData | python | def _getModelData(self, modelData, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1) | Return the data contained in the model. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/models.py#L533-L547 | null | class HamiltonianModel(QAbstractItemModel):
"""Class implementing the Hamiltonian tree model. It subclasses
QAbstractItemModel and thus implements: index(), parent(),
rowCount(), columnCount(), and data().
To enable editing, the class implements setData() and reimplements
flags() to ensure that an editable item is returned. headerData() is
also reimplemented to control the way the header is presented.
"""
itemCheckStateChanged = pyqtSignal(QModelIndex, Qt.CheckState)
def __init__(self, parent=None):
super(HamiltonianModel, self).__init__(parent)
self.header = ['Parameter', 'Value', 'Scale Factor']
self.modelData = odict()
def index(self, row, column, parent=QModelIndex()):
"""Return the index of the item in the model specified by the
given row, column, and parent index.
"""
if parent is not None and not parent.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parent)
childItem = parentItem.child(row)
if childItem:
index = self.createIndex(row, column, childItem)
else:
index = QModelIndex()
return index
def parent(self, index):
"""Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex, would avoid
confusion about what parent actually is - an index or an item.
"""
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex
def siblings(self, index):
item = self.item(index)
parentIndex = self.parent(index)
parentItem = self.item(parentIndex)
siblingIndices = list()
for child in parentItem.children:
if child is item:
continue
else:
row = child.row()
siblingIndex = self.index(row, 0, parentIndex)
siblingIndices.append(siblingIndex)
return siblingIndices
def rowCount(self, parentIndex):
"""Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
"""
if parentIndex.column() > 0:
return 0
if not parentIndex.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parentIndex)
return parentItem.childCount()
def columnCount(self, parentIndex):
"""Return the number of columns. The index of the parent is
required, but not used, as in this implementation it defaults
for all items to the length of the header.
"""
return len(self.header)
def data(self, index, role):
"""Return role specific data for the item referred by
index.column()."""
if not index.isValid():
return
item = self.item(index)
column = index.column()
value = item.getItemData(column)
if role == Qt.DisplayRole:
try:
if column == 1:
# Display small values using scientific notation.
if abs(float(value)) < 1e-3 and float(value) != 0.0:
return '{0:8.1e}'.format(value)
else:
return '{0:8.3f}'.format(value)
else:
return '{0:8.2f}'.format(value)
except ValueError:
return value
elif role == Qt.EditRole:
try:
value = float(value)
if abs(value) < 1e-3 and value != 0.0:
return str('{0:8.1e}'.format(value))
else:
return str('{0:8.3f}'.format(value))
except ValueError:
return str(value)
elif role == Qt.CheckStateRole:
if item.parent == self.rootItem and column == 0:
return item.getCheckState()
elif role == Qt.TextAlignmentRole:
if column > 0:
return Qt.AlignRight
def setData(self, index, value, role):
"""Set the role data for the item at index to value."""
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True
def setSyncState(self, flag):
self.sync = flag
def flags(self, index):
"""Return the active flags for the given index. Add editable
flag to items other than the first column.
"""
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags
def headerData(self, section, orientation, role):
"""Return the data for the given role and section in the header
with the specified orientation.
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.header[section]
def item(self, index):
if index is None or not index.isValid():
return self.rootItem
return index.internalPointer()
def setModelData(self, modelData, parentItem=None):
if parentItem is None:
self.rootItem = HamiltonianItem(None, self.header)
parentItem = self.rootItem
if isinstance(modelData, dict):
for key, value in modelData.items():
if isinstance(value, dict):
item = HamiltonianItem(parentItem, [key])
self.setModelData(value, item)
elif isinstance(value, float):
item = HamiltonianItem(parentItem, [key, value])
elif isinstance(value, list):
item = HamiltonianItem(
parentItem, [key, value[0], value[1]])
else:
raise TypeError
def setHeaderData(self, header):
self.header = header
def updateModelData(self, modelData, parentIndex=None):
parentItem = self.item(parentIndex)
if parentItem.childCount():
for child in parentItem.children:
key = child.itemData[0]
childData = modelData[key]
childIndex = self.index(child.row(), 0, parentIndex)
self.updateModelData(childData, childIndex)
else:
if isinstance(modelData, float):
parentItem.setItemData(1, modelData)
elif isinstance(modelData, list):
value, scaling = modelData
parentItem.setItemData(1, value)
parentItem.setItemData(2, scaling)
else:
raise TypeError
self.dataChanged.emit(parentIndex, parentIndex)
return True
def getModelData(self):
modelData = odict()
self._getModelData(modelData)
return modelData
def setNodesCheckState(self, checkState, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
children = parentItem.getChildren()
for child in children:
childName = child.itemData[0]
try:
child.setCheckState(checkState[childName])
except KeyError:
pass
def getNodesCheckState(self, parentItem=None):
"""Return the check state (disabled, tristate, enable) of all items
belonging to a parent.
"""
if parentItem is None:
parentItem = self.rootItem
checkStates = odict()
children = parentItem.getChildren()
for child in children:
checkStates[child.itemData[0]] = child.getCheckState()
return checkStates
def reset(self):
self.beginResetModel()
self.rootItem = None
self.endResetModel()
|
mretegan/crispy | crispy/gui/models.py | HamiltonianModel.getNodesCheckState | python | def getNodesCheckState(self, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
checkStates = odict()
children = parentItem.getChildren()
for child in children:
checkStates[child.itemData[0]] = child.getCheckState()
return checkStates | Return the check state (disabled, tristate, enable) of all items
belonging to a parent. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/models.py#L567-L580 | null | class HamiltonianModel(QAbstractItemModel):
"""Class implementing the Hamiltonian tree model. It subclasses
QAbstractItemModel and thus implements: index(), parent(),
rowCount(), columnCount(), and data().
To enable editing, the class implements setData() and reimplements
flags() to ensure that an editable item is returned. headerData() is
also reimplemented to control the way the header is presented.
"""
itemCheckStateChanged = pyqtSignal(QModelIndex, Qt.CheckState)
def __init__(self, parent=None):
super(HamiltonianModel, self).__init__(parent)
self.header = ['Parameter', 'Value', 'Scale Factor']
self.modelData = odict()
def index(self, row, column, parent=QModelIndex()):
"""Return the index of the item in the model specified by the
given row, column, and parent index.
"""
if parent is not None and not parent.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parent)
childItem = parentItem.child(row)
if childItem:
index = self.createIndex(row, column, childItem)
else:
index = QModelIndex()
return index
def parent(self, index):
"""Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex, would avoid
confusion about what parent actually is - an index or an item.
"""
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex
def siblings(self, index):
item = self.item(index)
parentIndex = self.parent(index)
parentItem = self.item(parentIndex)
siblingIndices = list()
for child in parentItem.children:
if child is item:
continue
else:
row = child.row()
siblingIndex = self.index(row, 0, parentIndex)
siblingIndices.append(siblingIndex)
return siblingIndices
def rowCount(self, parentIndex):
"""Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
"""
if parentIndex.column() > 0:
return 0
if not parentIndex.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parentIndex)
return parentItem.childCount()
def columnCount(self, parentIndex):
"""Return the number of columns. The index of the parent is
required, but not used, as in this implementation it defaults
for all items to the length of the header.
"""
return len(self.header)
def data(self, index, role):
"""Return role specific data for the item referred by
index.column()."""
if not index.isValid():
return
item = self.item(index)
column = index.column()
value = item.getItemData(column)
if role == Qt.DisplayRole:
try:
if column == 1:
# Display small values using scientific notation.
if abs(float(value)) < 1e-3 and float(value) != 0.0:
return '{0:8.1e}'.format(value)
else:
return '{0:8.3f}'.format(value)
else:
return '{0:8.2f}'.format(value)
except ValueError:
return value
elif role == Qt.EditRole:
try:
value = float(value)
if abs(value) < 1e-3 and value != 0.0:
return str('{0:8.1e}'.format(value))
else:
return str('{0:8.3f}'.format(value))
except ValueError:
return str(value)
elif role == Qt.CheckStateRole:
if item.parent == self.rootItem and column == 0:
return item.getCheckState()
elif role == Qt.TextAlignmentRole:
if column > 0:
return Qt.AlignRight
def setData(self, index, value, role):
"""Set the role data for the item at index to value."""
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True
def setSyncState(self, flag):
self.sync = flag
def flags(self, index):
"""Return the active flags for the given index. Add editable
flag to items other than the first column.
"""
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags
def headerData(self, section, orientation, role):
"""Return the data for the given role and section in the header
with the specified orientation.
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.header[section]
def item(self, index):
if index is None or not index.isValid():
return self.rootItem
return index.internalPointer()
def setModelData(self, modelData, parentItem=None):
if parentItem is None:
self.rootItem = HamiltonianItem(None, self.header)
parentItem = self.rootItem
if isinstance(modelData, dict):
for key, value in modelData.items():
if isinstance(value, dict):
item = HamiltonianItem(parentItem, [key])
self.setModelData(value, item)
elif isinstance(value, float):
item = HamiltonianItem(parentItem, [key, value])
elif isinstance(value, list):
item = HamiltonianItem(
parentItem, [key, value[0], value[1]])
else:
raise TypeError
def setHeaderData(self, header):
self.header = header
def updateModelData(self, modelData, parentIndex=None):
parentItem = self.item(parentIndex)
if parentItem.childCount():
for child in parentItem.children:
key = child.itemData[0]
childData = modelData[key]
childIndex = self.index(child.row(), 0, parentIndex)
self.updateModelData(childData, childIndex)
else:
if isinstance(modelData, float):
parentItem.setItemData(1, modelData)
elif isinstance(modelData, list):
value, scaling = modelData
parentItem.setItemData(1, value)
parentItem.setItemData(2, scaling)
else:
raise TypeError
self.dataChanged.emit(parentIndex, parentIndex)
return True
def _getModelData(self, modelData, parentItem=None):
"""Return the data contained in the model."""
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1)
def getModelData(self):
modelData = odict()
self._getModelData(modelData)
return modelData
def setNodesCheckState(self, checkState, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
children = parentItem.getChildren()
for child in children:
childName = child.itemData[0]
try:
child.setCheckState(checkState[childName])
except KeyError:
pass
def reset(self):
self.beginResetModel()
self.rootItem = None
self.endResetModel()
|
mretegan/crispy | crispy/version.py | calc_hexversion | python | def calc_hexversion(major=0, minor=0, micro=0, releaselevel='dev', serial=0):
try:
releaselevel = int(releaselevel)
except ValueError:
releaselevel = RELEASE_LEVEL_VALUE.get(releaselevel, 0)
hex_version = int(serial)
hex_version |= releaselevel * 1 << 4
hex_version |= int(micro) * 1 << 8
hex_version |= int(minor) * 1 << 16
hex_version |= int(major) * 1 << 24
return hex_version | Calculate the hexadecimal version number from the tuple version_info:
:param major: integer
:param minor: integer
:param micro: integer
:param relev: integer or string
:param serial: integer
:return: integerm always increasing with revision numbers | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/version.py#L89-L109 | null | #!/usr/bin/env python
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Unique place where the version number is defined.
provides:
* version = '1.2.3' or '1.2.3-beta4'
* version_info = named tuple (1,2,3,'beta',4)
* hexversion: 0x010203B4
* strictversion = '1.2.3b4
* debianversion = '1.2.3~beta4'
* calc_hexversion: the function to transform a version_tuple into an integer
This is called hexversion since it only really looks meaningful when viewed as the
result of passing it to the built-in hex() function.
The version_info value may be used for a more human-friendly encoding of the same information.
The hexversion is a 32-bit number with the following layout:
Bits (big endian order) Meaning
1-8 PY_MAJOR_VERSION (the 2 in 2.1.0a3)
9-16 PY_MINOR_VERSION (the 1 in 2.1.0a3)
17-24 PY_MICRO_VERSION (the 0 in 2.1.0a3)
25-28 PY_RELEASE_LEVEL (0xA for alpha, 0xB for beta, 0xC for release candidate and 0xF for final)
29-32 PY_RELEASE_SERIAL (the 3 in 2.1.0a3, zero for final releases)
Thus 2.1.0a3 is hexversion 0x020100a3.
"""
from __future__ import absolute_import, print_function, division
__authors__ = ['Jérôme Kieffer']
__license__ = 'MIT'
__date__ = '08/08/2016'
__all__ = ['date', 'version_info', 'strictversion', 'hexversion', 'debianversion', 'calc_hexversion']
RELEASE_LEVEL_VALUE = {'dev': 0,
'alpha': 10,
'beta': 11,
'gamma': 11,
'rc': 12,
'final': 15}
MAJOR = 0
MINOR = 7
MICRO = 3
RELEV = 'dev' # <16
SERIAL = 0 # <16
date = __date__
from collections import namedtuple
_version_info = namedtuple('version_info', ['major', 'minor', 'micro', 'releaselevel', 'serial'])
version_info = _version_info(MAJOR, MINOR, MICRO, RELEV, SERIAL)
strictversion = version = debianversion = '%d.%d.%d' % version_info[:3]
if version_info.releaselevel != 'final':
version += '-%s%s' % version_info[-2:]
debianversion += '~adev%i' % version_info[-1] if RELEV == 'dev' else '~%s%i' % version_info[-2:]
prerel = 'a' if RELEASE_LEVEL_VALUE.get(version_info[3], 0) < 10 else 'b'
if prerel not in 'ab':
prerel = 'a'
strictversion += prerel + str(version_info[-1])
hexversion = calc_hexversion(*version_info)
if __name__ == '__main__':
print(version)
|
mretegan/crispy | crispy/gui/plot.py | MainPlotWidget._contextMenu | python | def _contextMenu(self, pos):
# Create the context menu.
menu = QMenu(self)
menu.addAction(self._zoomBackAction)
# Displaying the context menu at the mouse position requires
# a global position.
# The position received as argument is relative to PlotWidget's
# plot area, and thus needs to be converted.
plotArea = self.getWidgetHandle()
globalPosition = plotArea.mapToGlobal(pos)
menu.exec_(globalPosition) | Handle plot area customContextMenuRequested signal.
:param QPoint pos: Mouse position relative to plot area | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/gui/plot.py#L247-L262 | null | class MainPlotWidget(BasePlotWidget):
def __init__(self, parent=None, **kwargs):
super(MainPlotWidget, self).__init__(
parent=parent, backend=BackendMatplotlibQt, **kwargs)
# Add a profile toolbar.
self._profileWindow = ProfileWindow()
self._profileToolBar = Profile.ProfileToolBar(
plot=self, profileWindow=self._profileWindow)
self._profileToolBar.actions()[-1].setVisible(False)
self.removeToolBar(self._outputToolBar)
self.addToolBar(self._profileToolBar)
self.addToolBar(self._outputToolBar)
self._outputToolBar.show()
if sys.platform == 'darwin':
self.setIconSize(QSize(24, 24))
# Create QAction for the context menu once for all.
self._zoomBackAction = actions.control.ZoomBackAction(
plot=self, parent=self)
# Retrieve PlotWidget's plot area widget.
plotArea = self.getWidgetHandle()
# Set plot area custom context menu.
plotArea.setContextMenuPolicy(Qt.CustomContextMenu)
plotArea.customContextMenuRequested.connect(self._contextMenu)
# Use the viridis color map by default.
colormap = {'name': 'viridis', 'normalization': 'linear',
'autoscale': True, 'vmin': 0.0, 'vmax': 1.0}
self.setDefaultColormap(colormap)
def closeProfileWindow(self):
self._profileWindow.close()
|
mretegan/crispy | crispy/utils/broaden.py | convolve_fft | python | def convolve_fft(array, kernel):
array = np.asarray(array, dtype=np.complex)
kernel = np.asarray(kernel, dtype=np.complex)
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of "
"dimensions")
array_shape = array.shape
kernel_shape = kernel.shape
new_shape = np.array(array_shape) + np.array(kernel_shape)
array_slices = []
kernel_slices = []
for (new_dimsize, array_dimsize, kernel_dimsize) in zip(
new_shape, array_shape, kernel_shape):
center = new_dimsize - (new_dimsize + 1) // 2
array_slices += [slice(center - array_dimsize // 2,
center + (array_dimsize + 1) // 2)]
kernel_slices += [slice(center - kernel_dimsize // 2,
center + (kernel_dimsize + 1) // 2)]
array_slices = tuple(array_slices)
kernel_slices = tuple(kernel_slices)
if not np.all(new_shape == array_shape):
big_array = np.zeros(new_shape, dtype=np.complex)
big_array[array_slices] = array
else:
big_array = array
if not np.all(new_shape == kernel_shape):
big_kernel = np.zeros(new_shape, dtype=np.complex)
big_kernel[kernel_slices] = kernel
else:
big_kernel = kernel
array_fft = np.fft.fftn(big_array)
kernel_fft = np.fft.fftn(np.fft.ifftshift(big_kernel))
rifft = np.fft.ifftn(array_fft * kernel_fft)
return rifft[array_slices].real | Convolve an array with a kernel using FFT.
Implemntation based on the convolve_fft function from astropy.
https://github.com/astropy/astropy/blob/master/astropy/convolution/convolve.py | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/utils/broaden.py#L65-L114 | null | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import, division, unicode_literals
__authors__ = ['Marius Retegan']
__license__ = 'MIT'
__date__ = '04/10/2017'
import numpy as np
MIN_KERNEL_SUM = 1e-8
def gaussian_kernel1d(sigma=None, truncate=6):
size = int(2 * truncate * sigma)
if size % 2 == 0:
size = size + 1
x = np.arange(size)
# print('The size of the kernel is: {}'.format(size))
mu = np.median(x)
# The prefactor 1 / (sigma * np.sqrt(2 * np.pi))
# drops in the normalization.
kernel = np.exp(-0.5 * ((x - mu)**2 / sigma**2))
if kernel.sum() < MIN_KERNEL_SUM:
raise Exception(
'The kernel can\'t be normalized, because its sum is close to '
'zero. The sum of the kernel is < {0}'.format(MIN_KERNEL_SUM))
kernel /= kernel.sum()
return kernel
def gaussian_kernel2d(sigma=None, truncate=(6, 6)):
if sigma.size != 2 or len(truncate) != 2:
raise Exception('Sigma and the truncation parameter don\'t have the '
'required dimenstion.')
kernel_x = gaussian_kernel1d(sigma[0], truncate[0])
kernel_y = gaussian_kernel1d(sigma[1], truncate[1])
kernel = np.outer(kernel_y, kernel_x)
return kernel
def broaden(array, fwhm=None, kind='gaussian'):
if fwhm is None:
return
fwhm = np.array(fwhm)
if (fwhm <= 0).any():
return array
if kind == 'gaussian':
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
if fwhm.size == 1:
kernel = gaussian_kernel1d(sigma)
elif fwhm.size == 2:
kernel = gaussian_kernel2d(sigma)
else:
print('Unvailable type of broadening.')
return array
return convolve_fft(array, kernel)
|
mretegan/crispy | crispy/modules/orca/parser.py | Tensor.diagonalize | python | def diagonalize(self):
'''Diagonalize the tensor.'''
self.eigvals, self.eigvecs = np.linalg.eig(
(self.tensor.transpose() + self.tensor) / 2.0)
self.eigvals = np.diag(np.dot(
np.dot(self.eigvecs.transpose(), self.tensor), self.eigvecs)) | Diagonalize the tensor. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/modules/orca/parser.py#L61-L66 | null | class Tensor(object):
'''Class for tensor objects'''
def __init__(self, tensor):
self.tensor = tensor
'''Start by diagonalizing the tensor. Next sort the eigenvalues
according to some requirement specific to a particular tensor.
This method is usually overloaded by the subclasses that inherit
from Tensor. Finally calculate all the possible Euler angles.
The rotation matrix resulted from the eigenvectors has to have a
positive determinant in order to be considered a proper rotation.
This can be achived by multipling the columns with -1.'''
self.diagonalize()
self.sort()
self.euler_angles_and_eigenframes()
def sort(self):
'''Sort the eigenvalues and eigenvectors according to the indexes
that result in increasing eigenvalues in absolute value.'''
self.ids = np.argsort(np.abs(self.eigvals))
self.eigvals = self.eigvals[self.ids]
self.eigvecs = self.eigvecs[:, self.ids]
def euler_angles_and_eigenframes(self):
'''Calculate the Euler angles only if the rotation matrix
(eigenframe) has positive determinant.'''
signs = np.array([[1, 1, 1], [-1, 1, 1], [1, -1, 1],
[1, 1, -1], [-1, -1, 1], [-1, 1, -1],
[1, -1, -1], [-1, -1, -1]])
eulangs = []
eigframes = []
for i, sign in enumerate(signs):
eigframe = np.dot(self.eigvecs, np.diag(sign))
if np.linalg.det(eigframe) > 1e-4:
eigframes.append(np.array(eigframe))
eulangs.append(np.array(
transformations.euler_from_matrix(eigframe, axes='szyz')))
self.eigframes = np.array(eigframes)
# The sign has to be inverted to be consistent with ORCA and EasySpin.
self.eulangs = -np.array(eulangs)
|
mretegan/crispy | crispy/modules/orca/parser.py | Tensor.euler_angles_and_eigenframes | python | def euler_angles_and_eigenframes(self):
'''Calculate the Euler angles only if the rotation matrix
(eigenframe) has positive determinant.'''
signs = np.array([[1, 1, 1], [-1, 1, 1], [1, -1, 1],
[1, 1, -1], [-1, -1, 1], [-1, 1, -1],
[1, -1, -1], [-1, -1, -1]])
eulangs = []
eigframes = []
for i, sign in enumerate(signs):
eigframe = np.dot(self.eigvecs, np.diag(sign))
if np.linalg.det(eigframe) > 1e-4:
eigframes.append(np.array(eigframe))
eulangs.append(np.array(
transformations.euler_from_matrix(eigframe, axes='szyz')))
self.eigframes = np.array(eigframes)
# The sign has to be inverted to be consistent with ORCA and EasySpin.
self.eulangs = -np.array(eulangs) | Calculate the Euler angles only if the rotation matrix
(eigenframe) has positive determinant. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/modules/orca/parser.py#L75-L92 | null | class Tensor(object):
'''Class for tensor objects'''
def __init__(self, tensor):
self.tensor = tensor
'''Start by diagonalizing the tensor. Next sort the eigenvalues
according to some requirement specific to a particular tensor.
This method is usually overloaded by the subclasses that inherit
from Tensor. Finally calculate all the possible Euler angles.
The rotation matrix resulted from the eigenvectors has to have a
positive determinant in order to be considered a proper rotation.
This can be achived by multipling the columns with -1.'''
self.diagonalize()
self.sort()
self.euler_angles_and_eigenframes()
def diagonalize(self):
'''Diagonalize the tensor.'''
self.eigvals, self.eigvecs = np.linalg.eig(
(self.tensor.transpose() + self.tensor) / 2.0)
self.eigvals = np.diag(np.dot(
np.dot(self.eigvecs.transpose(), self.tensor), self.eigvecs))
def sort(self):
'''Sort the eigenvalues and eigenvectors according to the indexes
that result in increasing eigenvalues in absolute value.'''
self.ids = np.argsort(np.abs(self.eigvals))
self.eigvals = self.eigvals[self.ids]
self.eigvecs = self.eigvecs[:, self.ids]
|
mretegan/crispy | crispy/modules/orca/parser.py | OutputData._skip_lines | python | def _skip_lines(self, n):
'''Skip a number of lines from the output.'''
for i in range(n):
self.line = next(self.output)
return self.line | Skip a number of lines from the output. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/modules/orca/parser.py#L151-L155 | null | class OutputData(object):
'''Extract data from the ORCA output.'''
def __init__(self, output):
self.output = open(output, 'r')
self.parse()
def _parse_tensor(self, indices=False):
'''Parse a tensor.'''
if indices:
self.line = self._skip_lines(1)
tensor = np.zeros((3, 3))
for i in range(3):
tokens = self.line.split()
if indices:
tensor[i][0] = float(tokens[1])
tensor[i][1] = float(tokens[2])
tensor[i][2] = float(tokens[3])
else:
tensor[i][0] = float(tokens[0])
tensor[i][1] = float(tokens[1])
tensor[i][2] = float(tokens[2])
self.line = self._skip_lines(1)
return tensor
def _parse_components(self):
tokens = self.line.split()
components = np.zeros((3,))
components[0] = float(tokens[1])
components[1] = float(tokens[2])
components[2] = float(tokens[3])
self.line = self._skip_lines(1)
return components
def parse(self):
'''Iterate over the lines and extract the required data.'''
for self.line in self.output:
# Parse general data: charge, multiplicity, coordinates, etc.
self.index = 0
if self.line[1:13] == 'Total Charge':
tokens = self.line.split()
self.charge = int(tokens[-1])
if (self.line[1:13] or self.line[0:12]) == 'Multiplicity':
tokens = self.line.split()
self.multiplicity = int(tokens[-1])
if self.line[0:33] == 'CARTESIAN COORDINATES (ANGSTROEM)':
if not hasattr(self, 'names'):
self.names = dict()
if not hasattr(self, 'coords'):
self.coords = dict()
self.line = self._skip_lines(2)
names = list()
coords = list()
while self.line.strip():
tokens = self.line.split()
names.append(tokens[0])
x = float(tokens[1])
y = float(tokens[2])
z = float(tokens[3])
coords.append((x, y, z))
self.line = next(self.output)
self.names = np.array(names)
self.coords[self.index] = np.array(coords)
if self.line[22:50] == 'MULLIKEN POPULATION ANALYSIS':
if not hasattr(self, 'populations'):
self.populations = dict()
self.line = self._skip_lines(6)
populations = list()
while self.line.strip() and 'Sum' not in self.line:
tokens = self.line.split()
populations.append((float(tokens[-2]), float(tokens[-1])))
self.line = next(self.output)
self.populations['mulliken'][self.index] = np.array(populations) # noqa
# Parse data from the EPR/NMR module
if self.line[37:44] == 'EPR/NMR':
self.eprnmr = dict()
if self.line[0:19] == 'ELECTRONIC G-MATRIX':
self.line = self._skip_lines(4)
self.eprnmr['g']['tensor'] = self._parse_tensor()
if self.line[0:27] == 'ZERO-FIELD-SPLITTING TENSOR':
self.line = self._skip_lines(4)
self.eprnmr['zfs']['tensor'] = self._parse_tensor()
if self.line[1:8] == 'Nucleus':
tokens = self.line.split()
nucleus = int(re.findall(r'\d+', tokens[1])[0])
while 'Raw HFC' not in self.line:
self.line = self._skip_lines(1)
self.line = self._skip_lines(2)
self.eprnmr['hfc'][nucleus]['tensor'] = self._parse_tensor()
self.line = self._skip_lines(1)
self.eprnmr['hfc'][nucleus]['fc'] = self._parse_components()
self.eprnmr['hfc'][nucleus]['sd'] = self._parse_components()
self.line = self._skip_lines(1)
self.eprnmr['hfc'][nucleus]['orb'] = self._parse_components()
self.eprnmr['hfc'][nucleus]['dia'] = self._parse_components()
# Parse data from the MRCI module
if self.line[36:43] == 'M R C I':
self.mrci = dict()
if self.line[1:19] == 'SPIN-SPIN COUPLING':
self.line = self._skip_lines(4)
self.mrci['zfs']['ssc']['tensor'] = self._parse_tensor()
if self.line[1:30] == '2ND ORDER SPIN-ORBIT COUPLING':
while 'Second' not in self.line:
self.line = self._skip_lines(1)
self.line = self._skip_lines(1)
self.mrci['zfs']['soc']['second_order']['0']['tensor'] = self._parse_tensor() # noqa
self.line = self._skip_lines(2)
self.mrci['zfs']['soc']['second_order']['m']['tensor'] = self._parse_tensor() # noqa
self.line = self._skip_lines(2)
self.mrci['zfs']['soc']['second_order']['p']['tensor'] = self._parse_tensor() # noqa
if self.line[1:42] == 'EFFECTIVE HAMILTONIAN SPIN-ORBIT COUPLING':
self.line = self._skip_lines(4)
self.mrci['zfs']['soc']['heff']['tensor'] = self._parse_tensor() # noqa
|
mretegan/crispy | crispy/modules/orca/parser.py | OutputData._parse_tensor | python | def _parse_tensor(self, indices=False):
'''Parse a tensor.'''
if indices:
self.line = self._skip_lines(1)
tensor = np.zeros((3, 3))
for i in range(3):
tokens = self.line.split()
if indices:
tensor[i][0] = float(tokens[1])
tensor[i][1] = float(tokens[2])
tensor[i][2] = float(tokens[3])
else:
tensor[i][0] = float(tokens[0])
tensor[i][1] = float(tokens[1])
tensor[i][2] = float(tokens[2])
self.line = self._skip_lines(1)
return tensor | Parse a tensor. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/modules/orca/parser.py#L157-L174 | null | class OutputData(object):
'''Extract data from the ORCA output.'''
def __init__(self, output):
self.output = open(output, 'r')
self.parse()
def _skip_lines(self, n):
'''Skip a number of lines from the output.'''
for i in range(n):
self.line = next(self.output)
return self.line
def _parse_components(self):
tokens = self.line.split()
components = np.zeros((3,))
components[0] = float(tokens[1])
components[1] = float(tokens[2])
components[2] = float(tokens[3])
self.line = self._skip_lines(1)
return components
def parse(self):
'''Iterate over the lines and extract the required data.'''
for self.line in self.output:
# Parse general data: charge, multiplicity, coordinates, etc.
self.index = 0
if self.line[1:13] == 'Total Charge':
tokens = self.line.split()
self.charge = int(tokens[-1])
if (self.line[1:13] or self.line[0:12]) == 'Multiplicity':
tokens = self.line.split()
self.multiplicity = int(tokens[-1])
if self.line[0:33] == 'CARTESIAN COORDINATES (ANGSTROEM)':
if not hasattr(self, 'names'):
self.names = dict()
if not hasattr(self, 'coords'):
self.coords = dict()
self.line = self._skip_lines(2)
names = list()
coords = list()
while self.line.strip():
tokens = self.line.split()
names.append(tokens[0])
x = float(tokens[1])
y = float(tokens[2])
z = float(tokens[3])
coords.append((x, y, z))
self.line = next(self.output)
self.names = np.array(names)
self.coords[self.index] = np.array(coords)
if self.line[22:50] == 'MULLIKEN POPULATION ANALYSIS':
if not hasattr(self, 'populations'):
self.populations = dict()
self.line = self._skip_lines(6)
populations = list()
while self.line.strip() and 'Sum' not in self.line:
tokens = self.line.split()
populations.append((float(tokens[-2]), float(tokens[-1])))
self.line = next(self.output)
self.populations['mulliken'][self.index] = np.array(populations) # noqa
# Parse data from the EPR/NMR module
if self.line[37:44] == 'EPR/NMR':
self.eprnmr = dict()
if self.line[0:19] == 'ELECTRONIC G-MATRIX':
self.line = self._skip_lines(4)
self.eprnmr['g']['tensor'] = self._parse_tensor()
if self.line[0:27] == 'ZERO-FIELD-SPLITTING TENSOR':
self.line = self._skip_lines(4)
self.eprnmr['zfs']['tensor'] = self._parse_tensor()
if self.line[1:8] == 'Nucleus':
tokens = self.line.split()
nucleus = int(re.findall(r'\d+', tokens[1])[0])
while 'Raw HFC' not in self.line:
self.line = self._skip_lines(1)
self.line = self._skip_lines(2)
self.eprnmr['hfc'][nucleus]['tensor'] = self._parse_tensor()
self.line = self._skip_lines(1)
self.eprnmr['hfc'][nucleus]['fc'] = self._parse_components()
self.eprnmr['hfc'][nucleus]['sd'] = self._parse_components()
self.line = self._skip_lines(1)
self.eprnmr['hfc'][nucleus]['orb'] = self._parse_components()
self.eprnmr['hfc'][nucleus]['dia'] = self._parse_components()
# Parse data from the MRCI module
if self.line[36:43] == 'M R C I':
self.mrci = dict()
if self.line[1:19] == 'SPIN-SPIN COUPLING':
self.line = self._skip_lines(4)
self.mrci['zfs']['ssc']['tensor'] = self._parse_tensor()
if self.line[1:30] == '2ND ORDER SPIN-ORBIT COUPLING':
while 'Second' not in self.line:
self.line = self._skip_lines(1)
self.line = self._skip_lines(1)
self.mrci['zfs']['soc']['second_order']['0']['tensor'] = self._parse_tensor() # noqa
self.line = self._skip_lines(2)
self.mrci['zfs']['soc']['second_order']['m']['tensor'] = self._parse_tensor() # noqa
self.line = self._skip_lines(2)
self.mrci['zfs']['soc']['second_order']['p']['tensor'] = self._parse_tensor() # noqa
if self.line[1:42] == 'EFFECTIVE HAMILTONIAN SPIN-ORBIT COUPLING':
self.line = self._skip_lines(4)
self.mrci['zfs']['soc']['heff']['tensor'] = self._parse_tensor() # noqa
|
mretegan/crispy | crispy/modules/orca/parser.py | OutputData.parse | python | def parse(self):
'''Iterate over the lines and extract the required data.'''
for self.line in self.output:
# Parse general data: charge, multiplicity, coordinates, etc.
self.index = 0
if self.line[1:13] == 'Total Charge':
tokens = self.line.split()
self.charge = int(tokens[-1])
if (self.line[1:13] or self.line[0:12]) == 'Multiplicity':
tokens = self.line.split()
self.multiplicity = int(tokens[-1])
if self.line[0:33] == 'CARTESIAN COORDINATES (ANGSTROEM)':
if not hasattr(self, 'names'):
self.names = dict()
if not hasattr(self, 'coords'):
self.coords = dict()
self.line = self._skip_lines(2)
names = list()
coords = list()
while self.line.strip():
tokens = self.line.split()
names.append(tokens[0])
x = float(tokens[1])
y = float(tokens[2])
z = float(tokens[3])
coords.append((x, y, z))
self.line = next(self.output)
self.names = np.array(names)
self.coords[self.index] = np.array(coords)
if self.line[22:50] == 'MULLIKEN POPULATION ANALYSIS':
if not hasattr(self, 'populations'):
self.populations = dict()
self.line = self._skip_lines(6)
populations = list()
while self.line.strip() and 'Sum' not in self.line:
tokens = self.line.split()
populations.append((float(tokens[-2]), float(tokens[-1])))
self.line = next(self.output)
self.populations['mulliken'][self.index] = np.array(populations) # noqa
# Parse data from the EPR/NMR module
if self.line[37:44] == 'EPR/NMR':
self.eprnmr = dict()
if self.line[0:19] == 'ELECTRONIC G-MATRIX':
self.line = self._skip_lines(4)
self.eprnmr['g']['tensor'] = self._parse_tensor()
if self.line[0:27] == 'ZERO-FIELD-SPLITTING TENSOR':
self.line = self._skip_lines(4)
self.eprnmr['zfs']['tensor'] = self._parse_tensor()
if self.line[1:8] == 'Nucleus':
tokens = self.line.split()
nucleus = int(re.findall(r'\d+', tokens[1])[0])
while 'Raw HFC' not in self.line:
self.line = self._skip_lines(1)
self.line = self._skip_lines(2)
self.eprnmr['hfc'][nucleus]['tensor'] = self._parse_tensor()
self.line = self._skip_lines(1)
self.eprnmr['hfc'][nucleus]['fc'] = self._parse_components()
self.eprnmr['hfc'][nucleus]['sd'] = self._parse_components()
self.line = self._skip_lines(1)
self.eprnmr['hfc'][nucleus]['orb'] = self._parse_components()
self.eprnmr['hfc'][nucleus]['dia'] = self._parse_components()
# Parse data from the MRCI module
if self.line[36:43] == 'M R C I':
self.mrci = dict()
if self.line[1:19] == 'SPIN-SPIN COUPLING':
self.line = self._skip_lines(4)
self.mrci['zfs']['ssc']['tensor'] = self._parse_tensor()
if self.line[1:30] == '2ND ORDER SPIN-ORBIT COUPLING':
while 'Second' not in self.line:
self.line = self._skip_lines(1)
self.line = self._skip_lines(1)
self.mrci['zfs']['soc']['second_order']['0']['tensor'] = self._parse_tensor() # noqa
self.line = self._skip_lines(2)
self.mrci['zfs']['soc']['second_order']['m']['tensor'] = self._parse_tensor() # noqa
self.line = self._skip_lines(2)
self.mrci['zfs']['soc']['second_order']['p']['tensor'] = self._parse_tensor() # noqa
if self.line[1:42] == 'EFFECTIVE HAMILTONIAN SPIN-ORBIT COUPLING':
self.line = self._skip_lines(4)
self.mrci['zfs']['soc']['heff']['tensor'] = self._parse_tensor() | Iterate over the lines and extract the required data. | train | https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/modules/orca/parser.py#L185-L275 | null | class OutputData(object):
'''Extract data from the ORCA output.'''
def __init__(self, output):
self.output = open(output, 'r')
self.parse()
def _skip_lines(self, n):
'''Skip a number of lines from the output.'''
for i in range(n):
self.line = next(self.output)
return self.line
def _parse_tensor(self, indices=False):
'''Parse a tensor.'''
if indices:
self.line = self._skip_lines(1)
tensor = np.zeros((3, 3))
for i in range(3):
tokens = self.line.split()
if indices:
tensor[i][0] = float(tokens[1])
tensor[i][1] = float(tokens[2])
tensor[i][2] = float(tokens[3])
else:
tensor[i][0] = float(tokens[0])
tensor[i][1] = float(tokens[1])
tensor[i][2] = float(tokens[2])
self.line = self._skip_lines(1)
return tensor
def _parse_components(self):
tokens = self.line.split()
components = np.zeros((3,))
components[0] = float(tokens[1])
components[1] = float(tokens[2])
components[2] = float(tokens[3])
self.line = self._skip_lines(1)
return components
# noqa
|
alvarogzp/telegram-bot-framework | bot/action/util/format.py | UserFormatter.default_format | python | def default_format(self):
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id) | Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/format.py#L31-L43 | null | class UserFormatter:
def __init__(self, user):
self.user = user
@property
@property
def full_name(self):
"""
Returns the first and last name of the user separated by a space.
"""
formatted_user = []
if self.user.first_name is not None:
formatted_user.append(self.user.first_name)
if self.user.last_name is not None:
formatted_user.append(self.user.last_name)
return " ".join(formatted_user)
@property
def username(self):
"""
Returns the username of the user without the '@' (thus, not mentioning them).
If the username is not available, returns an empty string.
"""
return self.user.username if self.user.username is not None else ""
@property
def full_format(self):
"""
Returns the full name (first and last parts), and the username between brackets if the user has it.
If there is no info about the user, returns the user id between < and >.
"""
formatted_user = self.full_name
if self.user.username is not None:
formatted_user += " [" + self.user.username + "]"
if not formatted_user:
formatted_user = self._id()
return formatted_user
@property
def full_data(self):
"""
Returns all the info available for the user in the following format:
name [username] <id> (locale) bot_or_user
If any data is not available, it is not added.
"""
data = [
self.full_name,
self._username(),
self._id(),
self._language_code(),
self._is_bot()
]
return " ".join(filter(None, data))
def _username(self):
if self.user.username:
return "[{username}]".format(username=self.user.username)
def _id(self):
return "<{id}>".format(id=self.user.id)
def _language_code(self):
if self.user.language_code:
return "({language_code})".format(language_code=self.user.language_code)
def _is_bot(self):
if self.user.is_bot is not None:
return "🤖" if self.user.is_bot else "👤"
@staticmethod
def retrieve(user_id, user_storage_handler: UserStorageHandler):
user = user_storage_handler.get(user_id)
return UserFormatter(user)
@classmethod
def retrieve_and_format(cls, user_id, user_storage_handler: UserStorageHandler):
return cls.retrieve(user_id, user_storage_handler).default_format
|
alvarogzp/telegram-bot-framework | bot/action/util/format.py | UserFormatter.full_name | python | def full_name(self):
formatted_user = []
if self.user.first_name is not None:
formatted_user.append(self.user.first_name)
if self.user.last_name is not None:
formatted_user.append(self.user.last_name)
return " ".join(formatted_user) | Returns the first and last name of the user separated by a space. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/format.py#L46-L55 | null | class UserFormatter:
def __init__(self, user):
self.user = user
@property
def default_format(self):
"""
Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string.
"""
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id)
@property
@property
def username(self):
"""
Returns the username of the user without the '@' (thus, not mentioning them).
If the username is not available, returns an empty string.
"""
return self.user.username if self.user.username is not None else ""
@property
def full_format(self):
"""
Returns the full name (first and last parts), and the username between brackets if the user has it.
If there is no info about the user, returns the user id between < and >.
"""
formatted_user = self.full_name
if self.user.username is not None:
formatted_user += " [" + self.user.username + "]"
if not formatted_user:
formatted_user = self._id()
return formatted_user
@property
def full_data(self):
"""
Returns all the info available for the user in the following format:
name [username] <id> (locale) bot_or_user
If any data is not available, it is not added.
"""
data = [
self.full_name,
self._username(),
self._id(),
self._language_code(),
self._is_bot()
]
return " ".join(filter(None, data))
def _username(self):
if self.user.username:
return "[{username}]".format(username=self.user.username)
def _id(self):
return "<{id}>".format(id=self.user.id)
def _language_code(self):
if self.user.language_code:
return "({language_code})".format(language_code=self.user.language_code)
def _is_bot(self):
if self.user.is_bot is not None:
return "🤖" if self.user.is_bot else "👤"
@staticmethod
def retrieve(user_id, user_storage_handler: UserStorageHandler):
user = user_storage_handler.get(user_id)
return UserFormatter(user)
@classmethod
def retrieve_and_format(cls, user_id, user_storage_handler: UserStorageHandler):
return cls.retrieve(user_id, user_storage_handler).default_format
|
alvarogzp/telegram-bot-framework | bot/action/util/format.py | UserFormatter.full_format | python | def full_format(self):
formatted_user = self.full_name
if self.user.username is not None:
formatted_user += " [" + self.user.username + "]"
if not formatted_user:
formatted_user = self._id()
return formatted_user | Returns the full name (first and last parts), and the username between brackets if the user has it.
If there is no info about the user, returns the user id between < and >. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/format.py#L66-L76 | [
"def _id(self):\n return \"<{id}>\".format(id=self.user.id)\n"
] | class UserFormatter:
def __init__(self, user):
self.user = user
@property
def default_format(self):
"""
Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string.
"""
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id)
@property
def full_name(self):
"""
Returns the first and last name of the user separated by a space.
"""
formatted_user = []
if self.user.first_name is not None:
formatted_user.append(self.user.first_name)
if self.user.last_name is not None:
formatted_user.append(self.user.last_name)
return " ".join(formatted_user)
@property
def username(self):
"""
Returns the username of the user without the '@' (thus, not mentioning them).
If the username is not available, returns an empty string.
"""
return self.user.username if self.user.username is not None else ""
@property
@property
def full_data(self):
"""
Returns all the info available for the user in the following format:
name [username] <id> (locale) bot_or_user
If any data is not available, it is not added.
"""
data = [
self.full_name,
self._username(),
self._id(),
self._language_code(),
self._is_bot()
]
return " ".join(filter(None, data))
def _username(self):
if self.user.username:
return "[{username}]".format(username=self.user.username)
def _id(self):
return "<{id}>".format(id=self.user.id)
def _language_code(self):
if self.user.language_code:
return "({language_code})".format(language_code=self.user.language_code)
def _is_bot(self):
if self.user.is_bot is not None:
return "🤖" if self.user.is_bot else "👤"
@staticmethod
def retrieve(user_id, user_storage_handler: UserStorageHandler):
user = user_storage_handler.get(user_id)
return UserFormatter(user)
@classmethod
def retrieve_and_format(cls, user_id, user_storage_handler: UserStorageHandler):
return cls.retrieve(user_id, user_storage_handler).default_format
|
alvarogzp/telegram-bot-framework | bot/action/util/format.py | UserFormatter.full_data | python | def full_data(self):
data = [
self.full_name,
self._username(),
self._id(),
self._language_code(),
self._is_bot()
]
return " ".join(filter(None, data)) | Returns all the info available for the user in the following format:
name [username] <id> (locale) bot_or_user
If any data is not available, it is not added. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/format.py#L79-L92 | [
"def _username(self):\n if self.user.username:\n return \"[{username}]\".format(username=self.user.username)\n",
"def _id(self):\n return \"<{id}>\".format(id=self.user.id)\n",
"def _language_code(self):\n if self.user.language_code:\n return \"({language_code})\".format(language_code=self.user.language_code)\n",
"def _is_bot(self):\n if self.user.is_bot is not None:\n return \"🤖\" if self.user.is_bot else \"👤\"\n"
] | class UserFormatter:
def __init__(self, user):
self.user = user
@property
def default_format(self):
"""
Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string.
"""
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id)
@property
def full_name(self):
"""
Returns the first and last name of the user separated by a space.
"""
formatted_user = []
if self.user.first_name is not None:
formatted_user.append(self.user.first_name)
if self.user.last_name is not None:
formatted_user.append(self.user.last_name)
return " ".join(formatted_user)
@property
def username(self):
"""
Returns the username of the user without the '@' (thus, not mentioning them).
If the username is not available, returns an empty string.
"""
return self.user.username if self.user.username is not None else ""
@property
def full_format(self):
"""
Returns the full name (first and last parts), and the username between brackets if the user has it.
If there is no info about the user, returns the user id between < and >.
"""
formatted_user = self.full_name
if self.user.username is not None:
formatted_user += " [" + self.user.username + "]"
if not formatted_user:
formatted_user = self._id()
return formatted_user
@property
def _username(self):
if self.user.username:
return "[{username}]".format(username=self.user.username)
def _id(self):
return "<{id}>".format(id=self.user.id)
def _language_code(self):
if self.user.language_code:
return "({language_code})".format(language_code=self.user.language_code)
def _is_bot(self):
if self.user.is_bot is not None:
return "🤖" if self.user.is_bot else "👤"
@staticmethod
def retrieve(user_id, user_storage_handler: UserStorageHandler):
user = user_storage_handler.get(user_id)
return UserFormatter(user)
@classmethod
def retrieve_and_format(cls, user_id, user_storage_handler: UserStorageHandler):
return cls.retrieve(user_id, user_storage_handler).default_format
|
alvarogzp/telegram-bot-framework | bot/action/util/format.py | ChatFormatter.full_data | python | def full_data(self):
data = [
self.chat.title,
self._username(),
self._type(),
self._id()
]
return " ".join(filter(None, data)) | Returns all the info available for the chat in the following format:
title [username] (type) <id>
If any data is not available, it is not added. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/format.py#L124-L136 | [
"def _id(self):\n return \"<{id}>\".format(id=self.chat.id)\n",
"def _type(self):\n return \"({type})\".format(type=self.chat.type)\n",
"def _username(self):\n username = self.chat.username\n if username:\n return \"[{username}]\".format(username=username)\n"
] | class ChatFormatter:
def __init__(self, chat):
self.chat = chat
@property
def _id(self):
return "<{id}>".format(id=self.chat.id)
def _type(self):
return "({type})".format(type=self.chat.type)
def _username(self):
username = self.chat.username
if username:
return "[{username}]".format(username=username)
@staticmethod
def format_group_or_type(chat):
if GroupFormatter.is_group(chat):
return GroupFormatter.format(chat)
else:
return "<" + chat.type + ">"
@staticmethod
def format_group_or_user(chat):
if GroupFormatter.is_group(chat):
return GroupFormatter.format(chat)
else:
return UserFormatter(chat).full_format
|
alvarogzp/telegram-bot-framework | bot/multithreading/scheduler.py | SchedulerApi.set_callbacks | python | def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False):
# We are setting self.worker_start_callback and self.worker_end_callback
# to lambdas instead of saving them in private vars and moving the lambda logic
# to a member function for, among other reasons, making callback updates atomic,
# ie. once a callback has been posted, it will be executed as it was in that
# moment, any call to set_callbacks will only affect callbacks posted since they
# were updated, but not to any pending callback.
# If callback is async, execute the start callback in the calling thread
scheduler = self.immediate if are_async else self.background
self.worker_start_callback = lambda worker: scheduler(Work(
lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name
))
# As the end callback is called *just* before the thread dies,
# there is no problem running it on the thread
self.worker_end_callback = lambda worker: self.immediate(Work(
lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name
)) | :param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L59-L80 | null | class SchedulerApi:
def __init__(self, max_network_workers: int, worker_error_handler: callable, worker_start_callback: callable, worker_end_callback: callable):
self.worker_error_handler = worker_error_handler
# Defining here to avoid IDE from complaining about defining variables outside __init__
self.worker_start_callback = worker_start_callback
self.worker_end_callback = worker_end_callback
# Set the real callbacks
self.set_callbacks(worker_start_callback, worker_end_callback)
# This list is modified by multiple threads, and although lists shouldn't go corrupt
# (https://stackoverflow.com/questions/6319207/are-lists-thread-safe)
# we are going to play safe by protecting all access and modifications to it with a lock.
self.running_workers = []
self.running_workers_lock = threading.Lock()
# Worker pools should only be launched from main thread, so no locking is needed here.
self.worker_pools = []
self.running = False
self.immediate_worker = ImmediateWorker(worker_error_handler)
self._network_worker = self._new_worker_pool(
"network",
min_workers=0,
max_workers=max_network_workers,
max_seconds_idle=DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE
)
self._io_worker = self._new_worker_pool(
"io", min_workers=0, max_workers=1, max_seconds_idle=WORKER_POOL_KEEP_WORKERS_FOREVER
)
self._background_worker = self._new_worker_pool(
"background", min_workers=0, max_workers=1, max_seconds_idle=DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE
)
def _new_worker(self, name: str):
return QueueWorker(name, queue.Queue(), self.worker_error_handler)
def _new_worker_pool(self, name: str, min_workers: int, max_workers: int, max_seconds_idle: Union[int, None]):
return QueueWorkerPool(name, queue.Queue(), self.worker_error_handler, self._start_worker,
min_workers, max_workers, max_seconds_idle)
def setup(self):
self._start_worker_pool(self._network_worker)
self._start_worker_pool(self._io_worker)
self._start_worker_pool(self._background_worker)
self.running = True
def _start_worker(self, worker: Worker):
"""
Can be safely called multiple times on the same worker (for workers that support it)
to start a new thread for it.
"""
# This function is called from main thread and from worker pools threads to start their children threads
with self.running_workers_lock:
self.running_workers.append(worker)
thread = SchedulerThread(worker, self._worker_ended)
thread.start()
# This may or may not be posted to a background thread (see set_callbacks)
self.worker_start_callback(worker)
def _start_worker_pool(self, worker: QueueWorkerPool):
self.worker_pools.append(worker)
worker.start()
def _worker_ended(self, worker: Worker):
# This function is called from worker threads
with self.running_workers_lock:
self.running_workers.remove(worker)
# This is executed on the same thread (see set_callbacks)
self.worker_end_callback(worker)
def network(self, work: Work):
self.network_worker.post(work)
def io(self, work: Work):
self.io_worker.post(work)
def background(self, work: Work):
self.background_worker.post(work)
def immediate(self, work: Work):
self.immediate_worker.post(work)
@property
def network_worker(self):
return self._get_worker(self._network_worker)
@property
def io_worker(self):
return self._get_worker(self._io_worker)
@property
def background_worker(self):
return self._get_worker(self._background_worker)
def _get_worker(self, worker: Worker):
if not self.running:
return self.immediate_worker
return worker
def new_worker(self, name: str):
"""Creates a new Worker and start a new Thread with it. Returns the Worker."""
if not self.running:
return self.immediate_worker
worker = self._new_worker(name)
self._start_worker(worker)
return worker
def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1,
max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE):
"""
Creates a new worker pool and starts it.
Returns the Worker that schedules works to the pool.
"""
if not self.running:
return self.immediate_worker
worker = self._new_worker_pool(name, min_workers, max_workers, max_seconds_idle)
self._start_worker_pool(worker)
return worker
def get_running_workers(self):
with self.running_workers_lock:
# return a copy to avoid concurrent modifications problems by other threads modifications to the list
return self.running_workers[:]
def get_worker_pools(self):
return self.worker_pools
def shutdown(self):
# first wait for all worker pools to be idle
for worker in self.get_worker_pools():
worker.shutdown()
# now wait for all active workers to be idle
# first, because there may be workers not running on a worker pool
# and second, in case any pending work in a worker pool posted a
# new work on another worker (pool), that way we wait for it to end too
for worker in self.get_running_workers():
worker.shutdown()
|
alvarogzp/telegram-bot-framework | bot/multithreading/scheduler.py | SchedulerApi._start_worker | python | def _start_worker(self, worker: Worker):
# This function is called from main thread and from worker pools threads to start their children threads
with self.running_workers_lock:
self.running_workers.append(worker)
thread = SchedulerThread(worker, self._worker_ended)
thread.start()
# This may or may not be posted to a background thread (see set_callbacks)
self.worker_start_callback(worker) | Can be safely called multiple times on the same worker (for workers that support it)
to start a new thread for it. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L95-L106 | [
"def start(self):\n thread = threading.Thread(name=self.worker.name, target=self.run)\n thread.daemon = True\n thread.start()\n"
] | class SchedulerApi:
def __init__(self, max_network_workers: int, worker_error_handler: callable, worker_start_callback: callable, worker_end_callback: callable):
self.worker_error_handler = worker_error_handler
# Defining here to avoid IDE from complaining about defining variables outside __init__
self.worker_start_callback = worker_start_callback
self.worker_end_callback = worker_end_callback
# Set the real callbacks
self.set_callbacks(worker_start_callback, worker_end_callback)
# This list is modified by multiple threads, and although lists shouldn't go corrupt
# (https://stackoverflow.com/questions/6319207/are-lists-thread-safe)
# we are going to play safe by protecting all access and modifications to it with a lock.
self.running_workers = []
self.running_workers_lock = threading.Lock()
# Worker pools should only be launched from main thread, so no locking is needed here.
self.worker_pools = []
self.running = False
self.immediate_worker = ImmediateWorker(worker_error_handler)
self._network_worker = self._new_worker_pool(
"network",
min_workers=0,
max_workers=max_network_workers,
max_seconds_idle=DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE
)
self._io_worker = self._new_worker_pool(
"io", min_workers=0, max_workers=1, max_seconds_idle=WORKER_POOL_KEEP_WORKERS_FOREVER
)
self._background_worker = self._new_worker_pool(
"background", min_workers=0, max_workers=1, max_seconds_idle=DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE
)
def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False):
"""
:param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread.
"""
# We are setting self.worker_start_callback and self.worker_end_callback
# to lambdas instead of saving them in private vars and moving the lambda logic
# to a member function for, among other reasons, making callback updates atomic,
# ie. once a callback has been posted, it will be executed as it was in that
# moment, any call to set_callbacks will only affect callbacks posted since they
# were updated, but not to any pending callback.
# If callback is async, execute the start callback in the calling thread
scheduler = self.immediate if are_async else self.background
self.worker_start_callback = lambda worker: scheduler(Work(
lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name
))
# As the end callback is called *just* before the thread dies,
# there is no problem running it on the thread
self.worker_end_callback = lambda worker: self.immediate(Work(
lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name
))
def _new_worker(self, name: str):
return QueueWorker(name, queue.Queue(), self.worker_error_handler)
def _new_worker_pool(self, name: str, min_workers: int, max_workers: int, max_seconds_idle: Union[int, None]):
return QueueWorkerPool(name, queue.Queue(), self.worker_error_handler, self._start_worker,
min_workers, max_workers, max_seconds_idle)
def setup(self):
self._start_worker_pool(self._network_worker)
self._start_worker_pool(self._io_worker)
self._start_worker_pool(self._background_worker)
self.running = True
def _start_worker_pool(self, worker: QueueWorkerPool):
self.worker_pools.append(worker)
worker.start()
def _worker_ended(self, worker: Worker):
# This function is called from worker threads
with self.running_workers_lock:
self.running_workers.remove(worker)
# This is executed on the same thread (see set_callbacks)
self.worker_end_callback(worker)
def network(self, work: Work):
self.network_worker.post(work)
def io(self, work: Work):
self.io_worker.post(work)
def background(self, work: Work):
self.background_worker.post(work)
def immediate(self, work: Work):
self.immediate_worker.post(work)
@property
def network_worker(self):
return self._get_worker(self._network_worker)
@property
def io_worker(self):
return self._get_worker(self._io_worker)
@property
def background_worker(self):
return self._get_worker(self._background_worker)
def _get_worker(self, worker: Worker):
if not self.running:
return self.immediate_worker
return worker
def new_worker(self, name: str):
"""Creates a new Worker and start a new Thread with it. Returns the Worker."""
if not self.running:
return self.immediate_worker
worker = self._new_worker(name)
self._start_worker(worker)
return worker
def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1,
max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE):
"""
Creates a new worker pool and starts it.
Returns the Worker that schedules works to the pool.
"""
if not self.running:
return self.immediate_worker
worker = self._new_worker_pool(name, min_workers, max_workers, max_seconds_idle)
self._start_worker_pool(worker)
return worker
def get_running_workers(self):
with self.running_workers_lock:
# return a copy to avoid concurrent modifications problems by other threads modifications to the list
return self.running_workers[:]
def get_worker_pools(self):
return self.worker_pools
def shutdown(self):
# first wait for all worker pools to be idle
for worker in self.get_worker_pools():
worker.shutdown()
# now wait for all active workers to be idle
# first, because there may be workers not running on a worker pool
# and second, in case any pending work in a worker pool posted a
# new work on another worker (pool), that way we wait for it to end too
for worker in self.get_running_workers():
worker.shutdown()
|
alvarogzp/telegram-bot-framework | bot/multithreading/scheduler.py | SchedulerApi.new_worker | python | def new_worker(self, name: str):
if not self.running:
return self.immediate_worker
worker = self._new_worker(name)
self._start_worker(worker)
return worker | Creates a new Worker and start a new Thread with it. Returns the Worker. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L148-L154 | [
"def _new_worker(self, name: str):\n return QueueWorker(name, queue.Queue(), self.worker_error_handler)\n",
"def _start_worker(self, worker: Worker):\n \"\"\"\n Can be safely called multiple times on the same worker (for workers that support it)\n to start a new thread for it.\n \"\"\"\n # This function is called from main thread and from worker pools threads to start their children threads\n with self.running_workers_lock:\n self.running_workers.append(worker)\n thread = SchedulerThread(worker, self._worker_ended)\n thread.start()\n # This may or may not be posted to a background thread (see set_callbacks)\n self.worker_start_callback(worker)\n"
] | class SchedulerApi:
def __init__(self, max_network_workers: int, worker_error_handler: callable, worker_start_callback: callable, worker_end_callback: callable):
self.worker_error_handler = worker_error_handler
# Defining here to avoid IDE from complaining about defining variables outside __init__
self.worker_start_callback = worker_start_callback
self.worker_end_callback = worker_end_callback
# Set the real callbacks
self.set_callbacks(worker_start_callback, worker_end_callback)
# This list is modified by multiple threads, and although lists shouldn't go corrupt
# (https://stackoverflow.com/questions/6319207/are-lists-thread-safe)
# we are going to play safe by protecting all access and modifications to it with a lock.
self.running_workers = []
self.running_workers_lock = threading.Lock()
# Worker pools should only be launched from main thread, so no locking is needed here.
self.worker_pools = []
self.running = False
self.immediate_worker = ImmediateWorker(worker_error_handler)
self._network_worker = self._new_worker_pool(
"network",
min_workers=0,
max_workers=max_network_workers,
max_seconds_idle=DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE
)
self._io_worker = self._new_worker_pool(
"io", min_workers=0, max_workers=1, max_seconds_idle=WORKER_POOL_KEEP_WORKERS_FOREVER
)
self._background_worker = self._new_worker_pool(
"background", min_workers=0, max_workers=1, max_seconds_idle=DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE
)
def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False):
"""
:param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread.
"""
# We are setting self.worker_start_callback and self.worker_end_callback
# to lambdas instead of saving them in private vars and moving the lambda logic
# to a member function for, among other reasons, making callback updates atomic,
# ie. once a callback has been posted, it will be executed as it was in that
# moment, any call to set_callbacks will only affect callbacks posted since they
# were updated, but not to any pending callback.
# If callback is async, execute the start callback in the calling thread
scheduler = self.immediate if are_async else self.background
self.worker_start_callback = lambda worker: scheduler(Work(
lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name
))
# As the end callback is called *just* before the thread dies,
# there is no problem running it on the thread
self.worker_end_callback = lambda worker: self.immediate(Work(
lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name
))
def _new_worker(self, name: str):
return QueueWorker(name, queue.Queue(), self.worker_error_handler)
def _new_worker_pool(self, name: str, min_workers: int, max_workers: int, max_seconds_idle: Union[int, None]):
return QueueWorkerPool(name, queue.Queue(), self.worker_error_handler, self._start_worker,
min_workers, max_workers, max_seconds_idle)
def setup(self):
self._start_worker_pool(self._network_worker)
self._start_worker_pool(self._io_worker)
self._start_worker_pool(self._background_worker)
self.running = True
def _start_worker(self, worker: Worker):
"""
Can be safely called multiple times on the same worker (for workers that support it)
to start a new thread for it.
"""
# This function is called from main thread and from worker pools threads to start their children threads
with self.running_workers_lock:
self.running_workers.append(worker)
thread = SchedulerThread(worker, self._worker_ended)
thread.start()
# This may or may not be posted to a background thread (see set_callbacks)
self.worker_start_callback(worker)
def _start_worker_pool(self, worker: QueueWorkerPool):
self.worker_pools.append(worker)
worker.start()
def _worker_ended(self, worker: Worker):
# This function is called from worker threads
with self.running_workers_lock:
self.running_workers.remove(worker)
# This is executed on the same thread (see set_callbacks)
self.worker_end_callback(worker)
def network(self, work: Work):
self.network_worker.post(work)
def io(self, work: Work):
self.io_worker.post(work)
def background(self, work: Work):
self.background_worker.post(work)
def immediate(self, work: Work):
self.immediate_worker.post(work)
@property
def network_worker(self):
return self._get_worker(self._network_worker)
@property
def io_worker(self):
return self._get_worker(self._io_worker)
@property
def background_worker(self):
return self._get_worker(self._background_worker)
def _get_worker(self, worker: Worker):
if not self.running:
return self.immediate_worker
return worker
def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1,
max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE):
"""
Creates a new worker pool and starts it.
Returns the Worker that schedules works to the pool.
"""
if not self.running:
return self.immediate_worker
worker = self._new_worker_pool(name, min_workers, max_workers, max_seconds_idle)
self._start_worker_pool(worker)
return worker
def get_running_workers(self):
with self.running_workers_lock:
# return a copy to avoid concurrent modifications problems by other threads modifications to the list
return self.running_workers[:]
def get_worker_pools(self):
return self.worker_pools
def shutdown(self):
# first wait for all worker pools to be idle
for worker in self.get_worker_pools():
worker.shutdown()
# now wait for all active workers to be idle
# first, because there may be workers not running on a worker pool
# and second, in case any pending work in a worker pool posted a
# new work on another worker (pool), that way we wait for it to end too
for worker in self.get_running_workers():
worker.shutdown()
|
alvarogzp/telegram-bot-framework | bot/multithreading/scheduler.py | SchedulerApi.new_worker_pool | python | def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1,
max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE):
if not self.running:
return self.immediate_worker
worker = self._new_worker_pool(name, min_workers, max_workers, max_seconds_idle)
self._start_worker_pool(worker)
return worker | Creates a new worker pool and starts it.
Returns the Worker that schedules works to the pool. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L156-L166 | [
"def _new_worker_pool(self, name: str, min_workers: int, max_workers: int, max_seconds_idle: Union[int, None]):\n return QueueWorkerPool(name, queue.Queue(), self.worker_error_handler, self._start_worker,\n min_workers, max_workers, max_seconds_idle)\n",
"def _start_worker_pool(self, worker: QueueWorkerPool):\n self.worker_pools.append(worker)\n worker.start()\n"
] | class SchedulerApi:
def __init__(self, max_network_workers: int, worker_error_handler: callable, worker_start_callback: callable, worker_end_callback: callable):
self.worker_error_handler = worker_error_handler
# Defining here to avoid IDE from complaining about defining variables outside __init__
self.worker_start_callback = worker_start_callback
self.worker_end_callback = worker_end_callback
# Set the real callbacks
self.set_callbacks(worker_start_callback, worker_end_callback)
# This list is modified by multiple threads, and although lists shouldn't go corrupt
# (https://stackoverflow.com/questions/6319207/are-lists-thread-safe)
# we are going to play safe by protecting all access and modifications to it with a lock.
self.running_workers = []
self.running_workers_lock = threading.Lock()
# Worker pools should only be launched from main thread, so no locking is needed here.
self.worker_pools = []
self.running = False
self.immediate_worker = ImmediateWorker(worker_error_handler)
self._network_worker = self._new_worker_pool(
"network",
min_workers=0,
max_workers=max_network_workers,
max_seconds_idle=DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE
)
self._io_worker = self._new_worker_pool(
"io", min_workers=0, max_workers=1, max_seconds_idle=WORKER_POOL_KEEP_WORKERS_FOREVER
)
self._background_worker = self._new_worker_pool(
"background", min_workers=0, max_workers=1, max_seconds_idle=DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE
)
def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False):
"""
:param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread.
"""
# We are setting self.worker_start_callback and self.worker_end_callback
# to lambdas instead of saving them in private vars and moving the lambda logic
# to a member function for, among other reasons, making callback updates atomic,
# ie. once a callback has been posted, it will be executed as it was in that
# moment, any call to set_callbacks will only affect callbacks posted since they
# were updated, but not to any pending callback.
# If callback is async, execute the start callback in the calling thread
scheduler = self.immediate if are_async else self.background
self.worker_start_callback = lambda worker: scheduler(Work(
lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name
))
# As the end callback is called *just* before the thread dies,
# there is no problem running it on the thread
self.worker_end_callback = lambda worker: self.immediate(Work(
lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name
))
def _new_worker(self, name: str):
return QueueWorker(name, queue.Queue(), self.worker_error_handler)
def _new_worker_pool(self, name: str, min_workers: int, max_workers: int, max_seconds_idle: Union[int, None]):
return QueueWorkerPool(name, queue.Queue(), self.worker_error_handler, self._start_worker,
min_workers, max_workers, max_seconds_idle)
def setup(self):
self._start_worker_pool(self._network_worker)
self._start_worker_pool(self._io_worker)
self._start_worker_pool(self._background_worker)
self.running = True
def _start_worker(self, worker: Worker):
"""
Can be safely called multiple times on the same worker (for workers that support it)
to start a new thread for it.
"""
# This function is called from main thread and from worker pools threads to start their children threads
with self.running_workers_lock:
self.running_workers.append(worker)
thread = SchedulerThread(worker, self._worker_ended)
thread.start()
# This may or may not be posted to a background thread (see set_callbacks)
self.worker_start_callback(worker)
def _start_worker_pool(self, worker: QueueWorkerPool):
self.worker_pools.append(worker)
worker.start()
def _worker_ended(self, worker: Worker):
# This function is called from worker threads
with self.running_workers_lock:
self.running_workers.remove(worker)
# This is executed on the same thread (see set_callbacks)
self.worker_end_callback(worker)
def network(self, work: Work):
self.network_worker.post(work)
def io(self, work: Work):
self.io_worker.post(work)
def background(self, work: Work):
self.background_worker.post(work)
def immediate(self, work: Work):
self.immediate_worker.post(work)
@property
def network_worker(self):
return self._get_worker(self._network_worker)
@property
def io_worker(self):
return self._get_worker(self._io_worker)
@property
def background_worker(self):
return self._get_worker(self._background_worker)
def _get_worker(self, worker: Worker):
if not self.running:
return self.immediate_worker
return worker
def new_worker(self, name: str):
"""Creates a new Worker and start a new Thread with it. Returns the Worker."""
if not self.running:
return self.immediate_worker
worker = self._new_worker(name)
self._start_worker(worker)
return worker
def get_running_workers(self):
with self.running_workers_lock:
# return a copy to avoid concurrent modifications problems by other threads modifications to the list
return self.running_workers[:]
def get_worker_pools(self):
return self.worker_pools
def shutdown(self):
# first wait for all worker pools to be idle
for worker in self.get_worker_pools():
worker.shutdown()
# now wait for all active workers to be idle
# first, because there may be workers not running on a worker pool
# and second, in case any pending work in a worker pool posted a
# new work on another worker (pool), that way we wait for it to end too
for worker in self.get_running_workers():
worker.shutdown()
|
alvarogzp/telegram-bot-framework | bot/multithreading/worker/pool/workers/limited_lifespan.py | LimitedLifespanQueueWorker._get_and_execute | python | def _get_and_execute(self):
try:
work = self.queue.get(timeout=self.max_seconds_idle)
except queue.Empty:
# max_seconds_idle has been exhausted, exiting
self.end_notify()
return False
else:
self._work(work)
self.queue.task_done()
return True | :return: True if it should continue running, False if it should end its execution. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/worker/pool/workers/limited_lifespan.py#L21-L34 | [
"def _work(self, work: Work):\n try:\n self._do_work(work)\n except BaseException as e:\n self._error(e, work)\n"
] | class LimitedLifespanQueueWorker(QueueWorker):
def __init__(self, name: str, work_queue: queue.Queue, error_handler: callable, max_seconds_idle: int,
end_notify: callable):
"""
:param max_seconds_idle: Max seconds to wait for a new work to appear before ending the execution.
If it is None, it behaves as a QueueWorker, waiting forever.
"""
super().__init__(name, work_queue, error_handler)
self.max_seconds_idle = max_seconds_idle
self.end_notify = end_notify
def run(self):
while self._get_and_execute():
pass
|
alvarogzp/telegram-bot-framework | bot/action/standard/info/formatter/chat.py | ChatInfoFormatter.format | python | def format(self, full_info: bool = False):
chat = self.api_object
if full_info:
self.__format_full(chat)
else:
self.__format_simple(chat) | :param full_info: If True, adds more info about the chat. Please, note that this additional info requires
to make up to THREE synchronous api calls. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/info/formatter/chat.py#L20-L29 | [
"def __format_full(self, chat: ApiObject):\n chat = self.api.getChat(chat_id=chat.id)\n description = self._text(chat.description)\n invite_link = self._invite_link(chat.invite_link)\n pinned_message = self._pinned_message(chat.pinned_message)\n sticker_set_name = self._group_sticker_set(chat.sticker_set_name)\n member_count = self.api.getChatMembersCount(chat_id=chat.id)\n self.__format_simple(chat)\n self._add_info(\"Description\", description)\n self._add_info(\"Invite link\", invite_link)\n self._add_info(\"Pinned message\", pinned_message)\n self._add_info(\"Group sticker set\", sticker_set_name)\n self._add_info(\"Members\", member_count)\n if chat.type not in (CHAT_TYPE_PRIVATE, CHAT_TYPE_CHANNEL):\n admins = self._get_admins(chat)\n admin_count = len(admins)\n me_admin = self._yes_no(self._is_admin(self.bot_user, admins))\n you_admin = self._yes_no(self._is_admin(self.user, admins))\n self._add_info(\"Admins\", admin_count, additional_text=\"(not counting other bots)\")\n self._add_info(\"Am I admin\", me_admin, separator=\"?\")\n self._add_info(\"Are you admin\", you_admin, separator=\"?\")\n",
"def __format_simple(self, chat: ApiObject):\n full_data = ChatFormatter(chat).full_data\n title = self._text(chat.title)\n username = self._username(chat.username)\n _type = chat.type\n _id = chat.id\n self._add_title(full_data)\n self._add_empty()\n self._add_info(\"Title\", title)\n self._add_info(\"Username\", username)\n self._add_info(\"Type\", _type)\n self._add_info(\"Id\", _id)\n if chat.type == CHAT_TYPE_GROUP or chat.all_members_are_administrators is not None:\n all_members_are_admins = self._yes_no(chat.all_members_are_administrators)\n self._add_info(\"All members are admins\", all_members_are_admins, separator=\"?\")\n"
] | class ChatInfoFormatter(ApiObjectInfoFormatter):
def __init__(self, api: Api, chat: ApiObject, bot_user: ApiObject, user: ApiObject):
super().__init__(api, chat)
self.bot_user = bot_user
self.user = user
def __format_full(self, chat: ApiObject):
chat = self.api.getChat(chat_id=chat.id)
description = self._text(chat.description)
invite_link = self._invite_link(chat.invite_link)
pinned_message = self._pinned_message(chat.pinned_message)
sticker_set_name = self._group_sticker_set(chat.sticker_set_name)
member_count = self.api.getChatMembersCount(chat_id=chat.id)
self.__format_simple(chat)
self._add_info("Description", description)
self._add_info("Invite link", invite_link)
self._add_info("Pinned message", pinned_message)
self._add_info("Group sticker set", sticker_set_name)
self._add_info("Members", member_count)
if chat.type not in (CHAT_TYPE_PRIVATE, CHAT_TYPE_CHANNEL):
admins = self._get_admins(chat)
admin_count = len(admins)
me_admin = self._yes_no(self._is_admin(self.bot_user, admins))
you_admin = self._yes_no(self._is_admin(self.user, admins))
self._add_info("Admins", admin_count, additional_text="(not counting other bots)")
self._add_info("Am I admin", me_admin, separator="?")
self._add_info("Are you admin", you_admin, separator="?")
def _get_admins(self, chat: ApiObject):
if chat.type in (CHAT_TYPE_PRIVATE, CHAT_TYPE_CHANNEL):
return ()
return list(self.api.getChatAdministrators(chat_id=chat.id))
@staticmethod
def _is_admin(user: ApiObject, admin_chat_member_list: List[ApiObject]):
for admin_chat_member in admin_chat_member_list:
if admin_chat_member.user.id == user.id:
return True
return False
def __format_simple(self, chat: ApiObject):
full_data = ChatFormatter(chat).full_data
title = self._text(chat.title)
username = self._username(chat.username)
_type = chat.type
_id = chat.id
self._add_title(full_data)
self._add_empty()
self._add_info("Title", title)
self._add_info("Username", username)
self._add_info("Type", _type)
self._add_info("Id", _id)
if chat.type == CHAT_TYPE_GROUP or chat.all_members_are_administrators is not None:
all_members_are_admins = self._yes_no(chat.all_members_are_administrators)
self._add_info("All members are admins", all_members_are_admins, separator="?")
|
alvarogzp/telegram-bot-framework | bot/action/standard/chatsettings/__init__.py | ChatSettings.list | python | def list(self):
settings = []
for setting in _SETTINGS:
value = self.get(setting)
is_set = self.is_set(setting)
default_value = self.get_default_value(setting)
is_supported = True
settings.append((setting, value, default_value, is_set, is_supported))
for setting in sorted(self.settings_state.list_keys()):
if not self.is_supported(setting):
value = self.get(setting)
default_value = None
is_set = True
is_supported = False
settings.append((setting, value, default_value, is_set, is_supported))
return settings | :rtype: list(setting_name, value, default_value, is_set, is_supported) | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/chatsettings/__init__.py#L42-L60 | [
"def get(self, name):\n value = self.settings_state.get_value(name)\n if value is None:\n value = self.get_default_value(name)\n elif name in _CODECS:\n value = _CODECS[name].decode(value)\n return value\n",
"def get_default_value(name):\n return _DEFAULT_VALUES.get(name)\n",
"def is_set(self, name):\n return self.settings_state.exists_value(name)\n",
"def is_supported(name):\n return name in _SETTINGS\n"
] | class ChatSettings:
# List of chat settings
# To add one: SETTING = add_setting("name", "default_value")
LANGUAGE = add_setting("language", "en")
STORE_MESSAGES = add_setting("store_messages", "on")
OVERRIDE_MESSAGES_OPT_OUT = add_setting("override_messages_opt_out", "off")
THROTTLING_SECONDS = add_setting("throttling_seconds", 15, Codecs.INT)
def __init__(self, settings_state):
self.settings_state = settings_state
def get(self, name):
value = self.settings_state.get_value(name)
if value is None:
value = self.get_default_value(name)
elif name in _CODECS:
value = _CODECS[name].decode(value)
return value
def set(self, name, value):
if name in _CODECS:
# decode to check if value is valid
_CODECS[name].decode(value)
self.settings_state.set_value(name, value)
@staticmethod
def get_default_value(name):
return _DEFAULT_VALUES.get(name)
def is_set(self, name):
return self.settings_state.exists_value(name)
@staticmethod
def is_supported(name):
return name in _SETTINGS
|
alvarogzp/telegram-bot-framework | bot/action/util/textformat.py | FormattedText.concat | python | def concat(self, formatted_text):
assert self._is_compatible(formatted_text), "Cannot concat text with different modes"
self.text += formatted_text.text
return self | :type formatted_text: FormattedText | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/textformat.py#L42-L46 | [
"def _is_compatible(self, formatted_text):\n \"\"\":type formatted_text: FormattedText\"\"\"\n return self.mode == formatted_text.mode\n"
] | class FormattedText:
def __init__(self, mode="HTML"):
self.formatter = TextFormatterFactory.get_for_mode(mode)
self.mode = mode
self.text = ""
def raw(self, text: str):
self.text += text
return self
def normal(self, text: str):
self.text += self._escaped(text)
return self
def bold(self, text: str):
self.text += self.formatter.bold(self._escaped(text))
return self
def italic(self, text: str):
self.text += self.formatter.italic(self._escaped(text))
return self
def url(self, text: str, url: str):
self.text += self.formatter.url(self._escaped(text), self._escaped(url))
return self
def code_inline(self, text: str):
self.text += self.formatter.code_inline(self._escaped_code(text))
return self
def code_block(self, text: str):
self.text += self.formatter.code_block(self._escaped_code(text))
return self
def newline(self):
self.text += "\n"
return self
def join(self, formatted_texts):
""":type formatted_texts: list[FormattedText]"""
formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator
for formatted_text in formatted_texts:
assert self._is_compatible(formatted_text), "Cannot join text with different modes"
self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts))
return self
def _is_compatible(self, formatted_text):
""":type formatted_text: FormattedText"""
return self.mode == formatted_text.mode
def build_message(self):
return Message.create(self.text, parse_mode=self.mode)
def _escaped(self, text):
return self.__escaped(text, self.formatter.escape)
def _escaped_code(self, text):
return self.__escaped(text, self.formatter.escape_code)
@staticmethod
def __escaped(text, escape_func):
if type(text) is not str:
text = str(text)
return escape_func(text)
def start_format(self):
return FormattedTextStringFormat(self)
def length(self):
return len(self.text)
def __bool__(self):
return self.length() > 0
def clear(self):
self.text = ""
|
alvarogzp/telegram-bot-framework | bot/action/util/textformat.py | FormattedText.join | python | def join(self, formatted_texts):
formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator
for formatted_text in formatted_texts:
assert self._is_compatible(formatted_text), "Cannot join text with different modes"
self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts))
return self | :type formatted_texts: list[FormattedText] | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/textformat.py#L48-L54 | [
"def _is_compatible(self, formatted_text):\n \"\"\":type formatted_text: FormattedText\"\"\"\n return self.mode == formatted_text.mode\n"
] | class FormattedText:
def __init__(self, mode="HTML"):
self.formatter = TextFormatterFactory.get_for_mode(mode)
self.mode = mode
self.text = ""
def raw(self, text: str):
self.text += text
return self
def normal(self, text: str):
self.text += self._escaped(text)
return self
def bold(self, text: str):
self.text += self.formatter.bold(self._escaped(text))
return self
def italic(self, text: str):
self.text += self.formatter.italic(self._escaped(text))
return self
def url(self, text: str, url: str):
self.text += self.formatter.url(self._escaped(text), self._escaped(url))
return self
def code_inline(self, text: str):
self.text += self.formatter.code_inline(self._escaped_code(text))
return self
def code_block(self, text: str):
self.text += self.formatter.code_block(self._escaped_code(text))
return self
def newline(self):
self.text += "\n"
return self
def concat(self, formatted_text):
""":type formatted_text: FormattedText"""
assert self._is_compatible(formatted_text), "Cannot concat text with different modes"
self.text += formatted_text.text
return self
def _is_compatible(self, formatted_text):
""":type formatted_text: FormattedText"""
return self.mode == formatted_text.mode
def build_message(self):
return Message.create(self.text, parse_mode=self.mode)
def _escaped(self, text):
return self.__escaped(text, self.formatter.escape)
def _escaped_code(self, text):
return self.__escaped(text, self.formatter.escape_code)
@staticmethod
def __escaped(text, escape_func):
if type(text) is not str:
text = str(text)
return escape_func(text)
def start_format(self):
return FormattedTextStringFormat(self)
def length(self):
return len(self.text)
def __bool__(self):
return self.length() > 0
def clear(self):
self.text = ""
|
alvarogzp/telegram-bot-framework | bot/action/util/textformat.py | FormattedTextStringFormat.concat | python | def concat(self, *args, **kwargs):
for arg in args:
assert self.formatted_text._is_compatible(arg), "Cannot concat text with different modes"
self.format_args.append(arg.text)
for kwarg in kwargs:
value = kwargs[kwarg]
assert self.formatted_text._is_compatible(value), "Cannot concat text with different modes"
self.format_kwargs[kwarg] = value.text
return self | :type args: FormattedText
:type kwargs: FormattedText | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/textformat.py#L251-L263 | null | class FormattedTextStringFormat:
def __init__(self, formatted_text: FormattedText):
self.formatted_text = formatted_text
self.formatter = formatted_text.formatter
self.format_args = []
self.format_kwargs = {}
def normal(self, *args, **kwargs):
self._add(lambda x: x, args, kwargs)
return self
def bold(self, *args, **kwargs):
self._add(self.formatter.bold, args, kwargs)
return self
def italic(self, *args, **kwargs):
self._add(self.formatter.italic, args, kwargs)
return self
def url(self, text: str, url: str, name=None):
text = self.formatter.url(self._escaped(text), self._escaped(url))
if name is None:
self.format_args.append(text)
else:
self.format_kwargs[name] = text
return self
def code_inline(self, *args, **kwargs):
self._add(self.formatter.code_inline, args, kwargs)
return self
def code_block(self, *args, **kwargs):
self._add(self.formatter.code_block, args, kwargs)
return self
def _add(self, func_to_apply, args, kwargs):
self.format_args.extend((func_to_apply(self._escaped(arg)) for arg in args))
for kwarg in kwargs:
self.format_kwargs[kwarg] = func_to_apply(self._escaped(kwargs[kwarg]))
def _escaped(self, text):
return self.formatted_text._escaped(text)
def end_format(self):
self.formatted_text.text = self.formatted_text.text.format(*self.format_args, **self.format_kwargs)
return self.formatted_text
|
alvarogzp/telegram-bot-framework | bot/action/standard/userinfo.py | UserStorageHandler.get_instance | python | def get_instance(cls, state):
if cls.instance is None:
cls.instance = UserStorageHandler(state)
return cls.instance | :rtype: UserStorageHandler | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/userinfo.py#L26-L30 | null | class UserStorageHandler:
instance = None
@classmethod
def __init__(self, state):
self.state = state.get_for("user")
def get(self, user_id):
user = self.state.get_for(str(user_id))
return DictionaryObject({
"id": user_id,
"first_name": user.first_name,
"last_name": user.last_name,
"username": user.username,
"title": user.title # for chats, they use the user storage too
})
def save(self, user):
user_store = self.state.get_for(str(user.id))
if user.first_name != user_store.first_name:
user_store.first_name = user.first_name
if user.last_name != user_store.last_name:
user_store.last_name = user.last_name
if user.username != user_store.username:
user_store.username = user.username
if user.title != user_store.title:
user_store.title = user.title
|
alvarogzp/telegram-bot-framework | bot/action/standard/benchmark.py | WorkersAction._get_active_threads_names | python | def _get_active_threads_names():
active_threads = threading.enumerate()
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=thread.name).end_format()
for thread in active_threads
]
) | May contain sensitive info (like user ids). Use with care. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/benchmark.py#L164-L172 | [
"def join(self, formatted_texts):\n \"\"\":type formatted_texts: list[FormattedText]\"\"\"\n formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator\n for formatted_text in formatted_texts:\n assert self._is_compatible(formatted_text), \"Cannot join text with different modes\"\n self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts))\n return self\n"
] | class WorkersAction(Action):
def process(self, event):
response = FormattedText().newline().newline().join((
self.get_active_threads(),
self.get_running_workers(),
self.get_worker_pools()
))
self.api.send_message(response.build_message().to_chat_replying(event.message))
def get_active_threads(self):
return FormattedText()\
.concat(self.get_active_threads_number())\
.concat(self._get_active_threads_names())
@staticmethod
def get_active_threads_number():
active_threads_number = threading.active_count()
return FormattedText().normal("Active threads: {number}")\
.start_format().bold(number=active_threads_number).end_format()
@staticmethod
def get_running_workers(self):
running_workers = self.scheduler.get_running_workers()
return FormattedText()\
.concat(self.get_running_workers_number(running_workers))\
.concat(self._get_running_workers_names(running_workers))
@staticmethod
def get_running_workers_number(running_workers: list):
running_workers_number = len(running_workers)
return FormattedText().normal("Running workers: {number}")\
.start_format().bold(number=running_workers_number).end_format()
@staticmethod
def _get_running_workers_names(running_workers: list):
"""May contain sensitive info (like user ids). Use with care."""
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in running_workers
]
)
def get_worker_pools(self):
worker_pools = self.scheduler.get_worker_pools()
return FormattedText()\
.concat(self.get_worker_pools_number(worker_pools))\
.concat(self._get_worker_pools_names(worker_pools))
@staticmethod
def get_worker_pools_number(worker_pools: list):
worker_pools_number = len(worker_pools)
return FormattedText().normal("Worker pools: {number}")\
.start_format().bold(number=worker_pools_number).end_format()
@staticmethod
def _get_worker_pools_names(worker_pools: list):
"""May contain sensitive info (like user ids). Use with care."""
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in worker_pools
]
)
|
alvarogzp/telegram-bot-framework | bot/action/standard/benchmark.py | WorkersAction._get_running_workers_names | python | def _get_running_workers_names(running_workers: list):
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in running_workers
]
) | May contain sensitive info (like user ids). Use with care. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/benchmark.py#L187-L194 | [
"def join(self, formatted_texts):\n \"\"\":type formatted_texts: list[FormattedText]\"\"\"\n formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator\n for formatted_text in formatted_texts:\n assert self._is_compatible(formatted_text), \"Cannot join text with different modes\"\n self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts))\n return self\n"
] | class WorkersAction(Action):
def process(self, event):
response = FormattedText().newline().newline().join((
self.get_active_threads(),
self.get_running_workers(),
self.get_worker_pools()
))
self.api.send_message(response.build_message().to_chat_replying(event.message))
def get_active_threads(self):
return FormattedText()\
.concat(self.get_active_threads_number())\
.concat(self._get_active_threads_names())
@staticmethod
def get_active_threads_number():
active_threads_number = threading.active_count()
return FormattedText().normal("Active threads: {number}")\
.start_format().bold(number=active_threads_number).end_format()
@staticmethod
def _get_active_threads_names():
"""May contain sensitive info (like user ids). Use with care."""
active_threads = threading.enumerate()
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=thread.name).end_format()
for thread in active_threads
]
)
def get_running_workers(self):
running_workers = self.scheduler.get_running_workers()
return FormattedText()\
.concat(self.get_running_workers_number(running_workers))\
.concat(self._get_running_workers_names(running_workers))
@staticmethod
def get_running_workers_number(running_workers: list):
running_workers_number = len(running_workers)
return FormattedText().normal("Running workers: {number}")\
.start_format().bold(number=running_workers_number).end_format()
@staticmethod
def get_worker_pools(self):
worker_pools = self.scheduler.get_worker_pools()
return FormattedText()\
.concat(self.get_worker_pools_number(worker_pools))\
.concat(self._get_worker_pools_names(worker_pools))
@staticmethod
def get_worker_pools_number(worker_pools: list):
worker_pools_number = len(worker_pools)
return FormattedText().normal("Worker pools: {number}")\
.start_format().bold(number=worker_pools_number).end_format()
@staticmethod
def _get_worker_pools_names(worker_pools: list):
"""May contain sensitive info (like user ids). Use with care."""
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in worker_pools
]
)
|
alvarogzp/telegram-bot-framework | bot/action/standard/benchmark.py | WorkersAction._get_worker_pools_names | python | def _get_worker_pools_names(worker_pools: list):
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in worker_pools
]
) | May contain sensitive info (like user ids). Use with care. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/benchmark.py#L209-L216 | [
"def join(self, formatted_texts):\n \"\"\":type formatted_texts: list[FormattedText]\"\"\"\n formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator\n for formatted_text in formatted_texts:\n assert self._is_compatible(formatted_text), \"Cannot join text with different modes\"\n self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts))\n return self\n"
] | class WorkersAction(Action):
def process(self, event):
response = FormattedText().newline().newline().join((
self.get_active_threads(),
self.get_running_workers(),
self.get_worker_pools()
))
self.api.send_message(response.build_message().to_chat_replying(event.message))
def get_active_threads(self):
return FormattedText()\
.concat(self.get_active_threads_number())\
.concat(self._get_active_threads_names())
@staticmethod
def get_active_threads_number():
active_threads_number = threading.active_count()
return FormattedText().normal("Active threads: {number}")\
.start_format().bold(number=active_threads_number).end_format()
@staticmethod
def _get_active_threads_names():
"""May contain sensitive info (like user ids). Use with care."""
active_threads = threading.enumerate()
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=thread.name).end_format()
for thread in active_threads
]
)
def get_running_workers(self):
running_workers = self.scheduler.get_running_workers()
return FormattedText()\
.concat(self.get_running_workers_number(running_workers))\
.concat(self._get_running_workers_names(running_workers))
@staticmethod
def get_running_workers_number(running_workers: list):
running_workers_number = len(running_workers)
return FormattedText().normal("Running workers: {number}")\
.start_format().bold(number=running_workers_number).end_format()
@staticmethod
def _get_running_workers_names(running_workers: list):
"""May contain sensitive info (like user ids). Use with care."""
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in running_workers
]
)
def get_worker_pools(self):
worker_pools = self.scheduler.get_worker_pools()
return FormattedText()\
.concat(self.get_worker_pools_number(worker_pools))\
.concat(self._get_worker_pools_names(worker_pools))
@staticmethod
def get_worker_pools_number(worker_pools: list):
worker_pools_number = len(worker_pools)
return FormattedText().normal("Worker pools: {number}")\
.start_format().bold(number=worker_pools_number).end_format()
@staticmethod
|
alvarogzp/telegram-bot-framework | bot/action/standard/info/formatter/user.py | UserInfoFormatter.format | python | def format(self, member_info: bool = False):
user = self.api_object
self.__format_user(user)
if member_info and self.chat.type != CHAT_TYPE_PRIVATE:
self._add_empty()
self.__format_member(user) | :param member_info: If True, adds also chat member info. Please, note that this additional info requires
to make ONE api call. | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/info/formatter/user.py#L19-L28 | [
"def _add_empty(self):\n self._add(FormattedText())\n",
"def __format_member(self, user: ApiObject):\n member = self.api.getChatMember(chat_id=self.chat.id, user_id=user.id)\n status = member.status\n self._add_title(\"Member info\")\n self._add_info(\"Status\", status)\n if status in (MEMBER_STATUS_RESTRICTED, MEMBER_STATUS_KICKED):\n until = self._date(member.until_date, \"Forever\")\n self._add_info(\"Until\", until)\n if status == MEMBER_STATUS_ADMINISTRATOR:\n can_change_info = self._yes_no(member.can_change_info)\n can_delete_messages = self._yes_no(member.can_delete_messages)\n can_invite_users = self._yes_no(member.can_invite_users)\n can_restrict_members = self._yes_no(member.can_restrict_members)\n can_pin_messages = self._yes_no(member.can_pin_messages)\n can_promote_members = self._yes_no(member.can_promote_members)\n self._add_info(\"Can change chat info (title, photo, etc.)\", can_change_info, separator=\"?\")\n self._add_info(\"Can delete messages of other users\", can_delete_messages, separator=\"?\")\n self._add_info(\"Can invite new users\", can_invite_users, separator=\"?\")\n self._add_info(\"Can remove and restrict members\", can_restrict_members, separator=\"?\")\n self._add_info(\"Can pin messages\", can_pin_messages, separator=\"?\")\n self._add_info(\"Can add new admins\", can_promote_members, separator=\"?\")\n if self.chat.type == CHAT_TYPE_CHANNEL or \\\n member.can_post_messages is not None or member.can_edit_messages is not None:\n can_post_messages = self._yes_no(member.can_post_messages)\n can_edit_messages = self._yes_no(member.can_edit_messages)\n self._add_info(\"Can send messages (for channels only)\", can_post_messages, separator=\"?\")\n self._add_info(\"Can edit messages of other users (for channels only)\", can_edit_messages, separator=\"?\")\n if status == MEMBER_STATUS_RESTRICTED:\n can_send_messages = self._yes_no(member.can_send_messages)\n can_send_media_messages = self._yes_no(member.can_send_media_messages)\n can_send_other_messages = 
self._yes_no(member.can_send_other_messages)\n can_add_web_page_previews = self._yes_no(member.can_add_web_page_previews)\n self._add_info(\"Can send messages\", can_send_messages, separator=\"?\")\n self._add_info(\"Can send media messages (audio, photo & video)\", can_send_media_messages, separator=\"?\")\n self._add_info(\n \"Can send other messages (stickers, gifs, games, inline bots)\", can_send_other_messages, separator=\"?\"\n )\n self._add_info(\"Can add web page previews\", can_add_web_page_previews, separator=\"?\")\n",
"def __format_user(self, user: ApiObject):\n full_data = UserFormatter(user).full_data\n first_name = self._text(user.first_name)\n last_name = self._text(user.last_name)\n username = self._username(user.username)\n _id = user.id\n language_code = self._text(user.language_code)\n is_bot = self._yes_no(user.is_bot, yes_emoji=\"(🤖)\", no_emoji=\"(👤)\")\n self._add_title(full_data)\n self._add_empty()\n self._add_info(\"First name\", first_name)\n self._add_info(\"Last name\", last_name)\n self._add_info(\"Username\", username)\n self._add_info(\"Id\", _id)\n self._add_info(\"Language code\", language_code)\n self._add_info(\"Is bot\", is_bot, separator=\"?\")\n"
] | class UserInfoFormatter(ApiObjectInfoFormatter):
def __init__(self, api: Api, user: ApiObject, chat: ApiObject):
super().__init__(api, user)
self.chat = chat
def __format_member(self, user: ApiObject):
member = self.api.getChatMember(chat_id=self.chat.id, user_id=user.id)
status = member.status
self._add_title("Member info")
self._add_info("Status", status)
if status in (MEMBER_STATUS_RESTRICTED, MEMBER_STATUS_KICKED):
until = self._date(member.until_date, "Forever")
self._add_info("Until", until)
if status == MEMBER_STATUS_ADMINISTRATOR:
can_change_info = self._yes_no(member.can_change_info)
can_delete_messages = self._yes_no(member.can_delete_messages)
can_invite_users = self._yes_no(member.can_invite_users)
can_restrict_members = self._yes_no(member.can_restrict_members)
can_pin_messages = self._yes_no(member.can_pin_messages)
can_promote_members = self._yes_no(member.can_promote_members)
self._add_info("Can change chat info (title, photo, etc.)", can_change_info, separator="?")
self._add_info("Can delete messages of other users", can_delete_messages, separator="?")
self._add_info("Can invite new users", can_invite_users, separator="?")
self._add_info("Can remove and restrict members", can_restrict_members, separator="?")
self._add_info("Can pin messages", can_pin_messages, separator="?")
self._add_info("Can add new admins", can_promote_members, separator="?")
if self.chat.type == CHAT_TYPE_CHANNEL or \
member.can_post_messages is not None or member.can_edit_messages is not None:
can_post_messages = self._yes_no(member.can_post_messages)
can_edit_messages = self._yes_no(member.can_edit_messages)
self._add_info("Can send messages (for channels only)", can_post_messages, separator="?")
self._add_info("Can edit messages of other users (for channels only)", can_edit_messages, separator="?")
if status == MEMBER_STATUS_RESTRICTED:
can_send_messages = self._yes_no(member.can_send_messages)
can_send_media_messages = self._yes_no(member.can_send_media_messages)
can_send_other_messages = self._yes_no(member.can_send_other_messages)
can_add_web_page_previews = self._yes_no(member.can_add_web_page_previews)
self._add_info("Can send messages", can_send_messages, separator="?")
self._add_info("Can send media messages (audio, photo & video)", can_send_media_messages, separator="?")
self._add_info(
"Can send other messages (stickers, gifs, games, inline bots)", can_send_other_messages, separator="?"
)
self._add_info("Can add web page previews", can_add_web_page_previews, separator="?")
def __format_user(self, user: ApiObject):
full_data = UserFormatter(user).full_data
first_name = self._text(user.first_name)
last_name = self._text(user.last_name)
username = self._username(user.username)
_id = user.id
language_code = self._text(user.language_code)
is_bot = self._yes_no(user.is_bot, yes_emoji="(🤖)", no_emoji="(👤)")
self._add_title(full_data)
self._add_empty()
self._add_info("First name", first_name)
self._add_info("Last name", last_name)
self._add_info("Username", username)
self._add_info("Id", _id)
self._add_info("Language code", language_code)
self._add_info("Is bot", is_bot, separator="?")
|
alvarogzp/telegram-bot-framework | bot/bot.py | UpdatesProcessor.safe_log_error | python | def safe_log_error(self, error: Exception, *info: str):
self.__do_safe(lambda: self.logger.error(error, *info)) | Log error failing silently on error | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/bot.py#L173-L175 | [
"def __do_safe(func: callable):\n try:\n return func()\n except Exception:\n pass\n"
] | class UpdatesProcessor:
def __init__(self, get_updates_func: callable, logger: AdminLogger, config: Config,
update_processor: UpdateProcessor):
self.get_updates_func = get_updates_func
self.logger = logger
self.config = config
self.update_processor = update_processor
self.last_error = None
self.number_of_updates_processed = 0
def run(self):
self.processing_starting()
try:
self.__processing_loop()
finally:
self.processing_ended()
self.processing_ended_successfully()
def __processing_loop(self):
while self.should_keep_processing_updates():
self.__get_and_process_handling_errors()
def __get_and_process_handling_errors(self):
try:
self.__get_and_process()
except Exception as e:
self.__handle_error(e)
# notify there has been an error
self.processing_error(e)
else:
# notify successful processing
self.processing_successful()
def __get_and_process(self):
for update in self.get_updates_func():
self.update_processor.process_update(update)
self.number_of_updates_processed += 1
def __handle_error(self, error: Exception):
sleep_seconds = self.config.sleep_seconds_on_get_updates_error
# we do not want to let non-fatal (eg. API) errors to escape from here
self.safe_log_error(error, "get_and_process", "Sleeping for {seconds} seconds.".format(seconds=sleep_seconds))
# there has been an error while getting updates, sleep a little to give a chance
# for the server or the network to recover (if that was the case), and to not to flood the server
time.sleep(int(sleep_seconds))
def safe_log_info(self, *info: str):
"""Log info failing silently on error"""
self.__do_safe(lambda: self.logger.info(*info))
@staticmethod
def __do_safe(func: callable):
try:
return func()
except Exception:
pass
def should_keep_processing_updates(self):
raise NotImplementedError()
def processing_successful(self):
"""Updates were processed successfully"""
self.last_error = None
def processing_error(self, error: Exception):
"""There has been an error while processing the last updates"""
self.last_error = error
def processing_starting(self):
"""Updates are about to start being processed"""
pass
def processing_ended(self):
"""Processing has ended, we don't know if successfully or caused by an error"""
self.safe_log_info(
"Ending",
"Updates processed: {updates_processed_number}"
.format(updates_processed_number=self.number_of_updates_processed)
)
def processing_ended_successfully(self):
"""Processing has ended successfully"""
pass
|
alvarogzp/telegram-bot-framework | bot/bot.py | UpdatesProcessor.safe_log_info | python | def safe_log_info(self, *info: str):
self.__do_safe(lambda: self.logger.info(*info)) | Log info failing silently on error | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/bot.py#L177-L179 | [
"def __do_safe(func: callable):\n try:\n return func()\n except Exception:\n pass\n"
] | class UpdatesProcessor:
def __init__(self, get_updates_func: callable, logger: AdminLogger, config: Config,
update_processor: UpdateProcessor):
self.get_updates_func = get_updates_func
self.logger = logger
self.config = config
self.update_processor = update_processor
self.last_error = None
self.number_of_updates_processed = 0
def run(self):
self.processing_starting()
try:
self.__processing_loop()
finally:
self.processing_ended()
self.processing_ended_successfully()
def __processing_loop(self):
while self.should_keep_processing_updates():
self.__get_and_process_handling_errors()
def __get_and_process_handling_errors(self):
try:
self.__get_and_process()
except Exception as e:
self.__handle_error(e)
# notify there has been an error
self.processing_error(e)
else:
# notify successful processing
self.processing_successful()
def __get_and_process(self):
for update in self.get_updates_func():
self.update_processor.process_update(update)
self.number_of_updates_processed += 1
def __handle_error(self, error: Exception):
sleep_seconds = self.config.sleep_seconds_on_get_updates_error
# we do not want to let non-fatal (eg. API) errors to escape from here
self.safe_log_error(error, "get_and_process", "Sleeping for {seconds} seconds.".format(seconds=sleep_seconds))
# there has been an error while getting updates, sleep a little to give a chance
# for the server or the network to recover (if that was the case), and to not to flood the server
time.sleep(int(sleep_seconds))
def safe_log_error(self, error: Exception, *info: str):
"""Log error failing silently on error"""
self.__do_safe(lambda: self.logger.error(error, *info))
@staticmethod
def __do_safe(func: callable):
try:
return func()
except Exception:
pass
def should_keep_processing_updates(self):
raise NotImplementedError()
def processing_successful(self):
"""Updates were processed successfully"""
self.last_error = None
def processing_error(self, error: Exception):
"""There has been an error while processing the last updates"""
self.last_error = error
def processing_starting(self):
"""Updates are about to start being processed"""
pass
def processing_ended(self):
"""Processing has ended, we don't know if successfully or caused by an error"""
self.safe_log_info(
"Ending",
"Updates processed: {updates_processed_number}"
.format(updates_processed_number=self.number_of_updates_processed)
)
def processing_ended_successfully(self):
"""Processing has ended successfully"""
pass
|
ajk8/hatchery | hatchery/helpers.py | value_of_named_argument_in_function | python | def value_of_named_argument_in_function(argument_name, function_name, search_str,
resolve_varname=False):
try:
search_str = unicode(search_str)
except NameError:
pass
readline = StringIO(search_str).readline
try:
token_generator = tokenize.generate_tokens(readline)
tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _, _ in token_generator]
except tokenize.TokenError as e:
raise ValueError('search_str is not parse-able python code: ' + str(e))
in_function = False
is_var = False
for i in range(len(tokens)):
if (
not in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == function_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '('
):
in_function = True
continue
elif (
in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
# value is set to another variable which we are going to attempt to resolve
if resolve_varname and tokens[i+2].typenum == 1:
is_var = True
argument_name = tokens[i+2].value
break
# again, for a very specific usecase -- get the whole value and concatenate it
# this will match something like _version.__version__
j = 3
while True:
if tokens[i+j].value in (',', ')') or tokens[i+j].typenum == 58:
break
j += 1
return ''.join([t.value for t in tokens[i+2:i+j]]).strip()
# this is very dumb logic, and only works if the function argument is set to a variable
# which is set to a string value
if is_var:
for i in range(len(tokens)):
if (
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
return tokens[i+2].value.strip()
return None | Parse an arbitrary block of python code to get the value of a named argument
from inside a function call | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/helpers.py#L17-L73 | null | import tokenize
import collections
import os
import funcy
import microcache
from io import StringIO
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
SimplifiedToken = collections.namedtuple('SimplifiedToken', ('typenum', 'value'))
@microcache.this
@microcache.this
def get_file_content(file_path):
""" Load the content of a text file into a string """
with open(file_path) as f:
ret = f.read()
return ret
@microcache.this
def package_file_path(filename, package_name):
""" Convenience function to get the path to a package's version file
>>> package_file_path('mymodule.py', 'mypackage')
'mypackage/mymodule.py'
"""
return os.path.join(package_name, filename)
@microcache.this
def regex_in_file(regex, filepath, return_match=False):
""" Search for a regex in a file
If return_match is True, return the found object instead of a boolean
"""
file_content = get_file_content(filepath)
re_method = funcy.re_find if return_match else funcy.re_test
return re_method(regex, file_content)
@microcache.this
def regex_in_package_file(regex, filename, package_name, return_match=False):
""" Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean
"""
filepath = package_file_path(filename, package_name)
return regex_in_file(regex, filepath, return_match=return_match)
@microcache.this
def string_is_url(test_str):
""" Test to see if a string is a URL or not, defined in this case as a string for which
urlparse returns a scheme component
>>> string_is_url('somestring')
False
>>> string_is_url('https://some.domain.org/path')
True
"""
parsed = urlparse.urlparse(test_str)
return parsed.scheme is not None and parsed.scheme != ''
|
ajk8/hatchery | hatchery/helpers.py | regex_in_file | python | def regex_in_file(regex, filepath, return_match=False):
file_content = get_file_content(filepath)
re_method = funcy.re_find if return_match else funcy.re_test
return re_method(regex, file_content) | Search for a regex in a file
If return_match is True, return the found object instead of a boolean | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/helpers.py#L95-L102 | null | import tokenize
import collections
import os
import funcy
import microcache
from io import StringIO
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
SimplifiedToken = collections.namedtuple('SimplifiedToken', ('typenum', 'value'))
@microcache.this
def value_of_named_argument_in_function(argument_name, function_name, search_str,
resolve_varname=False):
""" Parse an arbitrary block of python code to get the value of a named argument
from inside a function call
"""
try:
search_str = unicode(search_str)
except NameError:
pass
readline = StringIO(search_str).readline
try:
token_generator = tokenize.generate_tokens(readline)
tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _, _ in token_generator]
except tokenize.TokenError as e:
raise ValueError('search_str is not parse-able python code: ' + str(e))
in_function = False
is_var = False
for i in range(len(tokens)):
if (
not in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == function_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '('
):
in_function = True
continue
elif (
in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
# value is set to another variable which we are going to attempt to resolve
if resolve_varname and tokens[i+2].typenum == 1:
is_var = True
argument_name = tokens[i+2].value
break
# again, for a very specific usecase -- get the whole value and concatenate it
# this will match something like _version.__version__
j = 3
while True:
if tokens[i+j].value in (',', ')') or tokens[i+j].typenum == 58:
break
j += 1
return ''.join([t.value for t in tokens[i+2:i+j]]).strip()
# this is very dumb logic, and only works if the function argument is set to a variable
# which is set to a string value
if is_var:
for i in range(len(tokens)):
if (
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
return tokens[i+2].value.strip()
return None
@microcache.this
def get_file_content(file_path):
""" Load the content of a text file into a string """
with open(file_path) as f:
ret = f.read()
return ret
@microcache.this
def package_file_path(filename, package_name):
""" Convenience function to get the path to a package's version file
>>> package_file_path('mymodule.py', 'mypackage')
'mypackage/mymodule.py'
"""
return os.path.join(package_name, filename)
@microcache.this
@microcache.this
def regex_in_package_file(regex, filename, package_name, return_match=False):
""" Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean
"""
filepath = package_file_path(filename, package_name)
return regex_in_file(regex, filepath, return_match=return_match)
@microcache.this
def string_is_url(test_str):
""" Test to see if a string is a URL or not, defined in this case as a string for which
urlparse returns a scheme component
>>> string_is_url('somestring')
False
>>> string_is_url('https://some.domain.org/path')
True
"""
parsed = urlparse.urlparse(test_str)
return parsed.scheme is not None and parsed.scheme != ''
|
ajk8/hatchery | hatchery/helpers.py | regex_in_package_file | python | def regex_in_package_file(regex, filename, package_name, return_match=False):
filepath = package_file_path(filename, package_name)
return regex_in_file(regex, filepath, return_match=return_match) | Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/helpers.py#L106-L112 | null | import tokenize
import collections
import os
import funcy
import microcache
from io import StringIO
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
SimplifiedToken = collections.namedtuple('SimplifiedToken', ('typenum', 'value'))
@microcache.this
def value_of_named_argument_in_function(argument_name, function_name, search_str,
resolve_varname=False):
""" Parse an arbitrary block of python code to get the value of a named argument
from inside a function call
"""
try:
search_str = unicode(search_str)
except NameError:
pass
readline = StringIO(search_str).readline
try:
token_generator = tokenize.generate_tokens(readline)
tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _, _ in token_generator]
except tokenize.TokenError as e:
raise ValueError('search_str is not parse-able python code: ' + str(e))
in_function = False
is_var = False
for i in range(len(tokens)):
if (
not in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == function_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '('
):
in_function = True
continue
elif (
in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
# value is set to another variable which we are going to attempt to resolve
if resolve_varname and tokens[i+2].typenum == 1:
is_var = True
argument_name = tokens[i+2].value
break
# again, for a very specific usecase -- get the whole value and concatenate it
# this will match something like _version.__version__
j = 3
while True:
if tokens[i+j].value in (',', ')') or tokens[i+j].typenum == 58:
break
j += 1
return ''.join([t.value for t in tokens[i+2:i+j]]).strip()
# this is very dumb logic, and only works if the function argument is set to a variable
# which is set to a string value
if is_var:
for i in range(len(tokens)):
if (
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
return tokens[i+2].value.strip()
return None
@microcache.this
def get_file_content(file_path):
""" Load the content of a text file into a string """
with open(file_path) as f:
ret = f.read()
return ret
@microcache.this
def package_file_path(filename, package_name):
""" Convenience function to get the path to a package's version file
>>> package_file_path('mymodule.py', 'mypackage')
'mypackage/mymodule.py'
"""
return os.path.join(package_name, filename)
@microcache.this
def regex_in_file(regex, filepath, return_match=False):
""" Search for a regex in a file
If return_match is True, return the found object instead of a boolean
"""
file_content = get_file_content(filepath)
re_method = funcy.re_find if return_match else funcy.re_test
return re_method(regex, file_content)
@microcache.this
@microcache.this
def string_is_url(test_str):
""" Test to see if a string is a URL or not, defined in this case as a string for which
urlparse returns a scheme component
>>> string_is_url('somestring')
False
>>> string_is_url('https://some.domain.org/path')
True
"""
parsed = urlparse.urlparse(test_str)
return parsed.scheme is not None and parsed.scheme != ''
|
ajk8/hatchery | hatchery/helpers.py | string_is_url | python | def string_is_url(test_str):
parsed = urlparse.urlparse(test_str)
return parsed.scheme is not None and parsed.scheme != '' | Test to see if a string is a URL or not, defined in this case as a string for which
urlparse returns a scheme component
>>> string_is_url('somestring')
False
>>> string_is_url('https://some.domain.org/path')
True | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/helpers.py#L116-L126 | null | import tokenize
import collections
import os
import funcy
import microcache
from io import StringIO
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
SimplifiedToken = collections.namedtuple('SimplifiedToken', ('typenum', 'value'))
@microcache.this
def value_of_named_argument_in_function(argument_name, function_name, search_str,
resolve_varname=False):
""" Parse an arbitrary block of python code to get the value of a named argument
from inside a function call
"""
try:
search_str = unicode(search_str)
except NameError:
pass
readline = StringIO(search_str).readline
try:
token_generator = tokenize.generate_tokens(readline)
tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _, _ in token_generator]
except tokenize.TokenError as e:
raise ValueError('search_str is not parse-able python code: ' + str(e))
in_function = False
is_var = False
for i in range(len(tokens)):
if (
not in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == function_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '('
):
in_function = True
continue
elif (
in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
# value is set to another variable which we are going to attempt to resolve
if resolve_varname and tokens[i+2].typenum == 1:
is_var = True
argument_name = tokens[i+2].value
break
# again, for a very specific usecase -- get the whole value and concatenate it
# this will match something like _version.__version__
j = 3
while True:
if tokens[i+j].value in (',', ')') or tokens[i+j].typenum == 58:
break
j += 1
return ''.join([t.value for t in tokens[i+2:i+j]]).strip()
# this is very dumb logic, and only works if the function argument is set to a variable
# which is set to a string value
if is_var:
for i in range(len(tokens)):
if (
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
return tokens[i+2].value.strip()
return None
@microcache.this
def get_file_content(file_path):
""" Load the content of a text file into a string """
with open(file_path) as f:
ret = f.read()
return ret
@microcache.this
def package_file_path(filename, package_name):
""" Convenience function to get the path to a package's version file
>>> package_file_path('mymodule.py', 'mypackage')
'mypackage/mymodule.py'
"""
return os.path.join(package_name, filename)
@microcache.this
def regex_in_file(regex, filepath, return_match=False):
""" Search for a regex in a file
If return_match is True, return the found object instead of a boolean
"""
file_content = get_file_content(filepath)
re_method = funcy.re_find if return_match else funcy.re_test
return re_method(regex, file_content)
@microcache.this
def regex_in_package_file(regex, filename, package_name, return_match=False):
""" Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean
"""
filepath = package_file_path(filename, package_name)
return regex_in_file(regex, filepath, return_match=return_match)
@microcache.this
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.