hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74eb68637a5b38c8fbe62e1e9ca43b3cb96a9d13 | 6,030 | py | Python | bluesky_widgets/headless/figures.py | jklynch/bluesky-widgets | 238a77bb7c3a2bcebb890a892780021790c031f5 | [
"BSD-3-Clause"
] | null | null | null | bluesky_widgets/headless/figures.py | jklynch/bluesky-widgets | 238a77bb7c3a2bcebb890a892780021790c031f5 | [
"BSD-3-Clause"
] | null | null | null | bluesky_widgets/headless/figures.py | jklynch/bluesky-widgets | 238a77bb7c3a2bcebb890a892780021790c031f5 | [
"BSD-3-Clause"
import collections.abc
from pathlib import Path
import matplotlib
from ..models.plot_specs import Figure, FigureList
from .._matplotlib_axes import MatplotlibAxes
from ..utils.dict_view import DictView
class HeadlessFigures:
    """
    A headless "view" for a FigureList model.

    It does not produce a graphical user interface. Instead, it provides
    methods for exporting figures as images.

    Examples
    --------

    Export all the figures to a directory. They will be named by their title.
    If there are duplicate titles, a counting number will be appended like
    x-1.png, x-2.png.

    >>> headless = HeadlessFigures(model)
    >>> headless.export_all("path/to/directory/")

    Control the format.

    >>> headless.export_all("path/to/directory/", format="png")
    >>> headless.export_all("path/to/directory/", format="jpg")
    """

    def __init__(self, model: FigureList):
        self.model = model
        # Map Figure UUID to HeadlessFigure view.
        self._figures = {}
        for figure_spec in model:
            self._add_figure(figure_spec)
        # Keep the view in sync with additions/removals on the model.
        model.events.added.connect(self._on_figure_added)
        model.events.removed.connect(self._on_figure_removed)

    @property
    def figures(self):
        "Read-only access to the mapping Figure UUID -> HeadlessFigure"
        return DictView(self._figures)

    def _on_figure_added(self, event):
        figure_spec = event.item
        self._add_figure(figure_spec)

    def _add_figure(self, figure_spec):
        "Create a new matplotlib Figure."
        figure = HeadlessFigure(figure_spec)
        self._figures[figure_spec.uuid] = figure

    def _on_figure_removed(self, event):
        "Remove the associated view and close its figure."
        figure_spec = event.item
        figure = self._figures[figure_spec.uuid]
        figure.close_figure()
        del self._figures[figure_spec.uuid]

    def close_figures(self):
        "Close every matplotlib figure owned by this view."
        for figure in self._figures.values():
            figure.close_figure()

    close = close_figures  # convenience alias

    def export_all(self, directory, format="png", **kwargs):
        """
        Export all figures.

        Parameters
        ----------
        directory : str | Path
        format : str, optional
            Default is "png".
        **kwargs :
            Passed through to matplotlib.figure.Figure.savefig

        Returns
        -------
        filenames : List[String]
        """
        # Avoid name collisions in the case of duplicate titles by appending
        # "-1", "-2", "-3", ... to duplicates.
        titles_tallied = {}
        filenames = []
        for figure_spec in self.model:
            title = figure_spec.title
            if title in titles_tallied:
                filename = f"{title}-{titles_tallied[title]}"
                titles_tallied[title] += 1
            else:
                filename = title
                titles_tallied[title] = 1
            # BUG FIX: the basename was previously a hard-coded literal, so
            # every figure was written to the same file. Use the
            # collision-free name computed above.
            filename = str(Path(directory, f"{filename}.{format}"))
            figure = self._figures[figure_spec.uuid]
            figure.export(filename, format=format, **kwargs)
            filenames.append(filename)
        return filenames
class HeadlessFigure:
    """
    A headless "view" for a Figure model. This always contains one Figure.

    Examples
    --------

    Export the figure.

    >>> headless = HeadlessFigure(model)
    >>> headless.export("my-figure.png")
    """

    def __init__(self, model: Figure):
        self.model = model
        self.figure, self.axes_list = _make_figure(model)
        self.figure.suptitle(model.title)
        # One MatplotlibAxes wrapper per axes in the model, keyed by the
        # model axes' UUID.
        self._axes = {
            axes_spec.uuid: MatplotlibAxes(model=axes_spec, axes=axes)
            for axes_spec, axes in zip(model.axes, self.axes_list)
        }
        model.events.title.connect(self._on_title_changed)
        # The Figure model does not currently allow axes to be added or
        # removed, so we do not need to handle changes in model.axes.

    @property
    def axes(self):
        "Read-only access to the mapping Axes UUID -> MatplotlibAxes"
        return DictView(self._axes)

    def _on_title_changed(self, event):
        # Keep the matplotlib suptitle in sync with the model title.
        self.figure.suptitle(event.value)

    def close_figure(self):
        "Close the underlying matplotlib figure."
        _close_figure(self.figure)

    close = close_figure  # convenience alias

    def export(self, filename, format="png", **kwargs):
        """
        Export figure.

        Parameters
        ----------
        filename : str | Path
        format : str, optional
            Default is "png".
        **kwargs :
            Passed through to matplotlib.figure.Figure.savefig
        """
        target = str(filename)
        self.figure.savefig(target, format=format, **kwargs)
def _make_figure(figure_spec):
    "Create a matplotlib Figure and its Axes from *figure_spec*."
    matplotlib.use("Agg")  # must set before importing matplotlib.pyplot
    import matplotlib.pyplot as plt  # noqa

    # TODO Let Figure give different options to subplots here,
    # but verify that number of axes created matches the number of axes
    # specified.
    figure, axes = plt.subplots(len(figure_spec.axes))
    if isinstance(axes, collections.abc.Iterable):
        return figure, axes
    # Handle return-type instability in plt.subplots: a single subplot
    # comes back as a bare Axes object, so normalize it to a list.
    return figure, [axes]
def _close_figure(figure):
    """
    Workaround for matplotlib regression relating to closing figures in Agg

    See https://github.com/matplotlib/matplotlib/pull/18184/
    """
    # TODO It would be better to switch the approach based on matplotlib
    # versions known to have this problem, rather than blindly trying. Update
    # this once a fixed has been released and we know the earliest version of
    # matplotlib that does not have this bug.
    try:
        figure.canvas.close()
    except AttributeError:
        from matplotlib._pylab_helpers import Gcf

        # Walk the registry of open figure managers looking for ours.
        num = None
        for manager in Gcf.figs.values():
            if manager.canvas.figure == figure:
                num = manager.num
                break
        if num is not None:
            Gcf.destroy(num)
| 30.301508 | 83 | 0.625705 |
6b8b377e0ed2432b31bf8bb192f165325548dbfa | 1,014 | py | Python | test/test_v1_network_find_request.py | metal-stack/metal-python | cdf40fa86d2b2944f9818cef1c6723b1eecc506e | [
"MIT"
] | 7 | 2020-12-21T05:24:24.000Z | 2022-02-12T20:55:32.000Z | test/test_v1_network_find_request.py | metal-stack/metal-python | cdf40fa86d2b2944f9818cef1c6723b1eecc506e | [
"MIT"
] | 6 | 2020-09-16T07:23:34.000Z | 2022-01-18T12:05:30.000Z | test/test_v1_network_find_request.py | metal-stack/metal-python | cdf40fa86d2b2944f9818cef1c6723b1eecc506e | [
"MIT"
# coding: utf-8
"""
metal-api
API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501
OpenAPI spec version: v0.15.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import metal_python
from metal_python.models.v1_network_find_request import V1NetworkFindRequest # noqa: E501
from metal_python.rest import ApiException
class TestV1NetworkFindRequest(unittest.TestCase):
    """Unit-test stubs for the V1NetworkFindRequest model."""

    def setUp(self):
        # No fixtures are required for these stubs yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1NetworkFindRequest(self):
        """Test V1NetworkFindRequest"""
        # FIXME: construct object with mandatory attributes with example values
        # model = metal_python.models.v1_network_find_request.V1NetworkFindRequest()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 24.731707 | 156 | 0.724852 |
376ae3c761fc2f69d37ea387853db5f275742ac3 | 10,879 | py | Python | pptx/oxml/__init__.py | jwoglom/python-pptx | ed748b0def32deba08bdc3d549092a26b764c28c | [
"MIT"
] | 1 | 2017-08-07T14:52:02.000Z | 2017-08-07T14:52:02.000Z | pptx/oxml/__init__.py | jwoglom/python-pptx | ed748b0def32deba08bdc3d549092a26b764c28c | [
"MIT"
] | 4 | 2021-03-18T20:28:17.000Z | 2022-03-11T23:18:51.000Z | pptx/oxml/__init__.py | jwoglom/python-pptx | ed748b0def32deba08bdc3d549092a26b764c28c | [
"MIT"
# encoding: utf-8
"""
Initializes lxml parser and makes available a handful of functions that wrap
its typical uses.
"""
from __future__ import absolute_import
from lxml import etree
from .ns import NamespacePrefixedTag
# configure etree XML parser -------------------------------
# The namespace-aware class lookup maps (namespace, tag) pairs to custom
# element classes (populated via register_element_cls below); the parser is
# configured with remove_blank_text=True, so whitespace-only text nodes are
# dropped at parse time.
element_class_lookup = etree.ElementNamespaceClassLookup()
oxml_parser = etree.XMLParser(remove_blank_text=True)
oxml_parser.set_element_class_lookup(element_class_lookup)
def parse_xml(xml):
    """
    Return root lxml element obtained by parsing XML character string in
    *xml*, which can be either a Python 2.x string or unicode.
    """
    return etree.fromstring(xml, oxml_parser)
def register_element_cls(nsptagname, cls):
    """
    Register *cls* to be constructed when the oxml parser encounters an
    element having name *nsptagname*, a string of the form
    ``nspfx:tagroot``, e.g. ``'w:document'``.
    """
    tag = NamespacePrefixedTag(nsptagname)
    namespace_registry = element_class_lookup.get_namespace(tag.nsuri)
    namespace_registry[tag.local_part] = cls
# ----------------------------------------------------------------------------
# One-time registration mapping namespace-prefixed tag names to custom
# element classes. Once this module is imported, XML parsed with oxml_parser
# materializes these classes instead of plain lxml elements. Registrations
# overwrite: a later registration for the same tag silently wins.
# ----------------------------------------------------------------------------
# -- hyperlink elements --
from .action import CT_Hyperlink
register_element_cls('a:hlinkClick', CT_Hyperlink)
register_element_cls('a:hlinkHover', CT_Hyperlink)
# -- chart elements --
from .chart.axis import (
    CT_AxisUnit, CT_CatAx, CT_LblOffset, CT_Scaling, CT_TickLblPos,
    CT_TickMark, CT_ValAx
)
register_element_cls('c:catAx', CT_CatAx)
register_element_cls('c:lblOffset', CT_LblOffset)
register_element_cls('c:majorTickMark', CT_TickMark)
register_element_cls('c:majorUnit', CT_AxisUnit)
register_element_cls('c:minorTickMark', CT_TickMark)
register_element_cls('c:minorUnit', CT_AxisUnit)
register_element_cls('c:scaling', CT_Scaling)
register_element_cls('c:tickLblPos', CT_TickLblPos)
register_element_cls('c:valAx', CT_ValAx)
from .chart.chart import (
    CT_Chart, CT_ChartSpace, CT_ExternalData, CT_PlotArea, CT_Style
)
register_element_cls('c:chart', CT_Chart)
register_element_cls('c:chartSpace', CT_ChartSpace)
register_element_cls('c:externalData', CT_ExternalData)
register_element_cls('c:plotArea', CT_PlotArea)
register_element_cls('c:style', CT_Style)
from .chart.legend import CT_Legend, CT_LegendPos
register_element_cls('c:legend', CT_Legend)
register_element_cls('c:legendPos', CT_LegendPos)
from .chart.plot import (
    CT_Area3DChart, CT_AreaChart, CT_BarChart, CT_BarDir, CT_DLblPos,
    CT_DLbls, CT_GapAmount, CT_Grouping, CT_LineChart, CT_Overlap,
    CT_PieChart
)
register_element_cls('c:area3DChart', CT_Area3DChart)
register_element_cls('c:areaChart', CT_AreaChart)
register_element_cls('c:barChart', CT_BarChart)
register_element_cls('c:barDir', CT_BarDir)
register_element_cls('c:dLblPos', CT_DLblPos)
register_element_cls('c:dLbls', CT_DLbls)
register_element_cls('c:gapWidth', CT_GapAmount)
register_element_cls('c:grouping', CT_Grouping)
register_element_cls('c:lineChart', CT_LineChart)
register_element_cls('c:overlap', CT_Overlap)
register_element_cls('c:pieChart', CT_PieChart)
from .chart.series import CT_SeriesComposite, CT_StrVal_NumVal_Composite
register_element_cls('c:pt', CT_StrVal_NumVal_Composite)
register_element_cls('c:ser', CT_SeriesComposite)
from .chart.shared import (
    CT_Boolean, CT_Double, CT_Layout, CT_LayoutMode, CT_ManualLayout,
    CT_NumFmt, CT_UnsignedInt
)
register_element_cls('c:autoUpdate', CT_Boolean)
register_element_cls('c:delete', CT_Boolean)
register_element_cls('c:idx', CT_UnsignedInt)
register_element_cls('c:invertIfNegative', CT_Boolean)
register_element_cls('c:layout', CT_Layout)
register_element_cls('c:manualLayout', CT_ManualLayout)
register_element_cls('c:max', CT_Double)
register_element_cls('c:min', CT_Double)
register_element_cls('c:numFmt', CT_NumFmt)
register_element_cls('c:order', CT_UnsignedInt)
register_element_cls('c:overlay', CT_Boolean)
register_element_cls('c:smooth', CT_Boolean)
register_element_cls('c:varyColors', CT_Boolean)
register_element_cls('c:x', CT_Double)
register_element_cls('c:xMode', CT_LayoutMode)
# -- drawing-ml (dml) color and fill elements --
from .dml.color import (
    CT_HslColor, CT_Percentage, CT_PresetColor, CT_SchemeColor,
    CT_ScRgbColor, CT_SRgbColor, CT_SystemColor
)
register_element_cls('a:hslClr', CT_HslColor)
register_element_cls('a:lumMod', CT_Percentage)
register_element_cls('a:lumOff', CT_Percentage)
register_element_cls('a:prstClr', CT_PresetColor)
register_element_cls('a:schemeClr', CT_SchemeColor)
register_element_cls('a:scrgbClr', CT_ScRgbColor)
register_element_cls('a:srgbClr', CT_SRgbColor)
register_element_cls('a:sysClr', CT_SystemColor)
from .dml.fill import (
    CT_Blip, CT_BlipFillProperties, CT_GradientFillProperties,
    CT_GroupFillProperties, CT_NoFillProperties, CT_PatternFillProperties,
    CT_RelativeRect, CT_SolidColorFillProperties
)
register_element_cls('a:blip', CT_Blip)
register_element_cls('a:blipFill', CT_BlipFillProperties)
register_element_cls('a:gradFill', CT_GradientFillProperties)
register_element_cls('a:grpFill', CT_GroupFillProperties)
register_element_cls('a:noFill', CT_NoFillProperties)
register_element_cls('a:pattFill', CT_PatternFillProperties)
register_element_cls('a:solidFill', CT_SolidColorFillProperties)
register_element_cls('a:srcRect', CT_RelativeRect)
# -- package part elements --
from .parts.coreprops import CT_CoreProperties
register_element_cls('cp:coreProperties', CT_CoreProperties)
from .parts.presentation import (
    CT_Presentation, CT_SlideId, CT_SlideIdList, CT_SlideMasterIdList,
    CT_SlideMasterIdListEntry, CT_SlideSize
)
register_element_cls('p:presentation', CT_Presentation)
register_element_cls('p:sldId', CT_SlideId)
register_element_cls('p:sldIdLst', CT_SlideIdList)
register_element_cls('p:sldMasterId', CT_SlideMasterIdListEntry)
register_element_cls('p:sldMasterIdLst', CT_SlideMasterIdList)
register_element_cls('p:sldSz', CT_SlideSize)
from .parts.slide import CT_CommonSlideData, CT_Slide
register_element_cls('p:cSld', CT_CommonSlideData)
register_element_cls('p:sld', CT_Slide)
from .parts.slidelayout import CT_SlideLayout
register_element_cls('p:sldLayout', CT_SlideLayout)
from .parts.slidemaster import (
    CT_SlideLayoutIdList, CT_SlideLayoutIdListEntry, CT_SlideMaster
)
register_element_cls('p:sldLayoutId', CT_SlideLayoutIdListEntry)
register_element_cls('p:sldLayoutIdLst', CT_SlideLayoutIdList)
register_element_cls('p:sldMaster', CT_SlideMaster)
# -- shape elements --
from .shapes.autoshape import (
    CT_GeomGuide, CT_GeomGuideList, CT_NonVisualDrawingShapeProps,
    CT_PresetGeometry2D, CT_Shape, CT_ShapeNonVisual
)
register_element_cls('a:avLst', CT_GeomGuideList)
register_element_cls('a:gd', CT_GeomGuide)
register_element_cls('a:prstGeom', CT_PresetGeometry2D)
register_element_cls('p:cNvSpPr', CT_NonVisualDrawingShapeProps)
register_element_cls('p:nvSpPr', CT_ShapeNonVisual)
register_element_cls('p:sp', CT_Shape)
from .shapes.connector import CT_Connector, CT_ConnectorNonVisual
register_element_cls('p:cxnSp', CT_Connector)
register_element_cls('p:nvCxnSpPr', CT_ConnectorNonVisual)
from .shapes.graphfrm import (
    CT_GraphicalObject, CT_GraphicalObjectData, CT_GraphicalObjectFrame,
    CT_GraphicalObjectFrameNonVisual
)
register_element_cls('a:graphic', CT_GraphicalObject)
register_element_cls('a:graphicData', CT_GraphicalObjectData)
register_element_cls('p:graphicFrame', CT_GraphicalObjectFrame)
register_element_cls('p:nvGraphicFramePr', CT_GraphicalObjectFrameNonVisual)
from .shapes.groupshape import (
    CT_GroupShape, CT_GroupShapeNonVisual, CT_GroupShapeProperties
)
register_element_cls('p:grpSp', CT_GroupShape)
register_element_cls('p:grpSpPr', CT_GroupShapeProperties)
register_element_cls('p:nvGrpSpPr', CT_GroupShapeNonVisual)
register_element_cls('p:spTree', CT_GroupShape)
from .shapes.picture import CT_Picture, CT_PictureNonVisual
register_element_cls('p:blipFill', CT_BlipFillProperties)
register_element_cls('p:nvPicPr', CT_PictureNonVisual)
register_element_cls('p:pic', CT_Picture)
from .shapes.shared import (
    CT_ApplicationNonVisualDrawingProps, CT_LineProperties,
    CT_NonVisualDrawingProps, CT_Placeholder, CT_Point2D, CT_PositiveSize2D,
    CT_ShapeProperties, CT_Transform2D
)
register_element_cls('a:ext', CT_PositiveSize2D)
register_element_cls('a:ln', CT_LineProperties)
register_element_cls('a:off', CT_Point2D)
register_element_cls('a:xfrm', CT_Transform2D)
register_element_cls('c:spPr', CT_ShapeProperties)
register_element_cls('p:cNvPr', CT_NonVisualDrawingProps)
register_element_cls('p:nvPr', CT_ApplicationNonVisualDrawingProps)
register_element_cls('p:ph', CT_Placeholder)
register_element_cls('p:spPr', CT_ShapeProperties)
register_element_cls('p:xfrm', CT_Transform2D)
from .shapes.table import (
    CT_Table, CT_TableCell, CT_TableCellProperties, CT_TableCol,
    CT_TableGrid, CT_TableProperties, CT_TableRow
)
register_element_cls('a:gridCol', CT_TableCol)
register_element_cls('a:tbl', CT_Table)
register_element_cls('a:tblGrid', CT_TableGrid)
register_element_cls('a:tblPr', CT_TableProperties)
register_element_cls('a:tc', CT_TableCell)
register_element_cls('a:tcPr', CT_TableCellProperties)
register_element_cls('a:tr', CT_TableRow)
# -- text elements --
from .text import (
    CT_RegularTextRun, CT_TextBody, CT_TextBodyProperties,
    CT_TextCharacterProperties, CT_TextField, CT_TextFont, CT_TextLineBreak,
    CT_TextNormalAutofit, CT_TextParagraph, CT_TextParagraphProperties,
    CT_TextSpacing, CT_TextSpacingPercent, CT_TextSpacingPoint
)
register_element_cls('a:bodyPr', CT_TextBodyProperties)
register_element_cls('a:br', CT_TextLineBreak)
register_element_cls('a:defRPr', CT_TextCharacterProperties)
register_element_cls('a:endParaRPr', CT_TextCharacterProperties)
register_element_cls('a:fld', CT_TextField)
register_element_cls('a:latin', CT_TextFont)
register_element_cls('a:lnSpc', CT_TextSpacing)
register_element_cls('a:normAutofit', CT_TextNormalAutofit)
register_element_cls('a:r', CT_RegularTextRun)
register_element_cls('a:p', CT_TextParagraph)
register_element_cls('a:pPr', CT_TextParagraphProperties)
register_element_cls('a:rPr', CT_TextCharacterProperties)
register_element_cls('a:spcAft', CT_TextSpacing)
register_element_cls('a:spcBef', CT_TextSpacing)
register_element_cls('a:spcPct', CT_TextSpacingPercent)
register_element_cls('a:spcPts', CT_TextSpacingPoint)
register_element_cls('a:txBody', CT_TextBody)
register_element_cls('c:txPr', CT_TextBody)
register_element_cls('p:txBody', CT_TextBody)
| 38.992832 | 76 | 0.781598 |
5e76f31e24c073af0a8a1852ba8c20da76966644 | 1,243 | py | Python | 高频120Leetcode/67. Add Binary.py | lixiaoruiusa/Rui7272 | fbdb87104353138d3af7f3fe2cb3c0f00ff9e449 | [
"MIT"
] | null | null | null | 高频120Leetcode/67. Add Binary.py | lixiaoruiusa/Rui7272 | fbdb87104353138d3af7f3fe2cb3c0f00ff9e449 | [
"MIT"
] | null | null | null | 高频120Leetcode/67. Add Binary.py | lixiaoruiusa/Rui7272 | fbdb87104353138d3af7f3fe2cb3c0f00ff9e449 | [
"MIT"
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Return the sum of the binary strings *a* and *b* as a binary string.

        Digits are added from least- to most-significant with a running
        carry, so no big-integer conversion is required.
        """
        indexa = len(a) - 1
        indexb = len(b) - 1
        carry = 0
        res = ""
        # Keep consuming digits while either string has digits left or a
        # final carry remains to be emitted.
        while indexa >= 0 or indexb >= 0 or carry:
            x = int(a[indexa]) if indexa >= 0 else 0
            y = int(b[indexb]) if indexb >= 0 else 0
            res = str((x + y + carry) % 2) + res
            carry = (x + y + carry) // 2
            indexa, indexb = indexa - 1, indexb - 1
        return res
'''
def addBinary(self, a, b):
i, j, carry, res = len(a)-1, len(b)-1, 0, ""
while i >= 0 or j >= 0 or carry:
if i >= 0:
carry += int(a[i])
i -= 1
if j >= 0:
carry += int(b[j])
j -= 1
res = str(carry%2) + res
carry //= 2
return res
'''
'''
def addBinary(self, a, b):
i = len(a) - 1
j = len(b) - 1
carry = 0
res = ""
while i >=0 or j >=0 or carry:
if i >=0:
carry += int(a[i])
if j >=0:
carry += int(b[j])
res = str(carry % 2) + res
carry = carry // 2
i -= 1
j -= 1
return res
''' | 21.431034 | 52 | 0.367659 |
45b95dec1d9caacca53f9f1ec25bc3a99c877f21 | 1,709 | py | Python | scripts/fuzzing/lib/dictionary.py | EnderNightLord-ChromeBook/zircon-rpi | b09b1eb3aa7a127c65568229fe10edd251869283 | [
"BSD-2-Clause"
] | 14 | 2020-10-25T05:48:36.000Z | 2021-09-20T02:46:20.000Z | scripts/fuzzing/lib/dictionary.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | null | null | null | scripts/fuzzing/lib/dictionary.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | 2 | 2020-10-25T01:13:49.000Z | 2020-10-26T02:32:13.000Z | #!/usr/bin/env python
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
class Dictionary(object):
    """Represents the dictionary of keywords for a fuzzer.

    See https://llvm.org/docs/LibFuzzer.html#dictionaries for details on
    fuzzer dictionaries. Note that libFuzzer supports at most one
    "-dict=DICT" option when being run.

    Attributes:
        fuzzer: The Fuzzer corresponding to this object.
        ns: Alias for fuzzer.ns.
        nspath: Path to dictionary in the namespace.
    """

    def __init__(self, fuzzer):
        self._fuzzer = fuzzer
        self._nspath = None

    @property
    def fuzzer(self):
        """The Fuzzer corresponding to this object."""
        return self._fuzzer

    @property
    def host(self):
        """Alias for fuzzer.host."""
        return self.fuzzer.host

    @property
    def ns(self):
        """Alias for fuzzer.ns."""
        return self.fuzzer.ns

    @property
    def nspath(self):
        """Path to dictionary in the namespace, located lazily on first use."""
        if not self._nspath:
            self.find_on_device()
        return self._nspath

    def find_on_device(self):
        """Cache the namespace path of the packaged dictionary, if present."""
        candidate = self.ns.resource(self.fuzzer.executable + '/dictionary')
        if self.ns.ls(candidate):
            self._nspath = candidate

    def replace(self, pathname):
        """Store the local file at *pathname* into the namespace and use it
        as this fuzzer's dictionary.
        """
        if not self.host.isfile(pathname):
            self.host.error('No such file: {}'.format(pathname))
        basename = os.path.basename(pathname)
        self._nspath = self.ns.data(basename)
        self.ns.store(self._nspath, pathname)
| 28.966102 | 80 | 0.624342 |
6594c29981b62ec3607669d04f2ac58702caafab | 264,686 | py | Python | nova/compute/api.py | karimull/nova | 9dcff4d4ed3e5ed5c0f58638c863562f4761495c | [
"Apache-2.0"
] | null | null | null | nova/compute/api.py | karimull/nova | 9dcff4d4ed3e5ed5c0f58638c863562f4761495c | [
"Apache-2.0"
] | null | null | null | nova/compute/api.py | karimull/nova | 9dcff4d4ed3e5ed5c0f58638c863562f4761495c | [
"Apache-2.0"
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import collections
import copy
import functools
import re
import string
from castellan import key_manager
from oslo_log import log as logging
from oslo_messaging import exceptions as oslo_exceptions
from oslo_serialization import base64 as base64utils
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import instance_list
from nova.compute import migration_list
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova import conductor
import nova.conf
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context as nova_context
from nova import crypto
from nova.db import base
from nova import exception
from nova import exception_wrapper
from nova import hooks
from nova.i18n import _
from nova import image
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields as fields_obj
from nova.objects import keypair as keypair_obj
from nova.objects import quotas as quotas_obj
from nova.pci import request as pci_request
import nova.policy
from nova import profiler
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
from nova.virt import hardware
from nova.volume import cinder
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
# NOTE(gibi): legacy notification used compute as a service but these
# calls still run on the client side of the compute service which is
# nova-api. By setting the binary to nova-api below, we can make sure
# that the new versioned notifications has the right publisher_id but the
# legacy notifications does not change.
wrap_exception = functools.partial(exception_wrapper.wrap_exception,
get_notifier=get_notifier,
binary='nova-api')
CONF = nova.conf.CONF
RO_SECURITY_GROUPS = ['default']
AGGREGATE_ACTION_UPDATE = 'Update'
AGGREGATE_ACTION_UPDATE_META = 'UpdateMeta'
AGGREGATE_ACTION_DELETE = 'Delete'
AGGREGATE_ACTION_ADD = 'Add'
BFV_RESERVE_MIN_COMPUTE_VERSION = 17
CINDER_V3_ATTACH_MIN_COMPUTE_VERSION = 24
MIN_COMPUTE_MULTIATTACH = 27
# FIXME(danms): Keep a global cache of the cells we find the
# first time we look. This needs to be refreshed on a timer or
# trigger.
CELLS = []
def check_instance_state(vm_state=None, task_state=(None,),
                         must_have_launched=True):
    """Decorator to check VM and/or task state before entry to API functions.

    If the instance is in the wrong state, or has not been successfully
    started at least once the wrapper will raise an exception.
    """
    # A value of None disables the corresponding check entirely; any other
    # iterable is normalized to a set of allowed states.
    if vm_state is not None and not isinstance(vm_state, set):
        vm_state = set(vm_state)
    if task_state is not None and not isinstance(task_state, set):
        task_state = set(task_state)

    def outer(f):
        @six.wraps(f)
        def inner(self, context, instance, *args, **kw):
            def _invalid(attr, state):
                # Build the exception reported for a disallowed state.
                return exception.InstanceInvalidState(
                    attr=attr,
                    instance_uuid=instance.uuid,
                    state=state,
                    method=f.__name__)

            if vm_state is not None and instance.vm_state not in vm_state:
                raise _invalid('vm_state', instance.vm_state)
            if (task_state is not None and
                    instance.task_state not in task_state):
                raise _invalid('task_state', instance.task_state)
            if must_have_launched and not instance.launched_at:
                raise _invalid('launched_at', instance.launched_at)

            return f(self, context, instance, *args, **kw)
        return inner
    return outer
def _set_or_none(q):
return q if q is None or isinstance(q, set) else set(q)
def reject_instance_state(vm_state=None, task_state=None):
    """Decorator. Raise InstanceInvalidState if instance is in any of the
    given states.
    """
    # None disables a check; any other iterable becomes a set of rejected
    # states.
    vm_state = _set_or_none(vm_state)
    task_state = _set_or_none(task_state)

    def outer(f):
        @six.wraps(f)
        def inner(self, context, instance, *args, **kw):
            def _rejected(attr, state):
                # Build the exception reported for a rejected state.
                return exception.InstanceInvalidState(
                    attr=attr,
                    state=state,
                    instance_uuid=instance.uuid,
                    method=f.__name__)

            if vm_state is not None and instance.vm_state in vm_state:
                raise _rejected('vm_state', instance.vm_state)
            if task_state is not None and instance.task_state in task_state:
                raise _rejected('task_state', instance.task_state)

            return f(self, context, instance, *args, **kw)
        return inner
    return outer
def check_instance_host(function):
    """Decorator requiring that the instance has been assigned a host."""
    @six.wraps(function)
    def wrapped(self, context, instance, *args, **kwargs):
        if instance.host:
            return function(self, context, instance, *args, **kwargs)
        raise exception.InstanceNotReady(instance_id=instance.uuid)
    return wrapped
def check_instance_lock(function):
    """Decorator rejecting calls against a locked instance unless the
    request context is an admin one.
    """
    @six.wraps(function)
    def wrapped(self, context, instance, *args, **kwargs):
        locked_to_caller = instance.locked and not context.is_admin
        if locked_to_caller:
            raise exception.InstanceIsLocked(instance_uuid=instance.uuid)
        return function(self, context, instance, *args, **kwargs)
    return wrapped
def check_instance_cell(fn):
    """Decorator validating the instance's cell assignment before
    dispatching to the wrapped compute API method.
    """
    @six.wraps(fn)
    def inner(self, context, instance, *args, **kwargs):
        # The API instance itself knows how to validate the cell.
        self._validate_cell(instance)
        return fn(self, context, instance, *args, **kwargs)
    return inner
def _diff_dict(orig, new):
"""Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = {k: ['-'] for k in set(orig.keys()) - set(new.keys())}
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
def load_cells():
    """Populate the global CELLS cache on first use; no-op thereafter."""
    global CELLS
    if CELLS:
        return
    CELLS = objects.CellMappingList.get_all(
        nova_context.get_admin_context())
    LOG.debug('Found %(count)i cells: %(cells)s',
              dict(count=len(CELLS),
                   cells=','.join([c.identity for c in CELLS])))
    if not CELLS:
        LOG.error('No cells are configured, unable to continue')
@profiler.trace_cls("compute_api")
class API(base.Base):
"""API for interacting with the compute manager."""
    def __init__(self, image_api=None, network_api=None, volume_api=None,
                 security_group_api=None, **kwargs):
        """Wire together the per-service API helpers this facade delegates to.

        Each *_api argument may be injected (e.g. for tests); when omitted,
        the default implementation for that service is constructed.
        """
        self.image_api = image_api or image.API()
        self.network_api = network_api or network.API()
        self.volume_api = volume_api or cinder.API()
        self.security_group_api = (security_group_api or
            openstack_driver.get_openstack_security_group_driver())
        self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.compute_task_api = conductor.ComputeTaskAPI()
        self.servicegroup_api = servicegroup.API()
        self.notifier = rpc.get_notifier('compute', CONF.host)
        # Only stand up a key manager when ephemeral-storage encryption is
        # enabled in configuration.
        if CONF.ephemeral_storage_encryption.enabled:
            self.key_manager = key_manager.API()
        super(API, self).__init__(**kwargs)
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _validate_cell(self, instance):
if self.cell_type != 'api':
return
cell_name = instance.cell_name
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance.uuid)
    def _record_action_start(self, context, instance, action):
        # Record the start of an instance action (e.g. one of the
        # instance_actions constants) in the instance's action log;
        # fire-and-forget, the result is not awaited.
        objects.InstanceAction.action_start(context, instance.uuid,
                                            action, want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
objects.Quotas.limit_check(context,
injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
objects.Quotas.limit_check(context,
injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded(
allowed=exc.kwargs['quotas']['injected_file_path_bytes'])
else:
raise exception.OnsetFileContentLimitExceeded(
allowed=exc.kwargs['quotas']['injected_file_content_bytes'])
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise exception.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
objects.Quotas.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility.
# Those are only used in V2 API, from V2.1 API, those checks are
# validated at API layer schema validation.
for k, v in metadata.items():
try:
utils.check_string_length(v)
utils.check_string_length(k, min_length=1)
except exception.InvalidInput as e:
raise exception.InvalidMetadata(reason=e.format_message())
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""Check if the security group requested exists and belongs to
the project.
:param context: The nova request context.
:type context: nova.context.RequestContext
:param secgroups: list of requested security group names, or uuids in
the case of Neutron.
:type secgroups: list
:returns: list of requested security group names unmodified if using
nova-network. If using Neutron, the list returned is all uuids.
Note that 'default' is a special case and will be unmodified if
it's requested.
"""
security_groups = []
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
security_groups.append(secgroup)
continue
secgroup_dict = self.security_group_api.get(context, secgroup)
if not secgroup_dict:
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
# Check to see if it's a nova-network or neutron type.
if isinstance(secgroup_dict['id'], int):
# This is nova-network so just return the requested name.
security_groups.append(secgroup)
else:
# The id for neutron is a uuid, so we return the id (uuid).
security_groups.append(secgroup_dict['id'])
return security_groups
    def _check_requested_networks(self, context, requested_networks,
                                  max_count):
        """Check if the networks requested belong to the project
        and the fixed IP address for each network provided is within
        the same network block.

        :param requested_networks: a NetworkRequestList, or None
        :param max_count: number of instances requested by the user
        :returns: the maximum number of instances allowed given any
            network quota constraints
        """
        if requested_networks is not None:
            if requested_networks.no_allocate:
                # If the network request was specifically 'none' meaning don't
                # allocate any networks, we just return the number of requested
                # instances since quotas don't change at all.
                return max_count
            # NOTE(danms): Temporary transition
            requested_networks = requested_networks.as_tuples()
        return self.network_api.validate_networks(context, requested_networks,
                                                  max_count)
def _handle_kernel_and_ramdisk(self, context, kernel_id, ramdisk_id,
image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of two ways:
1. Passed in with create-instance request.
2. Inherited from image metadata.
If inherited from image metadata, and if that image metadata value is
set to 'nokernel', both kernel and ramdisk will default to None.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if kernel_id indicates that a kernel is not to be used
if kernel_id == 'nokernel':
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
kernel_image = self.image_api.get(context, kernel_id)
# kernel_id could have been a URI, not a UUID, so to keep behaviour
# from before, which leaked that implementation detail out to the
# caller, we return the image UUID of the kernel image and ramdisk
# image (below) and not any image URIs that might have been
# supplied.
# TODO(jaypipes): Get rid of this silliness once we move to a real
# Image object and hide all of that stuff within nova.image.api.
kernel_id = kernel_image['id']
if ramdisk_id is not None:
ramdisk_image = self.image_api.get(context, ramdisk_id)
ramdisk_id = ramdisk_image['id']
return kernel_id, ramdisk_id
@staticmethod
def parse_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _new_instance_name_from_template(self, uuid, display_name, index):
params = {
'uuid': uuid,
'name': display_name,
'count': index + 1,
}
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception('Failed to set instance name using '
'multi_instance_display_name_template.')
new_name = display_name
return new_name
def _apply_instance_name_template(self, context, instance, index):
original_name = instance.display_name
new_name = self._new_instance_name_from_template(instance.uuid,
instance.display_name, index)
instance.display_name = new_name
if not instance.get('hostname', None):
if utils.sanitize_hostname(original_name) == "":
instance.hostname = self._default_host_name(instance.uuid)
else:
instance.hostname = utils.sanitize_hostname(new_name)
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
    def _check_requested_image(self, context, image_id, image,
                               instance_type, root_bdm):
        """Validate an image against the requested flavor and root disk.

        :param image_id: id of the image, used only in error reporting
        :param image: image metadata dict; a falsy value skips all checks
            (e.g. a volume boot with no image)
        :param instance_type: flavor providing memory_mb/root_gb
        :param root_bdm: the root block device mapping, or None
        :raises: ImageNotActive, InvalidImageConfigDrive,
            FlavorMemoryTooSmall, VolumeSmallerThanMinDisk,
            FlavorDiskSmallerThanImage, FlavorDiskSmallerThanMinDisk
        """
        if not image:
            return

        if image['status'] != 'active':
            raise exception.ImageNotActive(image_id=image_id)

        image_properties = image.get('properties', {})
        config_drive_option = image_properties.get(
            'img_config_drive', 'optional')
        if config_drive_option not in ['optional', 'mandatory']:
            raise exception.InvalidImageConfigDrive(
                config_drive=config_drive_option)

        if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
            raise exception.FlavorMemoryTooSmall()

        # Image min_disk is in gb, size is in bytes. For sanity, have them both
        # in bytes.
        image_min_disk = int(image.get('min_disk') or 0) * units.Gi
        image_size = int(image.get('size') or 0)

        # Target disk is a volume. Don't check flavor disk size because it
        # doesn't make sense, and check min_disk against the volume size.
        if (root_bdm is not None and root_bdm.is_volume):
            # There are 2 possibilities here: either the target volume already
            # exists, or it doesn't, in which case the bdm will contain the
            # intended volume size.
            #
            # Cinder does its own check against min_disk, so if the target
            # volume already exists this has already been done and we don't
            # need to check it again here. In this case, volume_size may not be
            # set on the bdm.
            #
            # If we're going to create the volume, the bdm will contain
            # volume_size. Therefore we should check it if it exists. This will
            # still be checked again by cinder when the volume is created, but
            # that will not happen until the request reaches a host. By
            # checking it here, the user gets an immediate and useful failure
            # indication.
            #
            # The third possibility is that we have failed to consider
            # something, and there are actually more than 2 possibilities. In
            # this case cinder will still do the check at volume creation time.
            # The behaviour will still be correct, but the user will not get an
            # immediate failure from the api, and will instead have to
            # determine why the instance is in an error state with a task of
            # block_device_mapping.
            #
            # We could reasonably refactor this check into _validate_bdm at
            # some future date, as the various size logic is already split out
            # in there.
            dest_size = root_bdm.volume_size
            if dest_size is not None:
                # Convert GiB to bytes to compare with image_min_disk.
                dest_size *= units.Gi

                if image_min_disk > dest_size:
                    raise exception.VolumeSmallerThanMinDisk(
                        volume_size=dest_size, image_min_disk=image_min_disk)

        # Target disk is a local disk whose size is taken from the flavor
        else:
            dest_size = instance_type['root_gb'] * units.Gi

            # NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
            # since libvirt interpreted the value differently than other
            # drivers. A value of 0 means don't check size.
            if dest_size != 0:
                if image_size > dest_size:
                    raise exception.FlavorDiskSmallerThanImage(
                        flavor_size=dest_size, image_size=image_size)

                if image_min_disk > dest_size:
                    raise exception.FlavorDiskSmallerThanMinDisk(
                        flavor_size=dest_size, image_min_disk=image_min_disk)
def _get_image_defined_bdms(self, instance_type, image_meta,
root_device_name):
image_properties = image_meta.get('properties', {})
# Get the block device mappings defined by the image.
image_defined_bdms = image_properties.get('block_device_mapping', [])
legacy_image_defined = not image_properties.get('bdm_v2', False)
image_mapping = image_properties.get('mappings', [])
if legacy_image_defined:
image_defined_bdms = block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
else:
image_defined_bdms = list(map(block_device.BlockDeviceDict,
image_defined_bdms))
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
image_mapping)
image_defined_bdms = self._merge_bdms_lists(
image_mapping, image_defined_bdms)
return image_defined_bdms
def _get_flavor_defined_bdms(self, instance_type, block_device_mapping):
flavor_defined_bdms = []
have_ephemeral_bdms = any(filter(
block_device.new_format_is_ephemeral, block_device_mapping))
have_swap_bdms = any(filter(
block_device.new_format_is_swap, block_device_mapping))
if instance_type.get('ephemeral_gb') and not have_ephemeral_bdms:
flavor_defined_bdms.append(
block_device.create_blank_bdm(instance_type['ephemeral_gb']))
if instance_type.get('swap') and not have_swap_bdms:
flavor_defined_bdms.append(
block_device.create_blank_bdm(instance_type['swap'], 'swap'))
return flavor_defined_bdms
def _merge_bdms_lists(self, overridable_mappings, overrider_mappings):
"""Override any block devices from the first list by device name
:param overridable_mappings: list which items are overridden
:param overrider_mappings: list which items override
:returns: A merged list of bdms
"""
device_names = set(bdm['device_name'] for bdm in overrider_mappings
if bdm['device_name'])
return (overrider_mappings +
[bdm for bdm in overridable_mappings
if bdm['device_name'] not in device_names])
    def _check_and_transform_bdm(self, context, base_options, instance_type,
                                 image_meta, min_count, max_count,
                                 block_device_mapping, legacy_bdm):
        """Validate and normalize all block device mappings for a boot.

        Merges image-defined and flavor-defined BDMs with the requested
        ones, converting legacy-format input to the new format, and
        returns them as a BlockDeviceMappingList of objects.

        :param legacy_bdm: True if block_device_mapping is in the legacy
            (device-name keyed) format
        :raises: InvalidRequest on conflicting or multi-instance-unsafe
            mappings
        """
        # NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
        #                  It's needed for legacy conversion to work.
        root_device_name = (base_options.get('root_device_name') or 'vda')
        image_ref = base_options.get('image_ref', '')
        # If the instance is booted by image and has a volume attached,
        # the volume cannot have the same device name as root_device_name
        if image_ref:
            for bdm in block_device_mapping:
                if (bdm.get('destination_type') == 'volume' and
                        block_device.strip_dev(bdm.get(
                        'device_name')) == root_device_name):
                    msg = _('The volume cannot be assigned the same device'
                            ' name as the root device %s') % root_device_name
                    raise exception.InvalidRequest(msg)

        image_defined_bdms = self._get_image_defined_bdms(
            instance_type, image_meta, root_device_name)
        root_in_image_bdms = (
            block_device.get_root_bdm(image_defined_bdms) is not None)

        if legacy_bdm:
            # Convert the requested mappings to the new format; skip
            # creating a root mapping if the image already defines one.
            block_device_mapping = block_device.from_legacy_mapping(
                block_device_mapping, image_ref, root_device_name,
                no_root=root_in_image_bdms)
        elif root_in_image_bdms:
            # NOTE (ndipanov): client will insert an image mapping into the v2
            # block_device_mapping, but if there is a bootable device in image
            # mappings - we need to get rid of the inserted image
            # NOTE (gibi): another case is when a server is booted with an
            # image to bdm mapping where the image only contains a bdm to a
            # snapshot. In this case the other image to bdm mapping
            # contains an unnecessary device with boot_index == 0.
            # Also in this case the image_ref is None as we are booting from
            # an image to volume bdm.
            def not_image_and_root_bdm(bdm):
                return not (bdm.get('boot_index') == 0 and
                            bdm.get('source_type') == 'image')

            block_device_mapping = list(
                filter(not_image_and_root_bdm, block_device_mapping))

        block_device_mapping = self._merge_bdms_lists(
            image_defined_bdms, block_device_mapping)

        if min_count > 1 or max_count > 1:
            # A single volume cannot be attached to multiple instances.
            if any(map(lambda bdm: bdm['source_type'] == 'volume',
                       block_device_mapping)):
                msg = _('Cannot attach one or more volumes to multiple'
                        ' instances')
                raise exception.InvalidRequest(msg)

        block_device_mapping += self._get_flavor_defined_bdms(
            instance_type, block_device_mapping)

        return block_device_obj.block_device_make_list_from_dicts(
            context, block_device_mapping)
def _get_image(self, context, image_href):
if not image_href:
return None, {}
image = self.image_api.get(context, image_href)
return image['id'], image
    def _checks_for_create_and_rebuild(self, context, image_id, image,
                                       instance_type, metadata,
                                       files_to_inject, root_bdm):
        """Run the validations shared by instance create and rebuild.

        Checks metadata quota, injected-file quota and the image/flavor
        compatibility; each check raises its own exception on violation.
        """
        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, files_to_inject)
        self._check_requested_image(context, image_id, image,
                                    instance_type, root_bdm)
    def _validate_and_build_base_options(self, context, instance_type,
                                         boot_meta, image_href, image_id,
                                         kernel_id, ramdisk_id, display_name,
                                         display_description, key_name,
                                         key_data, security_groups,
                                         availability_zone, user_data,
                                         metadata, access_ip_v4, access_ip_v6,
                                         requested_networks, config_drive,
                                         auto_disk_config, reservation_id,
                                         max_count):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed.

        :returns: a tuple of (base_options dict for the instance,
            max instance count allowed by network quotas, the KeyPair
            object or None, the validated security group list)
        :raises: FlavorNotFound, InstanceUserDataMalformed, InvalidRequest
            and the exceptions of the delegated checks.
        """
        if instance_type['disabled']:
            raise exception.FlavorNotFound(flavor_id=instance_type['id'])

        if user_data:
            # user_data must be valid base64.
            try:
                base64utils.decode_as_bytes(user_data)
            except TypeError:
                raise exception.InstanceUserDataMalformed()

        # When using Neutron, _check_requested_secgroups will translate and
        # return any requested security group names to uuids.
        security_groups = (
            self._check_requested_secgroups(context, security_groups))

        # Note:  max_count is the number of instances requested by the user,
        # max_network_count is the maximum number of instances taking into
        # account any network quotas
        max_network_count = self._check_requested_networks(context,
                requested_networks, max_count)

        kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
                context, kernel_id, ramdisk_id, boot_meta)

        config_drive = self._check_config_drive(config_drive)

        # Resolve key data from the named keypair unless it was passed in
        # explicitly.
        if key_data is None and key_name is not None:
            key_pair = objects.KeyPair.get_by_name(context,
                                                   context.user_id,
                                                   key_name)
            key_data = key_pair.public_key
        else:
            key_pair = None

        root_device_name = block_device.prepend_dev(
                block_device.properties_root_device_name(
                    boot_meta.get('properties', {})))

        try:
            image_meta = objects.ImageMeta.from_dict(boot_meta)
        except ValueError as e:
            # there must be invalid values in the image meta properties so
            # consider this an invalid request
            msg = _('Invalid image metadata. Error: %s') % six.text_type(e)
            raise exception.InvalidRequest(msg)

        numa_topology = hardware.numa_get_constraints(
                instance_type, image_meta)

        system_metadata = {}

        # PCI requests come from two sources: instance flavor and
        # requested_networks. The first call in below returns an
        # InstancePCIRequests object which is a list of InstancePCIRequest
        # objects. The second call in below creates an InstancePCIRequest
        # object for each SR-IOV port, and append it to the list in the
        # InstancePCIRequests object
        pci_request_info = pci_request.get_pci_requests_from_flavor(
            instance_type)
        self.network_api.create_pci_requests_for_sriov_ports(context,
            pci_request_info, requested_networks)

        base_options = {
            'reservation_id': reservation_id,
            'image_ref': image_href,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'power_state': power_state.NOSTATE,
            'vm_state': vm_states.BUILDING,
            'config_drive': config_drive,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'root_gb': instance_type['root_gb'],
            'ephemeral_gb': instance_type['ephemeral_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data,
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata or {},
            'access_ip_v4': access_ip_v4,
            'access_ip_v6': access_ip_v6,
            'availability_zone': availability_zone,
            'root_device_name': root_device_name,
            'progress': 0,
            'pci_requests': pci_request_info,
            'numa_topology': numa_topology,
            'system_metadata': system_metadata}

        options_from_image = self._inherit_properties_from_image(
                boot_meta, auto_disk_config)

        base_options.update(options_from_image)

        # return the validated options and maximum number of instances allowed
        # by the network quotas
        return base_options, max_network_count, key_pair, security_groups
    def _provision_instances(self, context, instance_type, min_count,
            max_count, base_options, boot_meta, security_groups,
            block_device_mapping, shutdown_terminate,
            instance_group, check_server_group_quota, filter_properties,
            key_pair, tags, supports_multiattach=False):
        """Create the pre-scheduling artifacts for a batch of instances.

        For each instance to be built this creates a RequestSpec, a
        BuildRequest and an InstanceMapping (with no cell yet), without
        creating the Instance rows themselves. On any failure all artifacts
        created so far are cleaned up before re-raising.

        :returns: a list of (RequestSpec, BuildRequest, InstanceMapping)
            tuples, one per instance to build.
        """
        # Check quotas
        num_instances = compute_utils.check_num_instances_quota(
                context, instance_type, min_count, max_count)
        security_groups = self.security_group_api.populate_security_groups(
                security_groups)
        self.security_group_api.ensure_default(context)
        LOG.debug("Going to run %s instances...", num_instances)
        instances_to_build = []
        try:
            for i in range(num_instances):
                # Create a uuid for the instance so we can store the
                # RequestSpec before the instance is created.
                instance_uuid = uuidutils.generate_uuid()
                # Store the RequestSpec that will be used for scheduling.
                req_spec = objects.RequestSpec.from_components(context,
                        instance_uuid, boot_meta, instance_type,
                        base_options['numa_topology'],
                        base_options['pci_requests'], filter_properties,
                        instance_group, base_options['availability_zone'],
                        security_groups=security_groups)
                # NOTE(danms): We need to record num_instances on the request
                # spec as this is how the conductor knows how many were in this
                # batch.
                req_spec.num_instances = num_instances
                req_spec.create()

                # Create an instance object, but do not store in db yet.
                instance = objects.Instance(context=context)
                instance.uuid = instance_uuid
                instance.update(base_options)
                instance.keypairs = objects.KeyPairList(objects=[])
                if key_pair:
                    instance.keypairs.objects.append(key_pair)
                instance = self.create_db_entry_for_new_instance(context,
                        instance_type, boot_meta, instance, security_groups,
                        block_device_mapping, num_instances, i,
                        shutdown_terminate, create_instance=False)
                block_device_mapping = (
                    self._bdm_validate_set_size_and_instance(context,
                        instance, instance_type, block_device_mapping,
                        supports_multiattach))
                instance_tags = self._transform_tags(tags, instance.uuid)

                build_request = objects.BuildRequest(context,
                        instance=instance, instance_uuid=instance.uuid,
                        project_id=instance.project_id,
                        block_device_mappings=block_device_mapping,
                        tags=instance_tags)
                build_request.create()

                # Create an instance_mapping.  The null cell_mapping indicates
                # that the instance doesn't yet exist in a cell, and lookups
                # for it need to instead look for the RequestSpec.
                # cell_mapping will be populated after scheduling, with a
                # scheduling failure using the cell_mapping for the special
                # cell0.
                inst_mapping = objects.InstanceMapping(context=context)
                inst_mapping.instance_uuid = instance_uuid
                inst_mapping.project_id = context.project_id
                inst_mapping.cell_mapping = None
                inst_mapping.create()

                instances_to_build.append(
                    (req_spec, build_request, inst_mapping))

                if instance_group:
                    if check_server_group_quota:
                        try:
                            objects.Quotas.check_deltas(
                                context, {'server_group_members': 1},
                                instance_group, context.user_id)
                        except exception.OverQuota:
                            msg = _("Quota exceeded, too many servers in "
                                    "group")
                            raise exception.QuotaError(msg)

                    members = objects.InstanceGroup.add_members(
                        context, instance_group.uuid, [instance.uuid])

                    # NOTE(melwitt): We recheck the quota after creating the
                    # object to prevent users from allocating more resources
                    # than their allowed quota in the event of a race. This is
                    # configurable because it can be expensive if strict quota
                    # limits are not required in a deployment.
                    if CONF.quota.recheck_quota and check_server_group_quota:
                        try:
                            objects.Quotas.check_deltas(
                                context, {'server_group_members': 0},
                                instance_group, context.user_id)
                        except exception.OverQuota:
                            objects.InstanceGroup._remove_members_in_db(
                                context, instance_group.id, [instance.uuid])
                            msg = _("Quota exceeded, too many servers in "
                                    "group")
                            raise exception.QuotaError(msg)
                    # list of members added to servers group in this iteration
                    # is needed to check quota of server group during add next
                    # instance
                    instance_group.members.extend(members)

        # In the case of any exceptions, attempt DB cleanup
        except Exception:
            with excutils.save_and_reraise_exception():
                self._cleanup_build_artifacts(None, instances_to_build)

        return instances_to_build
    def _get_bdm_image_metadata(self, context, block_device_mapping,
                                legacy_bdm=True):
        """If we are booting from a volume, we need to get the
        volume details from Cinder and make sure we pass the
        metadata back accordingly.

        Only the boot device is considered: with legacy BDMs that is the
        device with letter 'a', otherwise the one with boot_index 0.

        :returns: an image metadata dict, or {} when none can be derived.
        :raises: InvalidBDMImage, InvalidBDMVolume,
            InvalidBDMVolumeNotBootable, CinderConnectionFailed
        """
        if not block_device_mapping:
            return {}

        for bdm in block_device_mapping:
            # Skip everything that is not the boot device.
            if (legacy_bdm and
                    block_device.get_device_letter(
                       bdm.get('device_name', '')) != 'a'):
                continue
            elif not legacy_bdm and bdm.get('boot_index') != 0:
                continue

            volume_id = bdm.get('volume_id')
            snapshot_id = bdm.get('snapshot_id')
            if snapshot_id:
                # NOTE(alaski): A volume snapshot inherits metadata from the
                # originating volume, but the API does not expose metadata
                # on the snapshot itself.  So we query the volume for it below.
                snapshot = self.volume_api.get_snapshot(context, snapshot_id)
                volume_id = snapshot['volume_id']

            if bdm.get('image_id'):
                try:
                    image_id = bdm['image_id']
                    image_meta = self.image_api.get(context, image_id)
                    return image_meta
                except Exception:
                    raise exception.InvalidBDMImage(id=image_id)
            elif volume_id:
                try:
                    volume = self.volume_api.get(context, volume_id)
                except exception.CinderConnectionFailed:
                    # Connectivity problems should surface as-is, not as an
                    # invalid BDM.
                    raise
                except Exception:
                    raise exception.InvalidBDMVolume(id=volume_id)

                if not volume.get('bootable', True):
                    raise exception.InvalidBDMVolumeNotBootable(id=volume_id)

                return utils.get_image_metadata_from_volume(volume)
        return {}
@staticmethod
def _get_requested_instance_group(context, filter_properties):
if (not filter_properties or
not filter_properties.get('scheduler_hints')):
return
group_hint = filter_properties.get('scheduler_hints').get('group')
if not group_hint:
return
return objects.InstanceGroup.get_by_uuid(context, group_hint)
    def _create_instance(self, context, instance_type,
               image_href, kernel_id, ramdisk_id,
               min_count, max_count,
               display_name, display_description,
               key_name, key_data, security_groups,
               availability_zone, user_data, metadata, injected_files,
               admin_password, access_ip_v4, access_ip_v6,
               requested_networks, config_drive,
               block_device_mapping, auto_disk_config, filter_properties,
               reservation_id=None, legacy_bdm=True, shutdown_terminate=False,
               check_server_group_quota=False, tags=None,
               supports_multiattach=False):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed and schedule the instance(s) for
        creation.

        :returns: a tuple of (list of Instance objects, reservation_id)
        :raises: PortLimitExceeded when network quotas cannot cover even
            min_count, plus the exceptions of the delegated validations.
        """

        # Normalize and setup some parameters
        if reservation_id is None:
            reservation_id = utils.generate_uid('r')
        security_groups = security_groups or ['default']
        min_count = min_count or 1
        max_count = max_count or min_count
        block_device_mapping = block_device_mapping or []
        tags = tags or []

        if image_href:
            image_id, boot_meta = self._get_image(context, image_href)
        else:
            # Boot from volume: derive image metadata from the boot BDM.
            image_id = None
            boot_meta = self._get_bdm_image_metadata(
                context, block_device_mapping, legacy_bdm)

        self._check_auto_disk_config(image=boot_meta,
                                     auto_disk_config=auto_disk_config)

        base_options, max_net_count, key_pair, security_groups = \
                self._validate_and_build_base_options(
                    context, instance_type, boot_meta, image_href, image_id,
                    kernel_id, ramdisk_id, display_name, display_description,
                    key_name, key_data, security_groups, availability_zone,
                    user_data, metadata, access_ip_v4, access_ip_v6,
                    requested_networks, config_drive, auto_disk_config,
                    reservation_id, max_count)

        # max_net_count is the maximum number of instances requested by the
        # user adjusted for any network quota constraints, including
        # consideration of connections to each requested network
        if max_net_count < min_count:
            raise exception.PortLimitExceeded()
        elif max_net_count < max_count:
            LOG.info("max count reduced from %(max_count)d to "
                     "%(max_net_count)d due to network port quota",
                     {'max_count': max_count,
                      'max_net_count': max_net_count})
            max_count = max_net_count

        block_device_mapping = self._check_and_transform_bdm(context,
            base_options, instance_type, boot_meta, min_count, max_count,
            block_device_mapping, legacy_bdm)

        # We can't do this check earlier because we need bdms from all sources
        # to have been merged in order to get the root bdm.
        self._checks_for_create_and_rebuild(context, image_id, boot_meta,
                instance_type, metadata, injected_files,
                block_device_mapping.root_bdm())

        instance_group = self._get_requested_instance_group(context,
                                   filter_properties)

        tags = self._create_tag_list_obj(context, tags)

        instances_to_build = self._provision_instances(
            context, instance_type, min_count, max_count, base_options,
            boot_meta, security_groups, block_device_mapping,
            shutdown_terminate, instance_group, check_server_group_quota,
            filter_properties, key_pair, tags, supports_multiattach)

        instances = []
        request_specs = []
        build_requests = []
        for rs, build_request, im in instances_to_build:
            build_requests.append(build_request)
            instance = build_request.get_new_instance(context)
            instances.append(instance)
            request_specs.append(rs)

        if CONF.cells.enable:
            # NOTE(danms): CellsV1 can't do the new thing, so we
            # do the old thing here. We can remove this path once
            # we stop supporting v1.
            for instance in instances:
                instance.create()
            # NOTE(melwitt): We recheck the quota after creating the objects
            # to prevent users from allocating more resources than their
            # allowed quota in the event of a race. This is configurable
            # because it can be expensive if strict quota limits are not
            # required in a deployment.
            if CONF.quota.recheck_quota:
                try:
                    compute_utils.check_num_instances_quota(
                        context, instance_type, 0, 0,
                        orig_num_req=len(instances))
                except exception.TooManyInstances:
                    with excutils.save_and_reraise_exception():
                        # Need to clean up all the instances we created
                        # along with the build requests, request specs,
                        # and instance mappings.
                        self._cleanup_build_artifacts(instances,
                                                      instances_to_build)

            self.compute_task_api.build_instances(context,
                instances=instances, image=boot_meta,
                filter_properties=filter_properties,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=block_device_mapping,
                legacy_bdm=False)
        else:
            # Cells v2 path: conductor schedules and creates instances.
            self.compute_task_api.schedule_and_build_instances(
                context,
                build_requests=build_requests,
                request_spec=request_specs,
                image=boot_meta,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                block_device_mapping=block_device_mapping,
                tags=tags)

        return instances, reservation_id
@staticmethod
def _cleanup_build_artifacts(instances, instances_to_build):
# instances_to_build is a list of tuples:
# (RequestSpec, BuildRequest, InstanceMapping)
# Be paranoid about artifacts being deleted underneath us.
for instance in instances or []:
try:
instance.destroy()
except exception.InstanceNotFound:
pass
for rs, build_request, im in instances_to_build or []:
try:
rs.destroy()
except exception.RequestSpecNotFound:
pass
try:
build_request.destroy()
except exception.BuildRequestNotFound:
pass
try:
im.destroy()
except exception.InstanceMappingNotFound:
pass
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug("Image bdm %s", bdm)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _bdm_validate_set_size_and_instance(self, context, instance,
instance_type,
block_device_mapping,
supports_multiattach=False):
"""Ensure the bdms are valid, then set size and associate with instance
Because this method can be called multiple times when more than one
instance is booted in a single request it makes a copy of the bdm list.
"""
LOG.debug("block_device_mapping %s", list(block_device_mapping),
instance_uuid=instance.uuid)
self._validate_bdm(
context, instance, instance_type, block_device_mapping,
supports_multiattach)
instance_block_device_mapping = block_device_mapping.obj_clone()
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
bdm.instance_uuid = instance.uuid
return instance_block_device_mapping
def _create_block_device_mapping(self, block_device_mapping):
# Copy the block_device_mapping because this method can be called
# multiple times when more than one instance is booted in a single
# request. This avoids 'id' being set and triggering the object dupe
# detection
db_block_device_mapping = copy.deepcopy(block_device_mapping)
# Create the BlockDeviceMapping objects in the db.
for bdm in db_block_device_mapping:
# TODO(alaski): Why is this done?
if bdm.volume_size == 0:
continue
bdm.update_or_create()
    def _validate_bdm(self, context, instance, instance_type,
                      block_device_mappings, supports_multiattach=False):
        """Validate block device mappings for an instance build.

        Checks boot-index ordering, per-source-type constraints (image,
        volume, snapshot, blank), the aggregate ephemeral size against the
        flavor, the number and size of swap devices, and the configured
        local block device limit.  As a side effect, volume_size may be
        populated on volume- and snapshot-backed entries.

        :param context: nova request context
        :param instance: Instance object the bdms will be attached to
        :param instance_type: flavor supplying 'ephemeral_gb' and 'swap'
        :param block_device_mappings: BlockDeviceMappingList to validate
        :param supports_multiattach: whether the request supports
            multiattach volumes
        :raises: InvalidBDMBootSequence, InvalidBDMImage, InvalidBDM,
            InvalidVolume, InvalidBDMVolume, InvalidBDMSnapshot,
            InvalidBDMEphemeralSize, InvalidBDMFormat, InvalidBDMSwapSize,
            InvalidBDMLocalsLimit, CinderConnectionFailed
        """
        # Make sure that the boot indexes make sense.
        # Setting a negative value or None indicates that the device should not
        # be used for booting.
        boot_indexes = sorted([bdm.boot_index
                               for bdm in block_device_mappings
                               if bdm.boot_index is not None
                               and bdm.boot_index >= 0])
        # Each device which is capable of being used as boot device should
        # be given a unique boot index, starting from 0 in ascending order.
        if any(i != v for i, v in enumerate(boot_indexes)):
            # Convert the BlockDeviceMappingList to a list for repr details.
            LOG.debug('Invalid block device mapping boot sequence for '
                      'instance: %s', list(block_device_mappings),
                      instance=instance)
            raise exception.InvalidBDMBootSequence()
        for bdm in block_device_mappings:
            # NOTE(vish): For now, just make sure the volumes are accessible.
            # Additionally, check that the volume can be attached to this
            # instance.
            snapshot_id = bdm.snapshot_id
            volume_id = bdm.volume_id
            image_id = bdm.image_id
            if (image_id is not None and
                    image_id != instance.get('image_ref')):
                try:
                    self._get_image(context, image_id)
                except Exception:
                    raise exception.InvalidBDMImage(id=image_id)
                if (bdm.source_type == 'image' and
                        bdm.destination_type == 'volume' and
                        not bdm.volume_size):
                    raise exception.InvalidBDM(message=_("Images with "
                        "destination_type 'volume' need to have a non-zero "
                        "size specified"))
            elif volume_id is not None:
                # The instance is being created and we don't know which
                # cell it's going to land in, so check all cells.
                min_compute_version = \
                    objects.service.get_minimum_version_all_cells(
                        context, ['nova-compute'])
                try:
                    # NOTE(ildikov): The boot from volume operation did not
                    # reserve the volume before Pike and as the older computes
                    # are running 'check_attach' which will fail if the volume
                    # is in 'attaching' state; if the compute service version
                    # is not high enough we will just perform the old check as
                    # opposed to reserving the volume here.
                    volume = self.volume_api.get(context, volume_id)
                    if (min_compute_version >=
                            BFV_RESERVE_MIN_COMPUTE_VERSION):
                        self._check_attach_and_reserve_volume(
                            context, volume, instance, bdm,
                            supports_multiattach)
                    else:
                        # NOTE(ildikov): This call is here only for backward
                        # compatibility can be removed after Ocata EOL.
                        self._check_attach(context, volume, instance)
                    bdm.volume_size = volume.get('size')
                    # NOTE(mnaser): If we end up reserving the volume, it will
                    #               not have an attachment_id which is needed
                    #               for cleanups. This can be removed once
                    #               all calls to reserve_volume are gone.
                    if 'attachment_id' not in bdm:
                        bdm.attachment_id = None
                except (exception.CinderConnectionFailed,
                        exception.InvalidVolume,
                        exception.MultiattachNotSupportedOldMicroversion,
                        exception.MultiattachSupportNotYetAvailable):
                    raise
                except exception.InvalidInput as exc:
                    raise exception.InvalidVolume(reason=exc.format_message())
                except Exception:
                    raise exception.InvalidBDMVolume(id=volume_id)
            elif snapshot_id is not None:
                try:
                    snap = self.volume_api.get_snapshot(context, snapshot_id)
                    bdm.volume_size = bdm.volume_size or snap.get('size')
                except exception.CinderConnectionFailed:
                    raise
                except Exception:
                    raise exception.InvalidBDMSnapshot(id=snapshot_id)
            elif (bdm.source_type == 'blank' and
                    bdm.destination_type == 'volume' and
                    not bdm.volume_size):
                raise exception.InvalidBDM(message=_("Blank volumes "
                    "(source: 'blank', dest: 'volume') need to have non-zero "
                    "size"))
        # Ephemeral devices without an explicit size fall back to the whole
        # flavor ephemeral allowance, so the sum may not exceed it.
        ephemeral_size = sum(bdm.volume_size or instance_type['ephemeral_gb']
                             for bdm in block_device_mappings
                             if block_device.new_format_is_ephemeral(bdm))
        if ephemeral_size > instance_type['ephemeral_gb']:
            raise exception.InvalidBDMEphemeralSize()
        # There should be only one swap
        swap_list = block_device.get_bdm_swap_list(block_device_mappings)
        if len(swap_list) > 1:
            msg = _("More than one swap drive requested.")
            raise exception.InvalidBDMFormat(details=msg)
        if swap_list:
            swap_size = swap_list[0].volume_size or 0
            if swap_size > instance_type['swap']:
                raise exception.InvalidBDMSwapSize()
        # A negative max_local_block_devices disables the limit entirely.
        max_local = CONF.max_local_block_devices
        if max_local >= 0:
            num_local = len([bdm for bdm in block_device_mappings
                             if bdm.destination_type == 'local'])
            if num_local > max_local:
                raise exception.InvalidBDMLocalsLimit()
def _check_attach(self, context, volume, instance):
# TODO(ildikov): This check_attach code is kept only for backward
# compatibility and should be removed after Ocata EOL.
if volume['status'] != 'available':
msg = _("volume '%(vol)s' status must be 'available'. Currently "
"in '%(status)s'") % {'vol': volume['id'],
'status': volume['status']}
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == 'attached':
msg = _("volume %s already attached") % volume['id']
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_availability_zone(context, volume,
instance=instance)
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
# NOTE(mriedem): This is only here for test simplicity since a server
# name is required in the REST API.
if display_name is None:
display_name = self._default_display_name(instance.uuid)
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
default_hostname = self._default_host_name(instance.uuid)
instance.hostname = utils.sanitize_hostname(hostname,
default_hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _default_host_name(self, instance_uuid):
return "Server-%s" % instance_uuid
    def _populate_instance_for_create(self, context, instance, image,
                                      index, security_groups, instance_type,
                                      num_instances, shutdown_terminate):
        """Build the beginning of a new instance.

        Sets scheduling state, info cache, flavor, optional ephemeral disk
        encryption key, image system metadata, security groups, names and
        the shutdown_terminate flag on *instance*.

        :param index: zero-based index of this instance within the request
        :param num_instances: total instances in the request; affects
            hostname generation and name templating
        :returns: the populated (not yet persisted) instance
        """
        instance.launch_index = index
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.SCHEDULING
        info_cache = objects.InstanceInfoCache()
        info_cache.instance_uuid = instance.uuid
        info_cache.network_info = network_model.NetworkInfo()
        instance.info_cache = info_cache
        instance.flavor = instance_type
        instance.old_flavor = None
        instance.new_flavor = None
        if CONF.ephemeral_storage_encryption.enabled:
            # NOTE(kfarr): dm-crypt expects the cipher in a
            # hyphenated format: cipher-chainmode-ivmode
            # (ex: aes-xts-plain64). The algorithm needs
            # to be parsed out to pass to the key manager (ex: aes).
            cipher = CONF.ephemeral_storage_encryption.cipher
            algorithm = cipher.split('-')[0] if cipher else None
            instance.ephemeral_key_uuid = self.key_manager.create_key(
                context,
                algorithm=algorithm,
                length=CONF.ephemeral_storage_encryption.key_size)
        else:
            instance.ephemeral_key_uuid = None
        # Store image properties so we can use them later
        # (for notifications, etc). Only store what we can.
        if not instance.obj_attr_is_set('system_metadata'):
            instance.system_metadata = {}
        # Make sure we have the dict form that we need for instance_update.
        instance.system_metadata = utils.instance_sys_meta(instance)
        system_meta = utils.get_system_metadata_from_image(
            image, instance_type)
        # In case we couldn't find any suitable base_image
        system_meta.setdefault('image_base_image_ref', instance.image_ref)
        system_meta['owner_user_name'] = context.user_name
        system_meta['owner_project_name'] = context.project_name
        instance.system_metadata.update(system_meta)
        if CONF.use_neutron:
            # For Neutron we don't actually store anything in the database, we
            # proxy the security groups on the instance from the ports
            # attached to the instance.
            instance.security_groups = objects.SecurityGroupList()
        else:
            instance.security_groups = security_groups
        self._populate_instance_names(instance, num_instances)
        instance.shutdown_terminate = shutdown_terminate
        # Multi-instance names are templated per-index outside the API cell.
        if num_instances > 1 and self.cell_type != 'api':
            instance = self._apply_instance_name_template(context, instance,
                                                          index)
        return instance
def _create_tag_list_obj(self, context, tags):
"""Create TagList objects from simple string tags.
:param context: security context.
:param tags: simple string tags from API request.
:returns: TagList object.
"""
tag_list = [objects.Tag(context=context, tag=t) for t in tags]
tag_list_obj = objects.TagList(objects=tag_list)
return tag_list_obj
def _transform_tags(self, tags, resource_id):
"""Change the resource_id of the tags according to the input param.
Because this method can be called multiple times when more than one
instance is booted in a single request it makes a copy of the tags
list.
:param tags: TagList object.
:param resource_id: string.
:returns: TagList object.
"""
instance_tags = tags.obj_clone()
for tag in instance_tags:
tag.resource_id = resource_id
return instance_tags
# This method remains because cellsv1 uses it in the scheduler
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index, shutdown_terminate=False, create_instance=True):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
:param create_instance: Determines if the instance is created here or
just populated for later creation. This is done so that this code
can be shared with cellsv1 which needs the instance creation to
happen here. It should be removed and this method cleaned up when
cellsv1 is a distant memory.
"""
self._populate_instance_for_create(context, instance, image, index,
security_group, instance_type,
num_instances, shutdown_terminate)
if create_instance:
instance.create()
return instance
def _check_multiple_instances_with_neutron_ports(self,
requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for requested_net in requested_networks:
if requested_net.port_id:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
def _check_multiple_instances_with_specified_ip(self, requested_networks):
"""Check whether multiple instances are created with specified ip."""
for requested_net in requested_networks:
if requested_net.network_id and requested_net.address:
msg = _("max_count cannot be greater than 1 if an fixed_ip "
"is specified.")
raise exception.InvalidFixedIpAndMaxCountRequest(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_groups=None,
availability_zone=None, forced_host=None, forced_node=None,
user_data=None, metadata=None, injected_files=None,
admin_password=None, block_device_mapping=None,
access_ip_v4=None, access_ip_v6=None, requested_networks=None,
config_drive=None, auto_disk_config=None, scheduler_hints=None,
legacy_bdm=True, shutdown_terminate=False,
check_server_group_quota=False, tags=None,
supports_multiattach=False):
"""Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
if requested_networks and max_count is not None and max_count > 1:
self._check_multiple_instances_with_specified_ip(
requested_networks)
if utils.is_neutron():
self._check_multiple_instances_with_neutron_ports(
requested_networks)
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
filter_properties = scheduler_utils.build_filter_properties(
scheduler_hints, forced_host, forced_node, instance_type)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
filter_properties=filter_properties,
legacy_bdm=legacy_bdm,
shutdown_terminate=shutdown_terminate,
check_server_group_quota=check_server_group_quota,
tags=tags, supports_multiattach=supports_multiattach)
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
    def _lookup_instance(self, context, uuid):
        '''Helper method for pulling an instance object from a database.

        During the transition to cellsv2 there is some complexity around
        retrieving an instance from the database which this method hides. If
        there is an instance mapping then query the cell for the instance, if
        no mapping exists then query the configured nova database.

        Once we are past the point that all deployments can be assumed to be
        migrated to cellsv2 this method can go away.

        :returns: a (cell_mapping, instance) tuple; both elements are None
            when the instance does not exist (or was already deleted), and
            cell_mapping alone is None when the instance was found in the
            configured (non-cell) database.
        '''
        inst_map = None
        try:
            inst_map = objects.InstanceMapping.get_by_instance_uuid(
                context, uuid)
        except exception.InstanceMappingNotFound:
            # TODO(alaski): This exception block can be removed once we're
            # guaranteed everyone is using cellsv2.
            pass
        if (inst_map is None or inst_map.cell_mapping is None or
                CONF.cells.enable):
            # If inst_map is None then the deployment has not migrated to
            # cellsv2 yet.
            # If inst_map.cell_mapping is None then the instance is not in a
            # cell yet. Until instance creation moves to the conductor the
            # instance can be found in the configured database, so attempt
            # to look it up.
            # If we're on cellsv1, we can't yet short-circuit the cells
            # messaging path
            cell = None
            try:
                instance = objects.Instance.get_by_uuid(context, uuid)
            except exception.InstanceNotFound:
                # If we get here then the conductor is in charge of writing the
                # instance to the database and hasn't done that yet. It's up to
                # the caller of this method to determine what to do with that
                # information.
                return None, None
        else:
            cell = inst_map.cell_mapping
            with nova_context.target_cell(context, cell) as cctxt:
                try:
                    instance = objects.Instance.get_by_uuid(cctxt, uuid)
                except exception.InstanceNotFound:
                    # Since the cell_mapping exists we know the instance is in
                    # the cell, however InstanceNotFound means it's already
                    # deleted.
                    return None, None
        return cell, instance
    def _delete_while_booting(self, context, instance):
        """Handle deletion if the instance has not reached a cell yet

        Deletion before an instance reaches a cell needs to be handled
        differently. What we're attempting to do is delete the BuildRequest
        before the api level conductor does. If we succeed here then the boot
        request stops before reaching a cell. If not then the instance will
        need to be looked up in a cell db and the normal delete path taken.

        :returns: True if the delete was fully handled here (the build was
            halted before reaching a cell), False if the normal delete path
            should proceed.
        """
        deleted = self._attempt_delete_of_buildrequest(context, instance)
        # After service version 15 deletion of the BuildRequest will halt the
        # build process in the conductor. In that case run the rest of this
        # method and consider the instance deleted. If we have not yet reached
        # service version 15 then just return False so the rest of the delete
        # process will proceed usually.
        service_version = objects.Service.get_minimum_version(
            context, 'nova-osapi_compute')
        if service_version < 15:
            return False
        if deleted:
            # If we've reached this block the successful deletion of the
            # buildrequest indicates that the build process should be halted by
            # the conductor.
            # NOTE(alaski): Though the conductor halts the build process it
            # does not currently delete the instance record. This is
            # because in the near future the instance record will not be
            # created if the buildrequest has been deleted here. For now we
            # ensure the instance has been set to deleted at this point.
            # Yes this directly contradicts the comment earlier in this
            # method, but this is a temporary measure.
            # Look up the instance because the current instance object was
            # stashed on the buildrequest and therefore not complete enough
            # to run .destroy().
            try:
                instance_uuid = instance.uuid
                cell, instance = self._lookup_instance(context, instance_uuid)
                if instance is not None:
                    # If instance is None it has already been deleted.
                    if cell:
                        with nova_context.target_cell(context, cell) as cctxt:
                            # FIXME: When the instance context is targeted,
                            # we can remove this
                            with compute_utils.notify_about_instance_delete(
                                    self.notifier, cctxt, instance):
                                instance.destroy()
                    else:
                        instance.destroy()
            except exception.InstanceNotFound:
                pass
            return True
        return False
def _attempt_delete_of_buildrequest(self, context, instance):
# If there is a BuildRequest then the instance may not have been
# written to a cell db yet. Delete the BuildRequest here, which
# will indicate that the Instance build should not proceed.
try:
build_req = objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
build_req.destroy()
except exception.BuildRequestNotFound:
# This means that conductor has deleted the BuildRequest so the
# instance is now in a cell and the delete needs to proceed
# normally.
return False
# We need to detach from any volumes so they aren't orphaned.
self._local_cleanup_bdm_volumes(
build_req.block_device_mappings, instance, context)
return True
    def _delete(self, context, instance, delete_type, cb, **instance_attrs):
        """Delete an instance, covering the cellsv1/cellsv2 and
        not-yet-scheduled corner cases.

        :param delete_type: 'delete', 'soft_delete' or 'force_delete';
            used for notifications
        :param cb: callback performing the actual delete (one of the
            _do_*delete methods), invoked as cb(context, instance, bdms)
            or, for a local delete, cb(..., local=True)
        :param instance_attrs: attributes (e.g. task_state) set on the
            instance before deletion proceeds
        """
        if instance.disable_terminate:
            LOG.info('instance termination disabled', instance=instance)
            return
        cell = None
        # If there is an instance.host (or the instance is shelved-offloaded or
        # in error state), the instance has been scheduled and sent to a
        # cell/compute which means it was pulled from the cell db.
        # Normal delete should be attempted.
        may_have_ports_or_volumes = self._may_have_ports_or_volumes(instance)
        if not instance.host and not may_have_ports_or_volumes:
            try:
                if self._delete_while_booting(context, instance):
                    return
                # If instance.host was not set it's possible that the Instance
                # object here was pulled from a BuildRequest object and is not
                # fully populated. Notably it will be missing an 'id' field
                # which will prevent instance.destroy from functioning
                # properly. A lookup is attempted which will either return a
                # full Instance or None if not found. If not found then it's
                # acceptable to skip the rest of the delete processing.
                cell, instance = self._lookup_instance(context, instance.uuid)
                if cell and instance:
                    try:
                        # Now destroy the instance from the cell it lives in.
                        with compute_utils.notify_about_instance_delete(
                                self.notifier, context, instance):
                            instance.destroy()
                    except exception.InstanceNotFound:
                        pass
                    # The instance was deleted or is already gone.
                    return
                if not instance:
                    # Instance is already deleted.
                    return
            except exception.ObjectActionError:
                # NOTE(melwitt): This means the instance.host changed
                # under us indicating the instance became scheduled
                # during the destroy(). Refresh the instance from the DB and
                # continue on with the delete logic for a scheduled instance.
                # NOTE(danms): If instance.host is set, we should be able to
                # do the following lookup. If not, there's not much we can
                # do to recover.
                cell, instance = self._lookup_instance(context, instance.uuid)
                if not instance:
                    # Instance is already deleted
                    return
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        # At these states an instance has a snapshot associate.
        if instance.vm_state in (vm_states.SHELVED,
                                 vm_states.SHELVED_OFFLOADED):
            snapshot_id = instance.system_metadata.get('shelved_image_id')
            LOG.info("Working on deleting snapshot %s "
                     "from shelved instance...",
                     snapshot_id, instance=instance)
            try:
                self.image_api.delete(context, snapshot_id)
            except (exception.ImageNotFound,
                    exception.ImageNotAuthorized) as exc:
                LOG.warning("Failed to delete snapshot "
                            "from shelved instance (%s).",
                            exc.format_message(), instance=instance)
            except Exception:
                LOG.exception("Something wrong happened when trying to "
                              "delete snapshot from shelved instance.",
                              instance=instance)
        original_task_state = instance.task_state
        try:
            # NOTE(maoy): no expected_task_state needs to be set
            instance.update(instance_attrs)
            instance.progress = 0
            instance.save()
            # NOTE(dtp): cells.enable = False means "use cells v2".
            # Run everywhere except v1 compute cells.
            if not CONF.cells.enable or self.cell_type == 'api':
                self.consoleauth_rpcapi.delete_tokens_for_instance(
                    context, instance.uuid)
            if self.cell_type == 'api':
                # NOTE(comstud): If we're in the API cell, we need to
                # skip all remaining logic and just call the callback,
                # which will cause a cast to the child cell.
                cb(context, instance, bdms)
                return
            if not instance.host and not may_have_ports_or_volumes:
                try:
                    # Soft deletes are reported as plain deletes when the
                    # instance never reached a host.
                    with compute_utils.notify_about_instance_delete(
                            self.notifier, context, instance,
                            delete_type
                            if delete_type != 'soft_delete'
                            else 'delete'):
                        instance.destroy()
                    LOG.info('Instance deleted and does not have host '
                             'field, its vm_state is %(state)s.',
                             {'state': instance.vm_state},
                             instance=instance)
                    return
                except exception.ObjectActionError as ex:
                    # The instance's host likely changed under us as
                    # this instance could be building and has since been
                    # scheduled. Continue with attempts to delete it.
                    LOG.debug('Refreshing instance because: %s', ex,
                              instance=instance)
                    instance.refresh()
            if instance.vm_state == vm_states.RESIZED:
                self._confirm_resize_on_deleting(context, instance)
            is_local_delete = True
            try:
                # instance.host must be set in order to look up the service.
                if instance.host is not None:
                    service = objects.Service.get_by_compute_host(
                        context.elevated(), instance.host)
                    is_local_delete = not self.servicegroup_api.service_is_up(
                        service)
                if not is_local_delete:
                    if original_task_state in (task_states.DELETING,
                                               task_states.SOFT_DELETING):
                        LOG.info('Instance is already in deleting state, '
                                 'ignoring this request',
                                 instance=instance)
                        return
                    self._record_action_start(context, instance,
                                              instance_actions.DELETE)
                    cb(context, instance, bdms)
            except exception.ComputeHostNotFound:
                LOG.debug('Compute host %s not found during service up check, '
                          'going to local delete instance', instance.host,
                          instance=instance)
            if is_local_delete:
                # If instance is in shelved_offloaded state or compute node
                # isn't up, delete instance from db and clean bdms info and
                # network info
                if cell is None:
                    # NOTE(danms): If we didn't get our cell from one of the
                    # paths above, look it up now.
                    try:
                        im = objects.InstanceMapping.get_by_instance_uuid(
                            context, instance.uuid)
                        cell = im.cell_mapping
                    except exception.InstanceMappingNotFound:
                        LOG.warning('During local delete, failed to find '
                                    'instance mapping', instance=instance)
                        return
                LOG.debug('Doing local delete in cell %s', cell.identity,
                          instance=instance)
                with nova_context.target_cell(context, cell) as cctxt:
                    self._local_delete(cctxt, instance, bdms, delete_type, cb)
        except exception.InstanceNotFound:
            # NOTE(comstud): Race condition. Instance already gone.
            pass
def _may_have_ports_or_volumes(self, instance):
# NOTE(melwitt): When an instance build fails in the compute manager,
# the instance host and node are set to None and the vm_state is set
# to ERROR. In the case, the instance with host = None has actually
# been scheduled and may have ports and/or volumes allocated on the
# compute node.
if instance.vm_state in (vm_states.SHELVED_OFFLOADED, vm_states.ERROR):
return True
return False
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
migration = None
for status in ('finished', 'confirming'):
try:
migration = objects.Migration.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s',
{'id': migration.id,
'status': migration.status},
instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info('Instance may have been confirmed during delete',
instance=instance)
return
src_host = migration.source_compute
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration, src_host, cast=False)
def _get_stashed_volume_connector(self, bdm, instance):
"""Lookup a connector dict from the bdm.connection_info if set
Gets the stashed connector dict out of the bdm.connection_info if set
and the connector host matches the instance host.
:param bdm: nova.objects.block_device.BlockDeviceMapping
:param instance: nova.objects.instance.Instance
:returns: volume connector dict or None
"""
if 'connection_info' in bdm and bdm.connection_info is not None:
# NOTE(mriedem): We didn't start stashing the connector in the
# bdm.connection_info until Mitaka so it might not be there on old
# attachments. Also, if the volume was attached when the instance
# was in shelved_offloaded state and it hasn't been unshelved yet
# we don't have the attachment/connection information either.
connector = jsonutils.loads(bdm.connection_info).get('connector')
if connector:
if connector.get('host') == instance.host:
return connector
LOG.debug('Found stashed volume connector for instance but '
'connector host %(connector_host)s does not match '
'the instance host %(instance_host)s.',
{'connector_host': connector.get('host'),
'instance_host': instance.host}, instance=instance)
if (instance.host is None and
self._may_have_ports_or_volumes(instance)):
LOG.debug('Allowing use of stashed volume connector with '
'instance host None because instance with '
'vm_state %(vm_state)s has been scheduled in '
'the past.', {'vm_state': instance.vm_state},
instance=instance)
return connector
    def _local_cleanup_bdm_volumes(self, bdms, instance, context):
        """The method deletes the bdm records and, if a bdm is a volume, call
        the terminate connection and the detach volume via the Volume API.

        Volume cleanup is best effort: any failure is logged and ignored so
        the remaining bdms are still processed.

        :param bdms: BlockDeviceMappingList of the instance's bdms
        :param instance: the instance the bdms belong to
        :param context: nova request context
        """
        elevated = context.elevated()
        for bdm in bdms:
            if bdm.is_volume:
                try:
                    # New-style attachments are removed via the attachment
                    # API; old-style ones need terminate_connection + detach.
                    if bdm.attachment_id:
                        self.volume_api.attachment_delete(context,
                                                          bdm.attachment_id)
                    else:
                        connector = self._get_stashed_volume_connector(
                            bdm, instance)
                        if connector:
                            self.volume_api.terminate_connection(context,
                                                                 bdm.volume_id,
                                                                 connector)
                        else:
                            LOG.debug('Unable to find connector for volume %s,'
                                      ' not attempting terminate_connection.',
                                      bdm.volume_id, instance=instance)
                        # Attempt to detach the volume. If there was no
                        # connection made in the first place this is just
                        # cleaning up the volume state in the Cinder DB.
                        self.volume_api.detach(elevated, bdm.volume_id,
                                               instance.uuid)
                    if bdm.delete_on_termination:
                        self.volume_api.delete(context, bdm.volume_id)
                except Exception as exc:
                    LOG.warning("Ignoring volume cleanup failure due to %s",
                                exc, instance=instance)
            # If we're cleaning up volumes from an instance that wasn't yet
            # created in a cell, i.e. the user deleted the server while
            # the BuildRequest still existed, then the BDM doesn't actually
            # exist in the DB to destroy it.
            if 'id' in bdm:
                bdm.destroy()
    def _local_delete(self, context, instance, bdms, delete_type, cb):
        """Delete an instance directly from the database.

        Used when the compute host is down or the instance is
        shelved-offloaded: deallocates networking (outside the API cell),
        cleans up volumes, runs the delete callback locally and finally
        destroys the instance record, sending delete notifications around
        the whole sequence.
        """
        if instance.vm_state == vm_states.SHELVED_OFFLOADED:
            LOG.info("instance is in SHELVED_OFFLOADED state, cleanup"
                     " the instance's info from database.",
                     instance=instance)
        else:
            LOG.warning("instance's host %s is down, deleting from "
                        "database", instance.host, instance=instance)
        with compute_utils.notify_about_instance_delete(
                self.notifier, context, instance,
                delete_type if delete_type != 'soft_delete' else 'delete'):
            elevated = context.elevated()
            if self.cell_type != 'api':
                # NOTE(liusheng): In nova-network multi_host scenario,deleting
                # network info of the instance may need instance['host'] as
                # destination host of RPC call. If instance in
                # SHELVED_OFFLOADED state, instance['host'] is None, here, use
                # shelved_host as host to deallocate network info and reset
                # instance['host'] after that. Here we shouldn't use
                # instance.save(), because this will mislead user who may think
                # the instance's host has been changed, and actually, the
                # instance.host is always None.
                orig_host = instance.host
                try:
                    if instance.vm_state == vm_states.SHELVED_OFFLOADED:
                        sysmeta = getattr(instance,
                                          obj_base.get_attrname(
                                              'system_metadata'))
                        instance.host = sysmeta.get('shelved_host')
                    self.network_api.deallocate_for_instance(elevated,
                                                             instance)
                finally:
                    instance.host = orig_host
            # cleanup volumes
            self._local_cleanup_bdm_volumes(bdms, instance, context)
            cb(context, instance, bdms, local=True)
        instance.destroy()
def _do_delete(self, context, instance, bdms, local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
delete_type='delete')
def _do_force_delete(self, context, instance, bdms, local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
delete_type='force_delete')
def _do_soft_delete(self, context, instance, bdms, local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug('Going to try to soft delete instance',
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug("Going to try to terminate instance", instance=instance)
self._delete_instance(context, instance)
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.SOFT_DELETED])
    def restore(self, context, instance):
        """Restore a previously deleted (but not reclaimed) instance.

        Re-checks the instance quota before restoring. If the instance
        still has a host, the compute service performs the restore;
        otherwise the instance is flipped back to ACTIVE directly in the
        database.
        """
        # Check quotas
        flavor = instance.get_flavor()
        project_id, user_id = quotas_obj.ids_from_instance(context, instance)
        compute_utils.check_num_instances_quota(context, flavor, 1, 1,
                                                project_id=project_id,
                                                user_id=user_id)

        self._record_action_start(context, instance, instance_actions.RESTORE)

        if instance.host:
            # A compute host is assigned: let it perform the restore.
            instance.task_state = task_states.RESTORING
            instance.deleted_at = None
            instance.save(expected_task_state=[None])
            # TODO(melwitt): We're not rechecking for strict quota here to
            # guard against going over quota during a race at this time because
            # the resource consumption for this operation is written to the
            # database by compute.
            self.compute_rpcapi.restore_instance(context, instance)
        else:
            # No host: update the record directly.
            instance.vm_state = vm_states.ACTIVE
            instance.task_state = None
            instance.deleted_at = None
            instance.save(expected_task_state=[None])
@check_instance_lock
@check_instance_state(task_state=None,
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete an instance in any vm_state/task_state."""
self._delete(context, instance, 'force_delete', self._do_force_delete,
task_state=task_states.DELETING)
def force_stop(self, context, instance, do_cast=True, clean_shutdown=True):
LOG.debug("Going to try to stop instance", instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast,
clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR])
def stop(self, context, instance, do_cast=True, clean_shutdown=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast, clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug("Going to try to start instance", instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=vm_states.ALLOW_TRIGGER_CRASH_DUMP)
def trigger_crash_dump(self, context, instance):
"""Trigger crash dump in an instance."""
LOG.debug("Try to trigger crash dump", instance=instance)
self._record_action_start(context, instance,
instance_actions.TRIGGER_CRASH_DUMP)
self.compute_rpcapi.trigger_crash_dump(context, instance)
def _get_instance_map_or_none(self, context, instance_uuid):
try:
inst_map = objects.InstanceMapping.get_by_instance_uuid(
context, instance_uuid)
except exception.InstanceMappingNotFound:
# InstanceMapping should always be found generally. This exception
# may be raised if a deployment has partially migrated the nova-api
# services.
inst_map = None
return inst_map
    def _get_instance(self, context, instance_uuid, expected_attrs):
        """Fetch an Instance by UUID, consulting cell mappings and build
        requests as appropriate for the deployment's service version.

        :raises: InstanceNotFound when no instance (or pending build
            request) exists for the UUID.
        """
        # Before service version 15 the BuildRequest is not cleaned up during
        # a delete request so there is no reason to look it up here as we can't
        # trust that it's not referencing a deleted instance. Also even if
        # there is an instance mapping we don't need to honor it for older
        # service versions.
        service_version = objects.Service.get_minimum_version(
            context, 'nova-osapi_compute')
        # If we're on cellsv1, we also need to consult the top-level
        # merged replica instead of the cell directly, so fall through
        # here in that case as well.
        if service_version < 15 or CONF.cells.enable:
            return objects.Instance.get_by_uuid(context, instance_uuid,
                                                expected_attrs=expected_attrs)
        inst_map = self._get_instance_map_or_none(context, instance_uuid)
        if inst_map and (inst_map.cell_mapping is not None):
            # The instance lives in a cell: target the context there.
            nova_context.set_target_cell(context, inst_map.cell_mapping)
            instance = objects.Instance.get_by_uuid(
                context, instance_uuid, expected_attrs=expected_attrs)
        elif inst_map and (inst_map.cell_mapping is None):
            # This means the instance has not been scheduled and put in
            # a cell yet. For now it also may mean that the deployer
            # has not created their cell(s) yet.
            try:
                build_req = objects.BuildRequest.get_by_instance_uuid(
                    context, instance_uuid)
                instance = build_req.instance
            except exception.BuildRequestNotFound:
                # Instance was mapped and the BuildRequest was deleted
                # while fetching. Try again.
                inst_map = self._get_instance_map_or_none(context,
                                                          instance_uuid)
                if inst_map and (inst_map.cell_mapping is not None):
                    nova_context.set_target_cell(context,
                                                 inst_map.cell_mapping)
                    instance = objects.Instance.get_by_uuid(
                        context, instance_uuid,
                        expected_attrs=expected_attrs)
                else:
                    raise exception.InstanceNotFound(instance_id=instance_uuid)
        else:
            # No mapping exists at all for this UUID.
            raise exception.InstanceNotFound(instance_id=instance_uuid)

        return instance
def get(self, context, instance_id, expected_attrs=None):
"""Get a single instance with the given instance_id."""
if not expected_attrs:
expected_attrs = []
expected_attrs.extend(['metadata', 'system_metadata',
'security_groups', 'info_cache'])
# NOTE(ameade): we still need to support integer ids for ec2
try:
if uuidutils.is_uuid_like(instance_id):
LOG.debug("Fetching instance by UUID",
instance_uuid=instance_id)
instance = self._get_instance(context, instance_id,
expected_attrs)
else:
LOG.debug("Failed to fetch instance by id %s", instance_id)
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
LOG.debug("Invalid instance id %s", instance_id)
raise exception.InstanceNotFound(instance_id=instance_id)
return instance
    def get_all(self, context, search_opts=None, limit=None, marker=None,
                expected_attrs=None, sort_keys=None, sort_dirs=None):
        """Get all instances filtered by one of the given parameters.

        If there is no filter and the context is an admin, it will retrieve
        all instances in the system.

        Deleted instances will be returned by default, unless there is a
        search option that says otherwise.

        The results will be sorted based on the list of sort keys in the
        'sort_keys' parameter (first value is primary sort key, second value is
        secondary sort key, etc.). For each sort key, the associated sort
        direction is based on the list of sort directions in the 'sort_dirs'
        parameter.
        """
        if search_opts is None:
            search_opts = {}

        LOG.debug("Searching by: %s", str(search_opts))

        # Fixups for the DB call
        filters = {}

        def _remap_flavor_filter(flavor_id):
            # The DB filters on the internal instance_type_id, not the
            # user-facing flavor id.
            flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
            filters['instance_type_id'] = flavor.id

        def _remap_fixed_ip_filter(fixed_ip):
            # Turn fixed_ip into a regexp match. Since '.' matches
            # any character, we need to use regexp escaping for it.
            filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')

        # search_option to filter_name mapping.
        filter_mapping = {
                'image': 'image_ref',
                'name': 'display_name',
                'tenant_id': 'project_id',
                'flavor': _remap_flavor_filter,
                'fixed_ip': _remap_fixed_ip_filter}

        # copy from search_opts, doing various remappings as necessary
        for opt, value in search_opts.items():
            # Do remappings.
            # Values not in the filter_mapping table are copied as-is.
            # If remapping is None, option is not copied
            # If the remapping is a string, it is the filter_name to use
            try:
                remap_object = filter_mapping[opt]
            except KeyError:
                filters[opt] = value
            else:
                # Remaps are strings to translate to, or functions to call
                # to do the translating as defined by the table above.
                if isinstance(remap_object, six.string_types):
                    filters[remap_object] = value
                else:
                    try:
                        remap_object(value)

                    # We already know we can't match the filter, so
                    # return an empty list
                    except ValueError:
                        return objects.InstanceList()

        # IP address filtering cannot be applied at the DB layer, remove any DB
        # limit so that it can be applied after the IP filter.
        filter_ip = 'ip6' in filters or 'ip' in filters
        orig_limit = limit
        if filter_ip:
            if self.network_api.has_substr_port_filtering_extension(context):
                # We're going to filter by IP using Neutron so set filter_ip
                # to False so we don't attempt post-DB query filtering in
                # memory below.
                filter_ip = False
                instance_uuids = self._ip_filter_using_neutron(context,
                                                               filters)
                if instance_uuids:
                    # Note that 'uuid' is not in the 2.1 GET /servers query
                    # parameter schema, however, we allow additionalProperties
                    # so someone could filter instances by uuid, which doesn't
                    # make a lot of sense but we have to account for it.
                    if 'uuid' in filters and filters['uuid']:
                        filter_uuids = filters['uuid']
                        if isinstance(filter_uuids, list):
                            instance_uuids.extend(filter_uuids)
                        else:
                            # Assume a string. If it's a dict or tuple or
                            # something, well...that's too bad. This is why
                            # we have query parameter schema definitions.
                            if filter_uuids not in instance_uuids:
                                instance_uuids.append(filter_uuids)
                    filters['uuid'] = instance_uuids
                else:
                    # No matches on the ip filter(s), return an empty list.
                    return objects.InstanceList()
            elif limit:
                LOG.debug('Removing limit for DB query due to IP filter')
                limit = None

        # The ordering of instances will be
        # [sorted instances with no host] + [sorted instances with host].
        # This means BuildRequest and cell0 instances first, then cell
        # instances
        try:
            build_requests = objects.BuildRequestList.get_by_filters(
                context, filters, limit=limit, marker=marker,
                sort_keys=sort_keys, sort_dirs=sort_dirs)
            # If we found the marker here then we need to set it to None
            # so we don't expect to find it in the cells below.
            marker = None
        except exception.MarkerNotFound:
            # If we didn't find the marker in the build requests then keep
            # looking for it in the cells.
            build_requests = objects.BuildRequestList()

        build_req_instances = objects.InstanceList(
            objects=[build_req.instance for build_req in build_requests])
        # Only subtract from limit if it is not None
        limit = (limit - len(build_req_instances)) if limit else limit

        # We could arguably avoid joining on security_groups if we're using
        # neutron (which is the default) but if you're using neutron then the
        # security_group_instance_association table should be empty anyway
        # and the DB should optimize out that join, making it insignificant.
        fields = ['metadata', 'info_cache', 'security_groups']
        if expected_attrs:
            fields.extend(expected_attrs)

        if CONF.cells.enable:
            insts = self._do_old_style_instance_list_for_poor_cellsv1_users(
                context, filters, limit, marker, fields, sort_keys,
                sort_dirs)
        else:
            insts = instance_list.get_instance_objects_sorted(
                context, filters, limit, marker, fields, sort_keys, sort_dirs)

        def _get_unique_filter_method():
            # NOTE(review): de-dup by uuid keeping the first occurrence —
            # presumably an instance can briefly appear both as a
            # BuildRequest and in a cell; confirm against conductor flow.
            seen_uuids = set()

            def _filter(instance):
                if instance.uuid in seen_uuids:
                    return False
                seen_uuids.add(instance.uuid)
                return True

            return _filter

        filter_method = _get_unique_filter_method()
        # Only subtract from limit if it is not None
        limit = (limit - len(insts)) if limit else limit
        # TODO(alaski): Clean up the objects concatenation when List objects
        # support it natively.
        instances = objects.InstanceList(
            objects=list(filter(filter_method,
                                build_req_instances.objects +
                                insts.objects)))

        if filter_ip:
            instances = self._ip_filter(instances, filters, orig_limit)

        return instances
    def _do_old_style_instance_list_for_poor_cellsv1_users(self,
                                                           context, filters,
                                                           limit, marker,
                                                           fields,
                                                           sort_keys,
                                                           sort_dirs):
        """List instances the cells v1 way: query cell0 and then the
        top-level database, instead of iterating all v2 cells.

        :returns: concatenation of cell0 instances and cell instances,
            with limit/marker consumed across the two queries in order.
        """
        try:
            cell0_mapping = objects.CellMapping.get_by_uuid(context,
                objects.CellMapping.CELL0_UUID)
        except exception.CellMappingNotFound:
            # No cell0 configured: nothing to list from it.
            cell0_instances = objects.InstanceList(objects=[])
        else:
            with nova_context.target_cell(context, cell0_mapping) as cctxt:
                try:
                    cell0_instances = self._get_instances_by_filters(
                        cctxt, filters, limit=limit, marker=marker,
                        fields=fields, sort_keys=sort_keys,
                        sort_dirs=sort_dirs)
                    # If we found the marker in cell0 we need to set it to None
                    # so we don't expect to find it in the cells below.
                    marker = None
                except exception.MarkerNotFound:
                    # We can ignore this since we need to look in the cell DB
                    cell0_instances = objects.InstanceList(objects=[])
        # Only subtract from limit if it is not None
        limit = (limit - len(cell0_instances)) if limit else limit

        # There is only planned support for a single cell here. Multiple cell
        # instance lists should be proxied to project Searchlight, or a similar
        # alternative.
        if limit is None or limit > 0:
            # NOTE(melwitt): If we're on cells v1, we need to read
            # instances from the top-level database because reading from
            # cells results in changed behavior, because of the syncing.
            # We can remove this path once we stop supporting cells v1.
            cell_instances = self._get_instances_by_filters(
                context, filters, limit=limit, marker=marker,
                fields=fields, sort_keys=sort_keys,
                sort_dirs=sort_dirs)
        else:
            LOG.debug('Limit excludes any results from real cells')
            cell_instances = objects.InstanceList(objects=[])

        return cell0_instances + cell_instances
@staticmethod
def _ip_filter(inst_models, filters, limit):
ipv4_f = re.compile(str(filters.get('ip')))
ipv6_f = re.compile(str(filters.get('ip6')))
def _match_instance(instance):
nw_info = instance.get_network_info()
for vif in nw_info:
for fixed_ip in vif.fixed_ips():
address = fixed_ip.get('address')
if not address:
continue
version = fixed_ip.get('version')
if ((version == 4 and ipv4_f.match(address)) or
(version == 6 and ipv6_f.match(address))):
return True
return False
result_objs = []
for instance in inst_models:
if _match_instance(instance):
result_objs.append(instance)
if limit and len(result_objs) == limit:
break
return objects.InstanceList(objects=result_objs)
def _ip_filter_using_neutron(self, context, filters):
ip4_address = filters.get('ip')
ip6_address = filters.get('ip6')
addresses = [ip4_address, ip6_address]
uuids = []
for address in addresses:
if address:
try:
ports = self.network_api.list_ports(
context, fixed_ips='ip_address_substr=' + address,
fields=['device_id'])['ports']
for port in ports:
uuids.append(port['device_id'])
except Exception as e:
LOG.error('An error occurred while listing ports '
'with an ip_address filter value of "%s". '
'Error: %s',
address, six.text_type(e))
return uuids
def _get_instances_by_filters(self, context, filters,
limit=None, marker=None, fields=None,
sort_keys=None, sort_dirs=None):
return objects.InstanceList.get_by_filters(
context, filters=filters, limit=limit, marker=marker,
expected_attrs=fields, sort_keys=sort_keys, sort_dirs=sort_dirs)
    def update_instance(self, context, instance, updates):
        """Updates a single Instance object with some updates dict.

        Handles both persisted instances (saved in their cell database)
        and not-yet-scheduled instances (updated via their BuildRequest),
        including the race where the BuildRequest disappears mid-update.

        Returns the updated instance.
        """
        # NOTE(sbauza): Given we only persist the Instance object after we
        # create the BuildRequest, we are sure that if the Instance object
        # has an ID field set, then it was persisted in the right Cell DB.
        if instance.obj_attr_is_set('id'):
            instance.update(updates)
            # Instance has been scheduled and the BuildRequest has been deleted
            # we can directly write the update down to the right cell.
            inst_map = self._get_instance_map_or_none(context, instance.uuid)
            # If we have a cell_mapping and we're not on cells v1, then
            # look up the instance in the cell database
            if inst_map and (inst_map.cell_mapping is not None) and (
                    not CONF.cells.enable):
                with nova_context.target_cell(context,
                                              inst_map.cell_mapping) as cctxt:
                    with instance.obj_alternate_context(cctxt):
                        instance.save()
            else:
                # If inst_map.cell_mapping does not point at a cell then cell
                # migration has not happened yet.
                # TODO(alaski): Make this a failure case after we put in
                # a block that requires migrating to cellsv2.
                instance.save()
        else:
            # Instance is not yet mapped to a cell, so we need to update
            # BuildRequest instead
            # TODO(sbauza): Fix the possible race conditions where BuildRequest
            # could be deleted because of either a concurrent instance delete
            # or because the scheduler just returned a destination right
            # after we called the instance in the API.
            try:
                build_req = objects.BuildRequest.get_by_instance_uuid(
                    context, instance.uuid)
                instance = build_req.instance
                instance.update(updates)
                # FIXME(sbauza): Here we are updating the current
                # thread-related BuildRequest object. Given that another worker
                # could have looking up at that BuildRequest in the API, it
                # means that it could pass it down to the conductor without
                # making sure that it's not updated, we could have some race
                # condition where it would missing the updated fields, but
                # that's something we could discuss once the instance record
                # is persisted by the conductor.
                build_req.save()
            except exception.BuildRequestNotFound:
                # Instance was mapped and the BuildRequest was deleted
                # while fetching (and possibly the instance could have been
                # deleted as well). We need to lookup again the Instance object
                # in order to correctly update it.
                # TODO(sbauza): Figure out a good way to know the expected
                # attributes by checking which fields are set or not.
                expected_attrs = ['flavor', 'pci_devices', 'numa_topology',
                                  'tags', 'metadata', 'system_metadata',
                                  'security_groups', 'info_cache']
                inst_map = self._get_instance_map_or_none(context,
                                                          instance.uuid)
                if inst_map and (inst_map.cell_mapping is not None):
                    with nova_context.target_cell(
                            context,
                            inst_map.cell_mapping) as cctxt:
                        instance = objects.Instance.get_by_uuid(
                            cctxt, instance.uuid,
                            expected_attrs=expected_attrs)
                        instance.update(updates)
                        instance.save()
                else:
                    # If inst_map.cell_mapping does not point at a cell then
                    # cell migration has not happened yet.
                    # TODO(alaski): Make this a failure case after we put in
                    # a block that requires migrating to cellsv2.
                    instance = objects.Instance.get_by_uuid(
                        context, instance.uuid, expected_attrs=expected_attrs)
                    instance.update(updates)
                    instance.save()
        return instance
# NOTE(melwitt): We don't check instance lock for backup because lock is
# intended to prevent accidental change/delete of instances
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.objects.instance.Instance object
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
props_copy = dict(extra_properties, backup_type=backup_type)
if compute_utils.is_volume_backed_instance(context, instance):
LOG.info("It's not supported to backup volume backed "
"instance.", instance=instance)
raise exception.InvalidRequest(
_('Backup is not supported for volume-backed instances.'))
else:
image_meta = self._create_image(context, instance,
name, 'backup',
extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.BACKUP)
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
    # NOTE(melwitt): We don't check instance lock for snapshot because lock is
    # intended to prevent accidental change/delete of instances
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.PAUSED, vm_states.SUSPENDED])
    def snapshot(self, context, instance, name, extra_properties=None):
        """Snapshot the given instance.

        :param instance: nova.objects.instance.Instance object
        :param name: name of the snapshot
        :param extra_properties: dict of extra image properties to include
                                 when creating the image.
        :returns: A dict containing image metadata
        :raises: InstanceInvalidState if the instance disappears while
            the snapshot is being started (the placeholder image is
            cleaned up first).
        """
        image_meta = self._create_image(context, instance, name,
                                        'snapshot',
                                        extra_properties=extra_properties)

        # NOTE(comstud): Any changes to this method should also be made
        # to the snapshot_instance() method in nova/cells/messaging.py
        instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
        try:
            instance.save(expected_task_state=[None])
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError) as ex:
            # Changing the instance task state to use in raising the
            # InstanceInvalidException below
            LOG.debug('Instance disappeared during snapshot.',
                      instance=instance)
            try:
                # Best-effort removal of the placeholder image created above.
                image_id = image_meta['id']
                self.image_api.delete(context, image_id)
                LOG.info('Image %s deleted because instance '
                         'deleted before snapshot started.',
                         image_id, instance=instance)
            except exception.ImageNotFound:
                pass
            except Exception as exc:
                LOG.warning("Error while trying to clean up image %(img_id)s: "
                            "%(error_msg)s",
                            {"img_id": image_meta['id'],
                             "error_msg": six.text_type(exc)})
            attr = 'task_state'
            state = task_states.DELETING
            if type(ex) == exception.InstanceNotFound:
                attr = 'vm_state'
                state = vm_states.DELETED
            raise exception.InstanceInvalidState(attr=attr,
                                           instance_uuid=instance.uuid,
                                           state=state,
                                           method='snapshot')

        self._record_action_start(context, instance,
                                  instance_actions.CREATE_IMAGE)

        self.compute_rpcapi.snapshot_instance(context, instance,
                                              image_meta['id'])

        return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.objects.instance.Instance object
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param extra_properties: dict of extra image properties to include
"""
properties = {
'instance_uuid': instance.uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
properties.update(extra_properties or {})
image_meta = self._initialize_instance_snapshot_metadata(
instance, name, properties)
# if we're making a snapshot, omit the disk and container formats,
# since the image may have been converted to another format, and the
# original values won't be accurate. The driver will populate these
# with the correct values later, on image upload.
if image_type == 'snapshot':
image_meta.pop('disk_format', None)
image_meta.pop('container_format', None)
return self.image_api.create(context, image_meta)
def _initialize_instance_snapshot_metadata(self, instance, name,
extra_properties=None):
"""Initialize new metadata for a snapshot of the given instance.
:param instance: nova.objects.instance.Instance object
:param name: string for name of the snapshot
:param extra_properties: dict of extra metadata properties to include
:returns: the new instance snapshot metadata
"""
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
image_meta.update({'name': name,
'is_public': False})
# Delete properties that are non-inheritable
properties = image_meta['properties']
for key in CONF.non_inheritable_image_properties:
properties.pop(key, None)
# The properties in extra_properties have precedence
properties.update(extra_properties or {})
return image_meta
    # NOTE(melwitt): We don't check instance lock for snapshot because lock is
    # intended to prevent accidental change/delete of instances
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.SUSPENDED])
    def snapshot_volume_backed(self, context, instance, name,
                               extra_properties=None):
        """Snapshot the given volume-backed instance.

        :param instance: nova.objects.instance.Instance object
        :param name: name of the backup or snapshot
        :param extra_properties: dict of extra image properties to include
        :returns: the new image metadata
        :raises: OverQuota if the new volume snapshots would exceed the
            Cinder snapshot quota (best-effort pre-check).
        """
        image_meta = self._initialize_instance_snapshot_metadata(
            instance, name, extra_properties)
        # the new image is simply a bucket of properties (particularly the
        # block device mapping, kernel and ramdisk IDs) with no image data,
        # hence the zero size
        image_meta['size'] = 0
        for attr in ('container_format', 'disk_format'):
            image_meta.pop(attr, None)
        properties = image_meta['properties']
        # clean properties before filling
        for key in ('block_device_mapping', 'bdm_v2', 'root_device_name'):
            properties.pop(key, None)
        if instance.root_device_name:
            properties['root_device_name'] = instance.root_device_name

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)

        mapping = []  # list of BDM dicts that can go into the image properties
        # Do some up-front filtering of the list of BDMs from
        # which we are going to create snapshots.
        volume_bdms = []
        for bdm in bdms:
            if bdm.no_device:
                continue
            if bdm.is_volume:
                # These will be handled below.
                volume_bdms.append(bdm)
            else:
                mapping.append(bdm.get_image_mapping())

        # Check limits in Cinder before creating snapshots to avoid going over
        # quota in the middle of a list of volumes. This is a best-effort check
        # but concurrently running snapshot requests from the same project
        # could still fail to create volume snapshots if they go over limit.
        if volume_bdms:
            limits = self.volume_api.get_absolute_limits(context)
            total_snapshots_used = limits['totalSnapshotsUsed']
            max_snapshots = limits['maxTotalSnapshots']
            # -1 means there is unlimited quota for snapshots
            if (max_snapshots > -1 and
                    len(volume_bdms) + total_snapshots_used > max_snapshots):
                LOG.debug('Unable to create volume snapshots for instance. '
                          'Currently has %s snapshots, requesting %s new '
                          'snapshots, with a limit of %s.',
                          total_snapshots_used, len(volume_bdms),
                          max_snapshots, instance=instance)
                raise exception.OverQuota(overs='snapshots')

        quiesced = False
        if instance.vm_state == vm_states.ACTIVE:
            try:
                LOG.info("Attempting to quiesce instance before volume "
                         "snapshot.", instance=instance)
                self.compute_rpcapi.quiesce_instance(context, instance)
                quiesced = True
            except (exception.InstanceQuiesceNotSupported,
                    exception.QemuGuestAgentNotEnabled,
                    exception.NovaException, NotImplementedError) as err:
                if strutils.bool_from_string(instance.system_metadata.get(
                        'image_os_require_quiesce')):
                    # The image requires a quiesced snapshot: propagate.
                    raise
                else:
                    LOG.info('Skipping quiescing instance: %(reason)s.',
                             {'reason': err},
                             instance=instance)

        # NOTE(review): defined as a local function taking ``self``
        # explicitly, presumably so wrap_instance_event can record an
        # instance action event around just this phase — confirm against
        # the decorator's implementation.
        @wrap_instance_event(prefix='api')
        def snapshot_instance(self, context, instance, bdms):
            try:
                for bdm in volume_bdms:
                    # create snapshot based on volume_id
                    volume = self.volume_api.get(context, bdm.volume_id)
                    # NOTE(yamahata): Should we wait for snapshot creation?
                    #                 Linux LVM snapshot creation completes in
                    #                 short time, it doesn't matter for now.
                    name = _('snapshot for %s') % image_meta['name']
                    LOG.debug('Creating snapshot from volume %s.',
                              volume['id'], instance=instance)
                    snapshot = self.volume_api.create_snapshot_force(
                        context, volume['id'],
                        name, volume['display_description'])
                    mapping_dict = block_device.snapshot_from_bdm(
                        snapshot['id'], bdm)
                    mapping_dict = mapping_dict.get_image_mapping()
                    mapping.append(mapping_dict)
                return mapping
            # NOTE(tasker): No error handling is done in the above for loop.
            # This means that if the snapshot fails and throws an exception
            # the traceback will skip right over the unquiesce needed below.
            # Here, catch any exception, unquiesce the instance, and raise the
            # error so that the calling function can do what it needs to in
            # order to properly treat a failed snap.
            except Exception:
                with excutils.save_and_reraise_exception():
                    if quiesced:
                        LOG.info("Unquiescing instance after volume snapshot "
                                 "failure.", instance=instance)
                        self.compute_rpcapi.unquiesce_instance(
                            context, instance, mapping)

        self._record_action_start(context, instance,
                                  instance_actions.CREATE_IMAGE)
        mapping = snapshot_instance(self, context, instance, bdms)

        if quiesced:
            self.compute_rpcapi.unquiesce_instance(context, instance, mapping)

        if mapping:
            properties['block_device_mapping'] = mapping
            properties['bdm_v2'] = True

        return self.image_api.create(context, image_meta)
@check_instance_lock
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if reboot_type == 'SOFT':
self._soft_reboot(context, instance)
else:
self._hard_reboot(context, instance)
@check_instance_state(vm_state=set(vm_states.ALLOW_SOFT_REBOOT),
task_state=[None])
def _soft_reboot(self, context, instance):
expected_task_state = [None]
instance.task_state = task_states.REBOOTING
instance.save(expected_task_state=expected_task_state)
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type='SOFT')
@check_instance_state(vm_state=set(vm_states.ALLOW_HARD_REBOOT),
task_state=task_states.ALLOW_REBOOT)
def _hard_reboot(self, context, instance):
instance.task_state = task_states.REBOOTING_HARD
expected_task_state = [None,
task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.SUSPENDING]
instance.save(expected_task_state = expected_task_state)
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type='HARD')
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.ERROR])
    def rebuild(self, context, instance, image_href, admin_password,
                files_to_inject=None, **kwargs):
        """Rebuild the given instance with the provided attributes.

        :param context: nova auth request context
        :param instance: the instance to rebuild
        :param image_href: id/href of the image to rebuild with; for a
            volume-backed server this must match the image already in the
            root volume (a different image is rejected below)
        :param admin_password: new admin password for the rebuilt server
        :param files_to_inject: optional list of files to inject
        :param kwargs: optional attribute updates, e.g. ``metadata``,
            ``preserve_ephemeral``, ``auto_disk_config``, ``key_name``
        :raises: NovaException if a volume-backed server has no root BDM
        :raises: ImageUnacceptable if a different image is requested for
            a volume-backed server
        """
        files_to_inject = files_to_inject or []
        metadata = kwargs.get('metadata', {})
        preserve_ephemeral = kwargs.get('preserve_ephemeral', False)
        auto_disk_config = kwargs.get('auto_disk_config')
        if 'key_name' in kwargs:
            # Pop rather than get: instance.update(kwargs) further below
            # must not see 'key_name' since the key fields are set here.
            key_name = kwargs.pop('key_name')
            if key_name:
                # NOTE(liuyulong): we are intentionally using the user_id from
                # the request context rather than the instance.user_id because
                # users own keys but instances are owned by projects, and
                # another user in the same project can rebuild an instance
                # even if they didn't create it.
                key_pair = objects.KeyPair.get_by_name(context,
                                                       context.user_id,
                                                       key_name)
                instance.key_name = key_pair.name
                instance.key_data = key_pair.public_key
                instance.keypairs = objects.KeyPairList(objects=[key_pair])
            else:
                # An explicitly empty/None key name unsets the keypair.
                instance.key_name = None
                instance.key_data = None
                instance.keypairs = objects.KeyPairList(objects=[])
        image_id, image = self._get_image(context, image_href)
        self._check_auto_disk_config(image=image, **kwargs)
        flavor = instance.get_flavor()
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        root_bdm = compute_utils.get_root_bdm(context, instance, bdms)
        # Check to see if the image is changing and we have a volume-backed
        # server. The compute doesn't support changing the image in the
        # root disk of a volume-backed server, so we need to just fail fast.
        is_volume_backed = compute_utils.is_volume_backed_instance(
            context, instance, bdms)
        if is_volume_backed:
            # For boot from volume, instance.image_ref is empty, so we need to
            # query the image from the volume.
            if root_bdm is None:
                # This shouldn't happen and is an error, we need to fail. This
                # is not the users fault, it's an internal error. Without a
                # root BDM we have no way of knowing the backing volume (or
                # image in that volume) for this instance.
                raise exception.NovaException(
                    _('Unable to find root block device mapping for '
                      'volume-backed instance.'))
            volume = self.volume_api.get(context, root_bdm.volume_id)
            volume_image_metadata = volume.get('volume_image_metadata', {})
            orig_image_ref = volume_image_metadata.get('image_id')
            if orig_image_ref != image_href:
                # Leave a breadcrumb.
                LOG.debug('Requested to rebuild instance with a new image %s '
                          'for a volume-backed server with image %s in its '
                          'root volume which is not supported.', image_href,
                          orig_image_ref, instance=instance)
                msg = _('Unable to rebuild with a different image for a '
                        'volume-backed server.')
                raise exception.ImageUnacceptable(
                    image_id=image_href, reason=msg)
        else:
            orig_image_ref = instance.image_ref
        self._checks_for_create_and_rebuild(context, image_id, image,
                flavor, metadata, files_to_inject, root_bdm)
        kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
            context, None, None, image)
        def _reset_image_metadata():
            """Remove old image properties that we're storing as instance
            system metadata. These properties start with 'image_'.
            Then add the properties for the new image.
            """
            # FIXME(comstud): There's a race condition here in that if
            # the system_metadata for this instance is updated after
            # we do the previous save() and before we update.. those
            # other updates will be lost. Since this problem exists in
            # a lot of other places, I think it should be addressed in
            # a DB layer overhaul.
            orig_sys_metadata = dict(instance.system_metadata)
            # Remove the old keys
            for key in list(instance.system_metadata.keys()):
                if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
                    del instance.system_metadata[key]
            # Add the new ones
            new_sys_metadata = utils.get_system_metadata_from_image(
                image, flavor)
            instance.system_metadata.update(new_sys_metadata)
            instance.save()
            return orig_sys_metadata
        # Since image might have changed, we may have new values for
        # os_type, vm_mode, etc
        options_from_image = self._inherit_properties_from_image(
            image, auto_disk_config)
        instance.update(options_from_image)
        instance.task_state = task_states.REBUILDING
        # An empty instance.image_ref is currently used as an indication
        # of BFV. Preserve that over a rebuild to not break users.
        if not is_volume_backed:
            instance.image_ref = image_href
            instance.kernel_id = kernel_id or ""
            instance.ramdisk_id = ramdisk_id or ""
        instance.progress = 0
        instance.update(kwargs)
        instance.save(expected_task_state=[None])
        # On a rebuild, since we're potentially changing images, we need to
        # wipe out the old image properties that we're storing as instance
        # system metadata... and copy in the properties for the new image.
        orig_sys_metadata = _reset_image_metadata()
        self._record_action_start(context, instance, instance_actions.REBUILD)
        # NOTE(sbauza): The migration script we provided in Newton should make
        # sure that all our instances are currently migrated to have an
        # attached RequestSpec object but let's consider that the operator only
        # half migrated all their instances in the meantime.
        host = instance.host
        try:
            request_spec = objects.RequestSpec.get_by_instance_uuid(
                context, instance.uuid)
            # If a new image is provided on rebuild, we will need to run
            # through the scheduler again, but we want the instance to be
            # rebuilt on the same host it's already on.
            if orig_image_ref != image_href:
                # We have to modify the request spec that goes to the scheduler
                # to contain the new image. We persist this since we've already
                # changed the instance.image_ref above so we're being
                # consistent.
                request_spec.image = objects.ImageMeta.from_dict(image)
                request_spec.save()
                if 'scheduler_hints' not in request_spec:
                    request_spec.scheduler_hints = {}
                # Nuke the id on this so we can't accidentally save
                # this hint hack later
                del request_spec.id
                # NOTE(danms): Passing host=None tells conductor to
                # call the scheduler. The _nova_check_type hint
                # requires that the scheduler returns only the same
                # host that we are currently on and only checks
                # rebuild-related filters.
                request_spec.scheduler_hints['_nova_check_type'] = ['rebuild']
                request_spec.force_hosts = [instance.host]
                request_spec.force_nodes = [instance.node]
                host = None
        except exception.RequestSpecNotFound:
            # Some old instances can still have no RequestSpec object attached
            # to them, we need to support the old way
            request_spec = None
        self.compute_task_api.rebuild_instance(context, instance=instance,
                new_pass=admin_password, injected_files=files_to_inject,
                image_ref=image_href, orig_image_ref=orig_image_ref,
                orig_sys_metadata=orig_sys_metadata, bdms=bdms,
                preserve_ephemeral=preserve_ephemeral, host=host,
                request_spec=request_spec,
                kwargs=kwargs)
@staticmethod
def _check_quota_for_upsize(context, instance, current_flavor, new_flavor):
project_id, user_id = quotas_obj.ids_from_instance(context,
instance)
# Deltas will be empty if the resize is not an upsize.
deltas = compute_utils.upsize_quota_delta(context, new_flavor,
current_flavor)
if deltas:
try:
res_deltas = {'cores': deltas.get('cores', 0),
'ram': deltas.get('ram', 0)}
objects.Quotas.check_deltas(context, res_deltas,
project_id, user_id=user_id,
check_project_id=project_id,
check_user_id=user_id)
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
usages = exc.kwargs['usages']
headroom = compute_utils.get_headroom(quotas, usages,
deltas)
(overs, reqs, total_alloweds,
useds) = compute_utils.get_over_quota_detail(headroom,
overs,
quotas,
deltas)
LOG.warning("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance.",
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=reqs,
used=useds,
allowed=total_alloweds)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# If this is a resize down, a revert might go over quota.
self._check_quota_for_upsize(context, instance, instance.flavor,
instance.old_flavor)
instance.task_state = task_states.RESIZE_REVERTING
instance.save(expected_task_state=[None])
migration.status = 'reverting'
migration.save()
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
# TODO(melwitt): We're not rechecking for strict quota here to guard
# against going over quota during a race at this time because the
# resource consumption for this operation is written to the database
# by compute.
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
# NOTE(melwitt): We're not checking quota here because there isn't a
# change in resource usage when confirming a resize. Resource
# consumption for resizes are written to the database by compute, so
# a confirm resize is just a clean up of the migration objects and a
# state change in compute.
if migration is None:
migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
migration.status = 'confirming'
migration.save()
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute)
@staticmethod
def _resize_cells_support(context, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts.
# We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = objects.Migration(context=context.elevated())
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.migration_type = (
mig.old_instance_type_id != mig.new_instance_type_id and
'resize' or 'migration')
mig.create()
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
    def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
               host_name=None, **extra_instance_updates):
        """Resize (ie, migrate) a running instance.

        If flavor_id is None, the process is considered a migration, keeping
        the original flavor_id. If flavor_id is not None, the instance should
        be migrated to a new host and resized to the new flavor_id.
        host_name is always None in the resize case.
        host_name can be set in the cold migration case only.

        :param flavor_id: id of the target flavor, or None for a migration
        :param clean_shutdown: whether to ask for a graceful guest shutdown
        :param host_name: optional target host (cold migration only)
        :raises: CannotMigrateToSameHost, CannotResizeDisk, FlavorNotFound,
            CannotResizeToSameFlavor, CannotMigrateWithTargetHost
        """
        if host_name is not None:
            # Cannot migrate to the host where the instance exists
            # because it is useless.
            if host_name == instance.host:
                raise exception.CannotMigrateToSameHost()
            # Check whether host exists or not.
            node = objects.ComputeNode.get_first_node_by_host_for_old_compat(
                context, host_name, use_slave=True)
        self._check_auto_disk_config(instance, **extra_instance_updates)
        current_instance_type = instance.get_flavor()
        # If flavor_id is not provided, only migrate the instance.
        if not flavor_id:
            LOG.debug("flavor_id is None. Assuming migration.",
                      instance=instance)
            new_instance_type = current_instance_type
        else:
            new_instance_type = flavors.get_flavor_by_flavor_id(
                flavor_id, read_deleted="no")
            # Resizing an image-backed server to a zero-root-disk flavor
            # would leave it without a root disk, so reject it.
            if (new_instance_type.get('root_gb') == 0 and
                current_instance_type.get('root_gb') != 0 and
                not compute_utils.is_volume_backed_instance(context,
                    instance)):
                reason = _('Resize to zero disk flavor is not allowed.')
                raise exception.CannotResizeDisk(reason=reason)
        if not new_instance_type:
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        current_instance_type_name = current_instance_type['name']
        new_instance_type_name = new_instance_type['name']
        LOG.debug("Old instance type %(current_instance_type_name)s, "
                  "new instance type %(new_instance_type_name)s",
                  {'current_instance_type_name': current_instance_type_name,
                   'new_instance_type_name': new_instance_type_name},
                  instance=instance)
        same_instance_type = (current_instance_type['id'] ==
                              new_instance_type['id'])
        # NOTE(sirp): We don't want to force a customer to change their flavor
        # when Ops is migrating off of a failed host.
        if not same_instance_type and new_instance_type.get('disabled'):
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        if same_instance_type and flavor_id and self.cell_type != 'compute':
            raise exception.CannotResizeToSameFlavor()
        # ensure there is sufficient headroom for upsizes
        if flavor_id:
            self._check_quota_for_upsize(context, instance,
                                         current_instance_type,
                                         new_instance_type)
        instance.task_state = task_states.RESIZE_PREP
        instance.progress = 0
        instance.update(extra_instance_updates)
        instance.save(expected_task_state=[None])
        filter_properties = {'ignore_hosts': []}
        if not CONF.allow_resize_to_same_host:
            filter_properties['ignore_hosts'].append(instance.host)
        if self.cell_type == 'api':
            # Create migration record.
            self._resize_cells_support(context, instance,
                                       current_instance_type,
                                       new_instance_type)
        if not flavor_id:
            self._record_action_start(context, instance,
                                      instance_actions.MIGRATE)
        else:
            self._record_action_start(context, instance,
                                      instance_actions.RESIZE)
        # NOTE(sbauza): The migration script we provided in Newton should make
        # sure that all our instances are currently migrated to have an
        # attached RequestSpec object but let's consider that the operator only
        # half migrated all their instances in the meantime.
        try:
            request_spec = objects.RequestSpec.get_by_instance_uuid(
                context, instance.uuid)
            request_spec.ignore_hosts = filter_properties['ignore_hosts']
        except exception.RequestSpecNotFound:
            # Some old instances can still have no RequestSpec object attached
            # to them, we need to support the old way
            if host_name is not None:
                # If there is no request spec we cannot honor the request
                # and we need to fail.
                raise exception.CannotMigrateWithTargetHost()
            request_spec = None
        # TODO(melwitt): We're not rechecking for strict quota here to guard
        # against going over quota during a race at this time because the
        # resource consumption for this operation is written to the database
        # by compute.
        scheduler_hint = {'filter_properties': filter_properties}
        if request_spec:
            if host_name is None:
                # If 'host_name' is not specified,
                # clear the 'requested_destination' field of the RequestSpec.
                request_spec.requested_destination = None
            else:
                # Set the host and the node so that the scheduler will
                # validate them.
                # TODO(takashin): It will be added to check whether
                # the specified host is within the same cell as
                # the instance or not. If not, raise specific error message
                # that is clear to the caller.
                request_spec.requested_destination = objects.Destination(
                    host=node.host, node=node.hypervisor_hostname)
        self.compute_task_api.resize_instance(context, instance,
                extra_instance_updates, scheduler_hint=scheduler_hint,
                flavor=new_instance_type,
                clean_shutdown=clean_shutdown,
                request_spec=request_spec)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def shelve(self, context, instance, clean_shutdown=True):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SHELVE)
if not compute_utils.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance.display_name
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id, clean_shutdown=clean_shutdown)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance, clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED])
def shelve_offload(self, context, instance, clean_shutdown=True):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.SHELVE_OFFLOAD)
self.compute_rpcapi.shelve_offload_instance(context, instance=instance,
clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNSHELVE)
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
self.compute_task_api.unshelve_instance(context, instance,
request_spec)
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@check_instance_host
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@check_instance_host
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_instance_diagnostics(context,
instance=instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None, clean_shutdown=True):
"""Rescue the given instance."""
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
for bdm in bdms:
if bdm.volume_id:
vol = self.volume_api.get(context, bdm.volume_id)
self.volume_api.check_attached(context, vol)
if compute_utils.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance.uuid,
reason=reason)
instance.task_state = task_states.RESCUING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password, rescue_image_ref=rescue_image_ref,
clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
instance.task_state = task_states.UNRESCUING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance.
@param context: Nova auth context.
@param instance: Nova instance object.
@param password: The admin password for the instance.
"""
instance.task_state = task_states.UPDATING_PASSWORD
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_rdp_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_rdp_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_serial_console(self, context, instance, console_type):
"""Get a url to a serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_serial_console_connect_info(self, context, instance, console_type):
"""Used in a child cell to get serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_mks_console(self, context, instance, console_type):
"""Get a url to a MKS console."""
connect_info = self.compute_rpcapi.get_mks_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
    def lock(self, context, instance):
        """Lock the given instance.

        A no-op when the owner requests a lock on an already-locked
        instance; an admin (non-owner) always proceeds, which lets an
        admin re-take / escalate the lock.
        """
        # Only update the lock if we are an admin (non-owner)
        is_owner = instance.project_id == context.project_id
        if instance.locked and is_owner:
            return
        context = context.elevated()
        self._record_action_start(context, instance,
                                  instance_actions.LOCK)
        @wrap_instance_event(prefix='api')
        def lock(self, context, instance):
            # Inner function deliberately shadows the method name --
            # presumably so wrap_instance_event names the recorded
            # instance event after it ('api' prefix + 'lock'); verify.
            LOG.debug('Locking', instance=instance)
            instance.locked = True
            instance.locked_by = 'owner' if is_owner else 'admin'
            instance.save()
        lock(self, context, instance)
def is_expected_locked_by(self, context, instance):
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
return False
return True
    def unlock(self, context, instance):
        """Unlock the given instance."""
        # NOTE(review): unlike lock(), there is no ownership/locked_by
        # short-circuit here; access control is presumably enforced by
        # the caller (API policy layer) -- verify.
        context = context.elevated()
        self._record_action_start(context, instance,
                                  instance_actions.UNLOCK)
        @wrap_instance_event(prefix='api')
        def unlock(self, context, instance):
            # Inner function deliberately shadows the method name --
            # presumably so wrap_instance_event names the recorded
            # instance event after it ('api' prefix + 'unlock'); verify.
            LOG.debug('Unlocking', instance=instance)
            instance.locked = False
            instance.locked_by = None
            instance.save()
        unlock(self, context, instance)
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
    def _create_volume_bdm(self, context, instance, device, volume,
                           disk_bus, device_type, is_local_creation=False,
                           tag=None):
        """Create the BlockDeviceMapping record for a volume attach.

        With ``is_local_creation=True`` (shelved-offloaded instance) the
        BDM is written directly with no device name; otherwise the
        compute host reserves the device name and creates the BDM via
        RPC.

        :returns: the created BlockDeviceMapping object
        """
        volume_id = volume['id']
        if is_local_creation:
            # when the creation is done locally we can't specify the device
            # name as we do not have a way to check that the name specified is
            # a valid one.
            # We leave the setting of that value when the actual attach
            # happens on the compute manager
            # NOTE(artom) Local attach (to a shelved-offload instance) cannot
            # support device tagging because we have no way to call the compute
            # manager to check that it supports device tagging. In fact, we
            # don't even know which compute manager the instance will
            # eventually end up on when it's unshelved.
            volume_bdm = objects.BlockDeviceMapping(
                context=context,
                source_type='volume', destination_type='volume',
                instance_uuid=instance.uuid, boot_index=None,
                volume_id=volume_id,
                device_name=None, guest_format=None,
                disk_bus=disk_bus, device_type=device_type)
            volume_bdm.create()
        else:
            # NOTE(vish): This is done on the compute host because we want
            # to avoid a race where two devices are requested at
            # the same time. When db access is removed from
            # compute, the bdm will be created here and we will
            # have to make sure that they are assigned atomically.
            volume_bdm = self.compute_rpcapi.reserve_block_device_name(
                context, instance, device, volume_id, disk_bus=disk_bus,
                device_type=device_type, tag=tag,
                multiattach=volume['multiattach'])
        return volume_bdm
def _check_volume_already_attached_to_instance(self, context, instance,
volume_id):
"""Avoid attaching the same volume to the same instance twice.
As the new Cinder flow (microversion 3.44) is handling the checks
differently and allows to attach the same volume to the same
instance twice to enable live_migrate we are checking whether the
BDM already exists for this combination for the new flow and fail
if it does.
"""
try:
objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
msg = _("volume %s already attached") % volume_id
raise exception.InvalidVolume(reason=msg)
except exception.VolumeBDMNotFound:
pass
    def _check_attach_and_reserve_volume(self, context, volume, instance,
                                         bdm, supports_multiattach=False):
        """Validate the volume and reserve it for an attach.

        Checks availability-zone and multiattach constraints, then either
        creates a new-style Cinder attachment (API 3.44+) or falls back
        to the legacy ``reserve_volume`` flow, depending on the minimum
        nova-compute service version and Cinder API availability.

        :raises: MultiattachNotSupportedOldMicroversion if the volume is
            multiattach but the request did not opt in via microversion
        :raises: MultiattachSupportNotYetAvailable if the computes are
            too old to support multiattach volumes
        """
        volume_id = volume['id']
        self.volume_api.check_availability_zone(context, volume,
                                                instance=instance)
        # If volume.multiattach=True and the microversion to
        # support multiattach is not used, fail the request.
        if volume['multiattach'] and not supports_multiattach:
            raise exception.MultiattachNotSupportedOldMicroversion()
        if 'id' in instance:
            # This is a volume attach to an existing instance, so
            # we only care about the cell the instance is in.
            min_compute_version = objects.Service.get_minimum_version(
                context, 'nova-compute')
        else:
            # The instance is being created and we don't know which
            # cell it's going to land in, so check all cells.
            min_compute_version = \
                objects.service.get_minimum_version_all_cells(
                    context, ['nova-compute'])
        # Check to see if the computes have been upgraded to support
        # booting from a multiattach volume.
        if (volume['multiattach'] and
                min_compute_version < MIN_COMPUTE_MULTIATTACH):
            raise exception.MultiattachSupportNotYetAvailable()
        if min_compute_version >= CINDER_V3_ATTACH_MIN_COMPUTE_VERSION:
            # Attempt a new style volume attachment, but fallback to old-style
            # in case Cinder API 3.44 isn't available.
            try:
                attachment_id = self.volume_api.attachment_create(
                    context, volume_id, instance.uuid)['id']
                bdm.attachment_id = attachment_id
                # NOTE(ildikov): In case of boot from volume the BDM at this
                # point is not yet created in a cell database, so we can't
                # call save(). When attaching a volume to an existing
                # instance, the instance is already in a cell and the BDM has
                # been created in that same cell so updating here in that case
                # is "ok".
                if bdm.obj_attr_is_set('id'):
                    bdm.save()
            except exception.CinderAPIVersionNotAvailable:
                LOG.debug('The available Cinder microversion is not high '
                          'enough to create new style volume attachment.')
                self.volume_api.reserve_volume(context, volume_id)
        else:
            LOG.debug('The compute service version is not high enough to '
                      'create a new style volume attachment.')
            self.volume_api.reserve_volume(context, volume_id)
def _attach_volume(self, context, instance, volume, device,
disk_bus, device_type, tag=None,
supports_multiattach=False):
"""Attach an existing volume to an existing instance.
This method is separated to make it possible for cells version
to override it.
"""
volume_bdm = self._create_volume_bdm(
context, instance, device, volume, disk_bus=disk_bus,
device_type=device_type, tag=tag)
try:
self._check_attach_and_reserve_volume(context, volume, instance,
volume_bdm,
supports_multiattach)
self._record_action_start(
context, instance, instance_actions.ATTACH_VOLUME)
self.compute_rpcapi.attach_volume(context, instance, volume_bdm)
except Exception:
with excutils.save_and_reraise_exception():
volume_bdm.destroy()
return volume_bdm.device_name
    def _attach_volume_shelved_offloaded(self, context, instance, volume,
                                         device, disk_bus, device_type):
        """Attach an existing volume to an instance in shelved offloaded state.

        Attaching a volume for an instance in shelved offloaded state requires
        to perform the regular check to see if we can attach and reserve the
        volume then we need to call the attach method on the volume API
        to mark the volume as 'in-use'.
        The instance at this stage is not managed by a compute manager
        therefore the actual attachment will be performed once the
        instance will be unshelved.

        :returns: the device name recorded on the created BDM
        """
        volume_id = volume['id']

        # Local helper wrapped with wrap_instance_event so the attach is
        # recorded as an instance event even without a compute host.
        @wrap_instance_event(prefix='api')
        def attach_volume(self, context, v_id, instance, dev, attachment_id):
            if attachment_id:
                # Normally we wouldn't complete an attachment without a host
                # connector, but we do this to make the volume status change
                # to "in-use" to maintain the API semantics with the old flow.
                # When unshelving the instance, the compute service will deal
                # with this disconnected attachment.
                self.volume_api.attachment_complete(context, attachment_id)
            else:
                # Old-style (pre-3.44) flow: mark the volume attached.
                self.volume_api.attach(context,
                                       v_id,
                                       instance.uuid,
                                       dev)

        # is_local_creation=True: the BDM is created directly in the API,
        # not by a compute host (the instance has none while offloaded).
        volume_bdm = self._create_volume_bdm(
            context, instance, device, volume, disk_bus=disk_bus,
            device_type=device_type, is_local_creation=True)
        try:
            self._check_attach_and_reserve_volume(context, volume, instance,
                                                  volume_bdm)
            self._record_action_start(
                context, instance,
                instance_actions.ATTACH_VOLUME)
            attach_volume(self, context, volume_id, instance, device,
                          volume_bdm.attachment_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Roll back the locally-created BDM on any failure.
                volume_bdm.destroy()

        return volume_bdm.device_name
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED, vm_states.RESIZED,
                                    vm_states.SOFT_DELETED, vm_states.SHELVED,
                                    vm_states.SHELVED_OFFLOADED])
    def attach_volume(self, context, instance, volume_id, device=None,
                      disk_bus=None, device_type=None, tag=None,
                      supports_multiattach=False):
        """Attach an existing volume to an existing instance.

        Dispatches either to the normal compute-host flow or, for
        shelved-offloaded instances, to the local (API-side) flow.

        :raises: InvalidDevicePath if the requested device path is invalid
        :raises: VolumeTaggedAttachToShelvedNotSupported /
            MultiattachToShelvedNotSupported for unsupported
            shelved-offloaded combinations
        """
        # NOTE(vish): Fail fast if the device is not going to pass. This
        #             will need to be removed along with the test if we
        #             change the logic in the manager for what constitutes
        #             a valid device.
        if device and not block_device.match_device(device):
            raise exception.InvalidDevicePath(path=device)

        # Check to see if the computes in this cell can support new-style
        # volume attachments.
        min_compute_version = objects.Service.get_minimum_version(
            context, 'nova-compute')
        if min_compute_version >= CINDER_V3_ATTACH_MIN_COMPUTE_VERSION:
            try:
                # Check to see if Cinder is new enough to create new-style
                # attachments.
                cinder.is_microversion_supported(context, '3.44')
            except exception.CinderAPIVersionNotAvailable:
                # Old Cinder: fall through to the legacy reserve flow.
                pass
            else:
                # Make sure the volume isn't already attached to this instance
                # because based on the above checks, we'll use the new style
                # attachment flow in _check_attach_and_reserve_volume and
                # Cinder will allow multiple attachments between the same
                # volume and instance but the old flow API semantics don't
                # allow that so we enforce it here.
                self._check_volume_already_attached_to_instance(context,
                                                                instance,
                                                                volume_id)

        volume = self.volume_api.get(context, volume_id)
        is_shelved_offloaded = instance.vm_state == vm_states.SHELVED_OFFLOADED
        if is_shelved_offloaded:
            if tag:
                # NOTE(artom) Local attach (to a shelved-offload instance)
                # cannot support device tagging because we have no way to call
                # the compute manager to check that it supports device tagging.
                # In fact, we don't even know which computer manager the
                # instance will eventually end up on when it's unshelved.
                raise exception.VolumeTaggedAttachToShelvedNotSupported()
            if volume['multiattach']:
                # NOTE(mriedem): Similar to tagged attach, we don't support
                # attaching a multiattach volume to shelved offloaded instances
                # because we can't tell if the compute host (since there isn't
                # one) supports it. This could possibly be supported in the
                # future if the scheduler was made aware of which computes
                # support multiattach volumes.
                raise exception.MultiattachToShelvedNotSupported()
            return self._attach_volume_shelved_offloaded(context,
                                                         instance,
                                                         volume,
                                                         device,
                                                         disk_bus,
                                                         device_type)

        return self._attach_volume(context, instance, volume, device,
                                   disk_bus, device_type, tag=tag,
                                   supports_multiattach=supports_multiattach)
    def _detach_volume(self, context, instance, volume):
        """Detach volume from instance.

        This method is separated to make it easier for cells version
        to override.

        :param volume: dict describing the attached volume
        :raises: InvalidVolume if Cinder rejects begin_detaching
        """
        try:
            # Transition the volume to 'detaching' in Cinder first; an
            # InvalidInput here means it isn't in a detachable state.
            self.volume_api.begin_detaching(context, volume['id'])
        except exception.InvalidInput as exc:
            raise exception.InvalidVolume(reason=exc.format_message())
        # Find this instance's attachment id (new-style attachments only;
        # old-style volumes may have no matching entry).
        attachments = volume.get('attachments', {})
        attachment_id = None
        if attachments and instance.uuid in attachments:
            attachment_id = attachments[instance.uuid]['attachment_id']
        self._record_action_start(
            context, instance, instance_actions.DETACH_VOLUME)
        self.compute_rpcapi.detach_volume(context, instance=instance,
                volume_id=volume['id'], attachment_id=attachment_id)
    def _detach_volume_shelved_offloaded(self, context, instance, volume):
        """Detach a volume from an instance in shelved offloaded state.

        If the instance is shelved offloaded we just need to cleanup volume
        calling the volume api detach, the volume api terminate_connection
        and delete the bdm record.
        If the volume has delete_on_termination option set then we call the
        volume api delete as well.
        """
        # Local helper wrapped so the detach is recorded as an instance
        # event; no compute host is involved while offloaded.
        @wrap_instance_event(prefix='api')
        def detach_volume(self, context, instance, bdms):
            self._local_cleanup_bdm_volumes(bdms, instance, context)

        try:
            # Transition the volume to 'detaching' in Cinder; reject the
            # request if it is not in a detachable state.
            self.volume_api.begin_detaching(context, volume['id'])
        except exception.InvalidInput as exc:
            raise exception.InvalidVolume(reason=exc.format_message())
        bdms = [objects.BlockDeviceMapping.get_by_volume_id(
                context, volume['id'], instance.uuid)]
        self._record_action_start(
            context, instance,
            instance_actions.DETACH_VOLUME)
        detach_volume(self, context, instance, bdms)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
self._detach_volume_shelved_offloaded(context, instance, volume)
else:
self._detach_volume(context, instance, volume)
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.RESIZED])
    def swap_volume(self, context, instance, old_volume, new_volume):
        """Swap volume attached to an instance.

        :param old_volume: dict of the currently attached volume
        :param new_volume: dict of the detached replacement volume; must
            be at least as large as the old volume
        :raises: InvalidVolume if the sanity checks fail
        """
        # The caller likely got the instance from volume['attachments']
        # in the first place, but let's sanity check.
        if not old_volume.get('attachments', {}).get(instance.uuid):
            msg = _("Old volume is attached to a different instance.")
            raise exception.InvalidVolume(reason=msg)
        if new_volume['attach_status'] == 'attached':
            msg = _("New volume must be detached in order to swap.")
            raise exception.InvalidVolume(reason=msg)
        if int(new_volume['size']) < int(old_volume['size']):
            msg = _("New volume must be the same size or larger.")
            raise exception.InvalidVolume(reason=msg)
        self.volume_api.check_availability_zone(context, new_volume,
                                                instance=instance)
        try:
            # Put the old volume into 'detaching' state in Cinder.
            self.volume_api.begin_detaching(context, old_volume['id'])
        except exception.InvalidInput as exc:
            raise exception.InvalidVolume(reason=exc.format_message())

        # Get the BDM for the attached (old) volume so we can tell if it was
        # attached with the new-style Cinder 3.44 API.
        bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
            context, old_volume['id'], instance.uuid)
        new_attachment_id = None
        if bdm.attachment_id is None:
            # This is an old-style attachment so reserve the new volume before
            # we cast to the compute host.
            self.volume_api.reserve_volume(context, new_volume['id'])
        else:
            try:
                self._check_volume_already_attached_to_instance(
                    context, instance, new_volume['id'])
            except exception.InvalidVolume:
                with excutils.save_and_reraise_exception():
                    # Undo begin_detaching before re-raising.
                    self.volume_api.roll_detaching(context, old_volume['id'])

            # This is a new-style attachment so for the volume that we are
            # going to swap to, create a new volume attachment.
            new_attachment_id = self.volume_api.attachment_create(
                context, new_volume['id'], instance.uuid)['id']

        self._record_action_start(
            context, instance, instance_actions.SWAP_VOLUME)

        try:
            self.compute_rpcapi.swap_volume(
                context, instance=instance,
                old_volume_id=old_volume['id'],
                new_volume_id=new_volume['id'],
                new_attachment_id=new_attachment_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Roll back both volumes to their pre-swap states.
                self.volume_api.roll_detaching(context, old_volume['id'])
                if new_attachment_id is None:
                    self.volume_api.unreserve_volume(context, new_volume['id'])
                else:
                    self.volume_api.attachment_delete(
                        context, new_attachment_id)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
def attach_interface(self, context, instance, network_id, port_id,
requested_ip, tag=None):
"""Use hotplug to add an network adapter to an instance."""
self._record_action_start(
context, instance, instance_actions.ATTACH_INTERFACE)
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip, tag=tag)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self._record_action_start(
context, instance, instance_actions.DETACH_INTERFACE)
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
return self.db.instance_metadata_get(context, instance.uuid)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
instance.delete_metadata_key(key)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = dict(instance.metadata)
if delete:
_metadata = metadata
else:
_metadata = dict(instance.metadata)
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
instance.metadata = _metadata
instance.save()
diff = _diff_dict(orig, instance.metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name, force=None, async=False):
"""Migrate a server lively to a new host."""
LOG.debug("Going to try to live migrate instance to %s",
host_name or "another host", instance=instance)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.LIVE_MIGRATION)
self.consoleauth_rpcapi.delete_tokens_for_instance(
context, instance.uuid)
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
# NOTE(sbauza): Force is a boolean by the new related API version
if force is False and host_name:
nodes = objects.ComputeNodeList.get_all_by_host(context, host_name)
# Unset the host to make sure we call the scheduler
# from the conductor LiveMigrationTask. Yes this is tightly-coupled
# to behavior in conductor and not great.
host_name = None
# FIXME(sbauza): Since only Ironic driver uses more than one
# compute per service but doesn't support live migrations,
# let's provide the first one.
target = nodes[0]
if request_spec:
# TODO(sbauza): Hydrate a fake spec for old instances not yet
# having a request spec attached to them (particularly true for
# cells v1). For the moment, let's keep the same behaviour for
# all the instances but provide the destination only if a spec
# is found.
destination = objects.Destination(
host=target.host,
node=target.hypervisor_hostname
)
# This is essentially a hint to the scheduler to only consider
# the specified host but still run it through the filters.
request_spec.requested_destination = destination
try:
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit,
request_spec=request_spec, async=async)
except oslo_exceptions.MessagingTimeout as messaging_timeout:
with excutils.save_and_reraise_exception():
# NOTE(pkoniszewski): It is possible that MessagingTimeout
# occurs, but LM will still be in progress, so write
# instance fault to database
compute_utils.add_instance_fault_from_exc(context,
instance,
messaging_timeout)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE],
task_state=[task_states.MIGRATING])
def live_migrate_force_complete(self, context, instance, migration_id):
"""Force live migration to complete.
:param context: Security context
:param instance: The instance that is being migrated
:param migration_id: ID of ongoing migration
"""
LOG.debug("Going to try to force live migration to complete",
instance=instance)
# NOTE(pkoniszewski): Get migration object to check if there is ongoing
# live migration for particular instance. Also pass migration id to
# compute to double check and avoid possible race condition.
migration = objects.Migration.get_by_id_and_instance(
context, migration_id, instance.uuid)
if migration.status != 'running':
raise exception.InvalidMigrationState(migration_id=migration_id,
instance_uuid=instance.uuid,
state=migration.status,
method='force complete')
self._record_action_start(
context, instance, instance_actions.LIVE_MIGRATION_FORCE_COMPLETE)
self.compute_rpcapi.live_migration_force_complete(
context, instance, migration)
@check_instance_lock
@check_instance_cell
@check_instance_state(task_state=[task_states.MIGRATING])
def live_migrate_abort(self, context, instance, migration_id):
"""Abort an in-progress live migration.
:param context: Security context
:param instance: The instance that is being migrated
:param migration_id: ID of in-progress live migration
"""
migration = objects.Migration.get_by_id_and_instance(context,
migration_id, instance.uuid)
LOG.debug("Going to cancel live migration %s",
migration.id, instance=instance)
if migration.status != 'running':
raise exception.InvalidMigrationState(migration_id=migration_id,
instance_uuid=instance.uuid,
state=migration.status,
method='abort live migration')
self._record_action_start(context, instance,
instance_actions.LIVE_MIGRATION_CANCEL)
self.compute_rpcapi.live_migration_abort(context,
instance, migration.id)
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.ERROR])
    def evacuate(self, context, instance, host, on_shared_storage,
                 admin_password=None, force=None):
        """Running evacuate to target host.

        Checking vm compute host state, if the host not in expected_state,
        raising an exception.

        :param instance: The instance to evacuate
        :param host: Target host. if not set, the scheduler will pick up one
        :param on_shared_storage: True if instance files on shared storage
        :param admin_password: password to set on rebuilt instance
        :param force: Force the evacuation to the specific host target
        """
        LOG.debug('vm evacuation scheduled', instance=instance)
        inst_host = instance.host
        # Evacuation is only allowed when the source compute service is
        # actually down; otherwise the rebuild would race with it.
        service = objects.Service.get_by_compute_host(context, inst_host)
        if self.servicegroup_api.service_is_up(service):
            LOG.error('Instance compute service state on %s '
                      'expected to be down, but it was up.', inst_host)
            raise exception.ComputeServiceInUse(host=inst_host)

        instance.task_state = task_states.REBUILDING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.EVACUATE)

        # NOTE(danms): Create this as a tombstone for the source compute
        # to find and cleanup. No need to pass it anywhere else.
        migration = objects.Migration(context,
                                      source_compute=instance.host,
                                      source_node=instance.node,
                                      instance_uuid=instance.uuid,
                                      status='accepted',
                                      migration_type='evacuation')
        if host:
            migration.dest_compute = host
        migration.create()

        compute_utils.notify_about_instance_usage(
            self.notifier, context, instance, "evacuate")
        compute_utils.notify_about_instance_action(
            context, instance, CONF.host,
            action=fields_obj.NotificationAction.EVACUATE,
            source=fields_obj.NotificationSource.API)

        try:
            request_spec = objects.RequestSpec.get_by_instance_uuid(
                context, instance.uuid)
        except exception.RequestSpecNotFound:
            # Some old instances can still have no RequestSpec object attached
            # to them, we need to support the old way
            request_spec = None

        # NOTE(sbauza): Force is a boolean by the new related API version
        if force is False and host:
            nodes = objects.ComputeNodeList.get_all_by_host(context, host)
            # NOTE(sbauza): Unset the host to make sure we call the scheduler
            host = None
            # FIXME(sbauza): Since only Ironic driver uses more than one
            # compute per service but doesn't support evacuations,
            # let's provide the first one.
            target = nodes[0]
            if request_spec:
                # TODO(sbauza): Hydrate a fake spec for old instances not yet
                # having a request spec attached to them (particularly true for
                # cells v1). For the moment, let's keep the same behaviour for
                # all the instances but provide the destination only if a spec
                # is found.
                destination = objects.Destination(
                    host=target.host,
                    node=target.hypervisor_hostname
                )
                # Hint the scheduler to consider only this host, while still
                # running it through the filters.
                request_spec.requested_destination = destination

        return self.compute_task_api.rebuild_instance(context,
                       instance=instance,
                       new_pass=admin_password,
                       injected_files=None,
                       image_ref=None,
                       orig_image_ref=None,
                       orig_sys_metadata=None,
                       bdms=None,
                       recreate=True,
                       on_shared_storage=on_shared_storage,
                       host=host,
                       request_spec=request_spec,
                       )
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
load_cells()
migrations = []
for cell in CELLS:
if cell.uuid == objects.CellMapping.CELL0_UUID:
continue
with nova_context.target_cell(context, cell) as cctxt:
migrations.extend(objects.MigrationList.get_by_filters(
cctxt, filters).objects)
return objects.MigrationList(objects=migrations)
def get_migrations_sorted(self, context, filters, sort_dirs=None,
sort_keys=None, limit=None, marker=None):
"""Get all migrations for the given parameters."""
mig_objs = migration_list.get_migration_objects_sorted(
context, filters, limit, marker, sort_keys, sort_dirs)
return mig_objs
def get_migrations_in_progress_by_instance(self, context, instance_uuid,
migration_type=None):
"""Get all migrations of an instance in progress."""
return objects.MigrationList.get_in_progress_by_instance(
context, instance_uuid, migration_type)
def get_migration_by_id_and_instance(self, context,
migration_id, instance_uuid):
"""Get the migration of an instance by id."""
return objects.Migration.get_by_id_and_instance(
context, migration_id, instance_uuid)
def _get_bdm_by_volume_id(self, context, volume_id, expected_attrs=None):
"""Retrieve a BDM without knowing its cell.
.. note:: The context will be targeted to the cell in which the
BDM is found, if any.
:param context: The API request context.
:param volume_id: The ID of the volume.
:param expected_attrs: list of any additional attributes that should
be joined when the BDM is loaded from the database.
:raises: nova.exception.VolumeBDMNotFound if not found in any cell
"""
load_cells()
for cell in CELLS:
nova_context.set_target_cell(context, cell)
try:
return objects.BlockDeviceMapping.get_by_volume(
context, volume_id, expected_attrs=expected_attrs)
except exception.NotFound:
continue
raise exception.VolumeBDMNotFound(volume_id=volume_id)
    def volume_snapshot_create(self, context, volume_id, create_info):
        """Create a snapshot of the given volume via its instance's host.

        :param volume_id: ID of the volume to snapshot
        :param create_info: dict of snapshot creation parameters; the 'id'
            key is echoed back in the returned dict
        :returns: dict with a 'snapshot' entry carrying the snapshot id
            and the volume id
        """
        bdm = self._get_bdm_by_volume_id(
            context, volume_id, expected_attrs=['instance'])

        # We allow creating the snapshot in any vm_state as long as there is
        # no task being performed on the instance and it has a host.
        @check_instance_host
        @check_instance_state(vm_state=None)
        def do_volume_snapshot_create(self, context, instance):
            # Cast to the compute host; the actual snapshot work (e.g.
            # quiescing the guest) happens there.
            self.compute_rpcapi.volume_snapshot_create(context, instance,
                    volume_id, create_info)
            snapshot = {
                'snapshot': {
                    'id': create_info.get('id'),
                    'volumeId': volume_id
                }
            }
            return snapshot

        return do_volume_snapshot_create(self, context, bdm.instance)
    def volume_snapshot_delete(self, context, volume_id, snapshot_id,
                               delete_info):
        """Delete a snapshot of the given volume via its instance's host.

        :param volume_id: ID of the volume the snapshot belongs to
        :param snapshot_id: ID of the snapshot to delete
        :param delete_info: dict of driver-specific deletion parameters
        """
        bdm = self._get_bdm_by_volume_id(
            context, volume_id, expected_attrs=['instance'])

        # We allow deleting the snapshot in any vm_state as long as there is
        # no task being performed on the instance and it has a host.
        @check_instance_host
        @check_instance_state(vm_state=None)
        def do_volume_snapshot_delete(self, context, instance):
            # Cast to the compute host; deletion happens there.
            self.compute_rpcapi.volume_snapshot_delete(context, instance,
                    volume_id, snapshot_id, delete_info)

        do_volume_snapshot_delete(self, context, bdm.instance)
    def external_instance_event(self, api_context, instances, events):
        """Collate externally-generated events by host and dispatch them.

        :param api_context: the (untargeted) API request context
        :param instances: the instances the events refer to; each carries
            a _context already targeted at its own cell
        :param events: external event objects to deliver
        """
        # NOTE(danms): The external API consumer just provides events,
        # but doesn't know where they go. We need to collate lists
        # by the host the affected instance is on and dispatch them
        # according to host
        instances_by_host = collections.defaultdict(list)
        events_by_host = collections.defaultdict(list)
        hosts_by_instance = collections.defaultdict(list)
        cell_contexts_by_host = {}
        for instance in instances:
            # instance._context is used here since it's already targeted to
            # the cell that the instance lives in, and we need to use that
            # cell context to lookup any migrations associated to the instance.
            for host in self._get_relevant_hosts(instance._context, instance):
                # NOTE(danms): All instances on a host must have the same
                # mapping, so just use that
                # NOTE(mdbooth): We don't currently support migrations between
                # cells, and given that the Migration record is hosted in the
                # cell _get_relevant_hosts will likely have to change before we
                # do. Consequently we can currently assume that the context for
                # both the source and destination hosts of a migration is the
                # same.
                if host not in cell_contexts_by_host:
                    cell_contexts_by_host[host] = instance._context

                instances_by_host[host].append(instance)
                hosts_by_instance[instance.uuid].append(host)

        for event in events:
            if event.name == 'volume-extended':
                # Volume extend is a user-initiated operation starting in the
                # Block Storage service API. We record an instance action so
                # the user can monitor the operation to completion.
                host = hosts_by_instance[event.instance_uuid][0]
                cell_context = cell_contexts_by_host[host]
                objects.InstanceAction.action_start(
                    cell_context, event.instance_uuid,
                    instance_actions.EXTEND_VOLUME, want_result=False)

            # An event for a migrating instance goes to every relevant host.
            for host in hosts_by_instance[event.instance_uuid]:
                events_by_host[host].append(event)

        for host in instances_by_host:
            cell_context = cell_contexts_by_host[host]
            # TODO(salv-orlando): Handle exceptions raised by the rpc api layer
            # in order to ensure that a failure in processing events on a host
            # will not prevent processing events on other hosts
            self.compute_rpcapi.external_instance_event(
                cell_context, instances_by_host[host], events_by_host[host],
                host=host)
def _get_relevant_hosts(self, context, instance):
hosts = set()
hosts.add(instance.host)
if instance.migration_context is not None:
migration_id = instance.migration_context.migration_id
migration = objects.Migration.get_by_id(context, migration_id)
hosts.add(migration.dest_compute)
hosts.add(migration.source_compute)
LOG.debug('Instance %(instance)s is migrating, '
'copying events to all relevant hosts: '
'%(hosts)s', {'instance': instance.uuid,
'hosts': hosts})
return hosts
def get_instance_host_status(self, instance):
if instance.host:
try:
service = [service for service in instance.services if
service.binary == 'nova-compute'][0]
if service.forced_down:
host_status = fields_obj.HostStatus.DOWN
elif service.disabled:
host_status = fields_obj.HostStatus.MAINTENANCE
else:
alive = self.servicegroup_api.service_is_up(service)
host_status = ((alive and fields_obj.HostStatus.UP) or
fields_obj.HostStatus.UNKNOWN)
except IndexError:
host_status = fields_obj.HostStatus.NONE
else:
host_status = fields_obj.HostStatus.NONE
return host_status
def get_instances_host_statuses(self, instance_list):
host_status_dict = dict()
host_statuses = dict()
for instance in instance_list:
if instance.host:
if instance.host not in host_status_dict:
host_status = self.get_instance_host_status(instance)
host_status_dict[instance.host] = host_status
else:
host_status = host_status_dict[instance.host]
else:
host_status = fields_obj.HostStatus.NONE
host_statuses[instance.uuid] = host_status
return host_statuses
def target_host_cell(fn):
    """Target a host-based function to a cell.

    Expects to wrap a function of signature:

       func(self, context, host, ...)
    """
    @functools.wraps(fn)
    def _targeted(self, context, host, *args, **kwargs):
        # Resolve which cell the host lives in and pin the context to it
        # before invoking the wrapped function.
        mapping = objects.HostMapping.get_by_host(context, host)
        nova_context.set_target_cell(context, mapping.cell_mapping)
        return fn(self, context, host, *args, **kwargs)
    return _targeted
def _find_service_in_cell(context, service_id=None, service_host=None):
    """Find a service by id or hostname by searching all cells.

    If one matching service is found, return it. If none or multiple
    are found, raise an exception.

    :param context: A context.RequestContext
    :param service_id: If not none, the DB ID of the service to find
    :param service_host: If not None, the hostname of the service to find
    :returns: An objects.Service
    :raises: ServiceNotUnique if multiple matching IDs are found
    :raises: NotFound if no matches are found
    :raises: NovaException if called with neither search option
    """

    load_cells()
    service = None
    found_in_cell = None

    # A uuid lookup is globally unique, so the scan can stop at the first
    # match; plain integer ids can collide across cells.
    is_uuid = False
    if service_id is not None:
        is_uuid = uuidutils.is_uuid_like(service_id)
        if is_uuid:
            lookup_fn = lambda c: objects.Service.get_by_uuid(c, service_id)
        else:
            lookup_fn = lambda c: objects.Service.get_by_id(c, service_id)
    elif service_host is not None:
        lookup_fn = lambda c: (
            objects.Service.get_by_compute_host(c, service_host))
    else:
        LOG.exception('_find_service_in_cell called with no search parameters')
        # This is intentionally cryptic so we don't leak implementation details
        # out of the API.
        raise exception.NovaException()

    for cell in CELLS:
        # NOTE(danms): Services can be in cell0, so don't skip it here
        try:
            with nova_context.target_cell(context, cell) as cctxt:
                cell_service = lookup_fn(cctxt)
        except exception.NotFound:
            # NOTE(danms): Keep looking in other cells
            continue
        if service and cell_service:
            # The same non-uuid id matched in more than one cell.
            raise exception.ServiceNotUnique()
        service = cell_service
        found_in_cell = cell
        if service and is_uuid:
            break

    if service:
        # NOTE(danms): Set the cell on the context so it remains
        # when we return to our caller
        nova_context.set_target_cell(context, found_in_cell)
        return service
    else:
        raise exception.NotFound()
class HostAPI(base.Base):
    """Sub-set of the Compute Manager API for managing host operations."""

    def __init__(self, rpcapi=None):
        # Allow an alternate RPC API to be injected (e.g. for the cells
        # variant or tests); default to the standard compute RPC API.
        self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
        self.servicegroup_api = servicegroup.API()
        super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = objects.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
@wrap_exception()
@target_host_cell
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'enabled': enabled}
compute_utils.notify_about_host_update(context,
'set_enabled.start',
payload)
result = self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
compute_utils.notify_about_host_update(context,
'set_enabled.end',
payload)
return result
@target_host_cell
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
@wrap_exception()
@target_host_cell
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'action': action}
compute_utils.notify_about_host_update(context,
'power_action.start',
payload)
result = self.rpcapi.host_power_action(context, action=action,
host=host_name)
compute_utils.notify_about_host_update(context,
'power_action.end',
payload)
return result
@wrap_exception()
@target_host_cell
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'mode': mode}
compute_utils.notify_about_host_update(context,
'set_maintenance.start',
payload)
result = self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
compute_utils.notify_about_host_update(context,
'set_maintenance.end',
payload)
return result
def service_get_all(self, context, filters=None, set_zones=False,
all_cells=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
attributes and matching values. Ie, to get a list of services for
the 'compute' topic, use filters={'topic': 'compute'}.
If all_cells=True, then scan all cells and merge the results.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
# NOTE(danms): Eventually this all_cells nonsense should go away
# and we should always iterate over the cells. However, certain
# callers need the legacy behavior for now.
if all_cells:
load_cells()
services = []
for cell in CELLS:
with nova_context.target_cell(context, cell) as cctxt:
cell_services = objects.ServiceList.get_all(
cctxt, disabled, set_zones=set_zones)
services.extend(cell_services)
else:
services = objects.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
for service in services:
for key, val in filters.items():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_id(self, context, service_id):
"""Get service entry for the given service id or uuid."""
try:
return _find_service_in_cell(context, service_id=service_id)
except exception.NotFound:
raise exception.ServiceNotFound(service_id=service_id)
@target_host_cell
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return objects.Service.get_by_compute_host(context, host_name)
def _service_update(self, context, host_name, binary, params_to_update):
"""Performs the actual service update operation."""
service = objects.Service.get_by_args(context, host_name, binary)
service.update(params_to_update)
service.save()
return service
@target_host_cell
def service_update(self, context, host_name, binary, params_to_update):
    """Enable / Disable a service.

    For compute services, disabling stops new builds and migrations
    from going to the host.
    """
    updated = self._service_update(context, host_name, binary,
                                   params_to_update)
    return updated
def _service_delete(self, context, service_id):
    """Locate the service in its cell and destroy it.

    :raises: ServiceNotFound if the id/uuid matches nothing
    """
    try:
        doomed = _find_service_in_cell(context, service_id=service_id)
    except exception.NotFound:
        raise exception.ServiceNotFound(service_id=service_id)
    doomed.destroy()
def service_delete(self, context, service_id):
    """Delete the service identified by integer id or uuid."""
    self._service_delete(context, service_id)
@target_host_cell
def instance_get_all_by_host(self, context, host_name):
    """List every instance resident on the named host."""
    instances = objects.InstanceList.get_by_host(context, host_name)
    return instances
def task_log_get_all(self, context, task_name, period_beginning,
                     period_ending, host=None, state=None):
    """Fetch task logs within a time range, optionally narrowed by
    host and/or state.
    """
    return self.db.task_log_get_all(
        context, task_name, period_beginning, period_ending,
        host=host, state=state)
def compute_node_get(self, context, compute_id):
    """Return compute node entry for particular integer ID or UUID.

    Every cell is scanned until a match is found.

    :raises: ComputeHostNotFound if no cell contains the node
    """
    load_cells()
    # NOTE(danms): Unfortunately this API exposes database identifiers
    # which means we really can't do something efficient here
    is_uuid = uuidutils.is_uuid_like(compute_id)
    for cell in CELLS:
        # Cell0 never contains compute nodes; skip it.
        if cell.uuid == objects.CellMapping.CELL0_UUID:
            continue
        with nova_context.target_cell(context, cell) as cctxt:
            try:
                if is_uuid:
                    return objects.ComputeNode.get_by_uuid(cctxt,
                                                           compute_id)
                return objects.ComputeNode.get_by_id(cctxt,
                                                     int(compute_id))
            except exception.ComputeHostNotFound:
                # NOTE(danms): Keep looking in other cells
                continue

    # Scanned every cell without finding the node.
    raise exception.ComputeHostNotFound(host=compute_id)
def compute_node_get_all(self, context, limit=None, marker=None):
    """Return a paginated list of compute nodes across all cells.

    ``marker`` may be a compute node uuid or a per-cell database id;
    when it is a uuid, the cell containing that node is located first
    and paging resumes from there.

    :raises: MarkerNotFound if a marker was supplied but matched
        nothing in any cell
    """
    load_cells()
    computes = []
    uuid_marker = marker and uuidutils.is_uuid_like(marker)
    for cell in CELLS:
        # Cell0 never contains compute nodes; skip it.
        if cell.uuid == objects.CellMapping.CELL0_UUID:
            continue
        with nova_context.target_cell(context, cell) as cctxt:

            # If we have a marker and it's a uuid, see if the compute node
            # is in this cell.
            if marker and uuid_marker:
                try:
                    compute_marker = objects.ComputeNode.get_by_uuid(
                        cctxt, marker)
                    # we found the marker compute node, so use it's id
                    # for the actual marker for paging in this cell's db
                    marker = compute_marker.id
                except exception.ComputeHostNotFound:
                    # The marker node isn't in this cell so keep looking.
                    continue

            try:
                cell_computes = objects.ComputeNodeList.get_by_pagination(
                    cctxt, limit=limit, marker=marker)
            except exception.MarkerNotFound:
                # NOTE(danms): Keep looking through cells
                continue
            computes.extend(cell_computes)
            # NOTE(danms): We must have found the marker, so continue on
            # without one
            marker = None
            if limit:
                # Shrink the remaining page budget; stop once filled.
                limit -= len(cell_computes)
                if limit <= 0:
                    break
    if marker is not None and len(computes) == 0:
        # NOTE(danms): If we did not find the marker in any cell,
        # mimic the db_api behavior here.
        raise exception.MarkerNotFound(marker=marker)
    return objects.ComputeNodeList(objects=computes)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
    """Collect, across every cell, the compute nodes whose hypervisor
    hostname matches the given pattern.
    """
    load_cells()
    matches = []
    for cell in CELLS:
        # Cell0 never contains compute nodes; skip it.
        if cell.uuid == objects.CellMapping.CELL0_UUID:
            continue
        with nova_context.target_cell(context, cell) as cctxt:
            matches.extend(objects.ComputeNodeList.get_by_hypervisor(
                cctxt, hypervisor_match))
    return objects.ComputeNodeList(objects=matches)
def compute_node_statistics(self, context):
    """Aggregate compute node statistics across every cell.

    Returns an empty dict when no cell reports statistics.
    """
    load_cells()
    per_cell = []
    for cell in CELLS:
        if cell.uuid == objects.CellMapping.CELL0_UUID:
            continue
        with nova_context.target_cell(context, cell) as cctxt:
            per_cell.append(self.db.compute_node_statistics(cctxt))

    if not per_cell:
        return {}
    # Every cell reports the same stat keys; sum each across cells.
    return {key: sum(stats[key] for stats in per_cell)
            for key in per_cell[0].keys()}
class InstanceActionAPI(base.Base):
    """Sub-set of the Compute Manager API for managing instance actions."""

    def actions_get(self, context, instance, limit=None, marker=None,
                    filters=None):
        """Page through the actions recorded against an instance."""
        return objects.InstanceActionList.get_by_instance_uuid(
            context, instance.uuid, limit, marker, filters)

    def action_get_by_request_id(self, context, instance, request_id):
        """Fetch a single instance action by its originating request id."""
        return objects.InstanceAction.get_by_request_id(
            context, instance.uuid, request_id)

    def action_events_get(self, context, instance, action_id):
        """List the events recorded for one instance action."""
        return objects.InstanceActionEventList.get_by_action(
            context, action_id)
class AggregateAPI(base.Base):
    """Sub-set of the Compute Manager API for managing host aggregates."""

    def __init__(self, **kwargs):
        # RPC client for telling compute hosts about membership changes,
        # and the scheduler client for keeping the scheduler's aggregate
        # view in sync.
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.scheduler_client = scheduler_client.SchedulerClient()
        super(AggregateAPI, self).__init__(**kwargs)

    @wrap_exception()
    def create_aggregate(self, context, aggregate_name, availability_zone):
        """Creates the model for the aggregate."""
        aggregate = objects.Aggregate(context=context)
        aggregate.name = aggregate_name
        if availability_zone:
            aggregate.metadata = {'availability_zone': availability_zone}
        aggregate.create()
        # Let the scheduler know about the new aggregate right away.
        self.scheduler_client.update_aggregates(context, [aggregate])
        return aggregate

    def get_aggregate(self, context, aggregate_id):
        """Get an aggregate by id."""
        return objects.Aggregate.get_by_id(context, aggregate_id)

    def get_aggregate_list(self, context):
        """Get all the aggregates."""
        return objects.AggregateList.get_all(context)

    def get_aggregates_by_host(self, context, compute_host):
        """Get all the aggregates where the given host is presented."""
        return objects.AggregateList.get_by_host(context, compute_host)

    @wrap_exception()
    def update_aggregate(self, context, aggregate_id, values):
        """Update the properties of an aggregate.

        A 'name' entry is applied directly to the aggregate; any other
        entries in ``values`` are treated as metadata updates.
        """
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        if 'name' in values:
            aggregate.name = values.pop('name')
            aggregate.save()
        # Refuse changes that would leave hosts in conflicting AZs.
        self.is_safe_to_update_az(context, values, aggregate=aggregate,
                                  action_name=AGGREGATE_ACTION_UPDATE)
        if values:
            aggregate.update_metadata(values)
            aggregate.updated_at = timeutils.utcnow()
        self.scheduler_client.update_aggregates(context, [aggregate])
        # If updated values include availability_zones, then the cache
        # which stored availability_zones and host need to be reset
        if values.get('availability_zone'):
            availability_zones.reset_cache()
        return aggregate

    @wrap_exception()
    def update_aggregate_metadata(self, context, aggregate_id, metadata):
        """Updates the aggregate metadata."""
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        # Refuse changes that would leave hosts in conflicting AZs.
        self.is_safe_to_update_az(context, metadata, aggregate=aggregate,
                                  action_name=AGGREGATE_ACTION_UPDATE_META)
        aggregate.update_metadata(metadata)
        self.scheduler_client.update_aggregates(context, [aggregate])
        # If updated metadata include availability_zones, then the cache
        # which stored availability_zones and host need to be reset
        if metadata and metadata.get('availability_zone'):
            availability_zones.reset_cache()
        aggregate.updated_at = timeutils.utcnow()
        return aggregate

    @wrap_exception()
    def delete_aggregate(self, context, aggregate_id):
        """Deletes the aggregate.

        Both legacy ("delete.start"/"delete.end") and versioned
        notifications are emitted around the operation.
        """
        aggregate_payload = {'aggregate_id': aggregate_id}
        compute_utils.notify_about_aggregate_update(context,
                                                    "delete.start",
                                                    aggregate_payload)
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        compute_utils.notify_about_aggregate_action(
            context=context,
            aggregate=aggregate,
            action=fields_obj.NotificationAction.DELETE,
            phase=fields_obj.NotificationPhase.START)
        # Refuse to delete an aggregate that still has member hosts.
        if len(aggregate.hosts) > 0:
            msg = _("Host aggregate is not empty")
            raise exception.InvalidAggregateActionDelete(
                aggregate_id=aggregate_id, reason=msg)
        aggregate.destroy()
        self.scheduler_client.delete_aggregate(context, aggregate)
        compute_utils.notify_about_aggregate_update(context,
                                                    "delete.end",
                                                    aggregate_payload)
        compute_utils.notify_about_aggregate_action(
            context=context,
            aggregate=aggregate,
            action=fields_obj.NotificationAction.DELETE,
            phase=fields_obj.NotificationPhase.END)

    def is_safe_to_update_az(self, context, metadata, aggregate,
                             hosts=None,
                             action_name=AGGREGATE_ACTION_ADD):
        """Determine if updates alter an aggregate's availability zone.

        :param context: local context
        :param metadata: Target metadata for updating aggregate
        :param aggregate: Aggregate to update
        :param hosts: Hosts to check. If None, aggregate.hosts is used
        :type hosts: list
        :action_name: Calling method for logging purposes
        """
        if 'availability_zone' in metadata:
            if not metadata['availability_zone']:
                msg = _("Aggregate %s does not support empty named "
                        "availability zone") % aggregate.name
                self._raise_invalid_aggregate_exc(action_name, aggregate.id,
                                                  msg)
            _hosts = hosts or aggregate.hosts
            # Find every AZ-bearing aggregate touching any checked host.
            host_aggregates = objects.AggregateList.get_by_metadata_key(
                context, 'availability_zone', hosts=_hosts)
            conflicting_azs = [
                agg.availability_zone for agg in host_aggregates
                if agg.availability_zone != metadata['availability_zone']
                and agg.id != aggregate.id]
            if conflicting_azs:
                msg = _("One or more hosts already in availability zone(s) "
                        "%s") % conflicting_azs
                self._raise_invalid_aggregate_exc(action_name, aggregate.id,
                                                  msg)

    def _raise_invalid_aggregate_exc(self, action_name, aggregate_id, reason):
        # Map the calling action onto its matching exception type.
        if action_name == AGGREGATE_ACTION_ADD:
            raise exception.InvalidAggregateActionAdd(
                aggregate_id=aggregate_id, reason=reason)
        elif action_name == AGGREGATE_ACTION_UPDATE:
            raise exception.InvalidAggregateActionUpdate(
                aggregate_id=aggregate_id, reason=reason)
        elif action_name == AGGREGATE_ACTION_UPDATE_META:
            raise exception.InvalidAggregateActionUpdateMeta(
                aggregate_id=aggregate_id, reason=reason)
        elif action_name == AGGREGATE_ACTION_DELETE:
            raise exception.InvalidAggregateActionDelete(
                aggregate_id=aggregate_id, reason=reason)
        # Unknown action name is a programming error.
        raise exception.NovaException(
            _("Unexpected aggregate action %s") % action_name)

    def _update_az_cache_for_host(self, context, host_name, aggregate_meta):
        # Update the availability_zone cache to avoid getting wrong
        # availability_zone in cache retention time when add/remove
        # host to/from aggregate.
        if aggregate_meta and aggregate_meta.get('availability_zone'):
            availability_zones.update_host_availability_zone_cache(context,
                                                                   host_name)

    @wrap_exception()
    def add_host_to_aggregate(self, context, aggregate_id, host_name):
        """Adds the host to an aggregate."""
        aggregate_payload = {'aggregate_id': aggregate_id,
                             'host_name': host_name}
        compute_utils.notify_about_aggregate_update(context,
                                                    "addhost.start",
                                                    aggregate_payload)
        # validates the host; HostMappingNotFound or ComputeHostNotFound
        # is raised if invalid
        try:
            mapping = objects.HostMapping.get_by_host(context, host_name)
            nova_context.set_target_cell(context, mapping.cell_mapping)
            objects.Service.get_by_compute_host(context, host_name)
        except exception.HostMappingNotFound:
            try:
                # NOTE(danms): This targets our cell
                _find_service_in_cell(context, service_host=host_name)
            except exception.NotFound:
                raise exception.ComputeHostNotFound(host=host_name)
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        compute_utils.notify_about_aggregate_action(
            context=context,
            aggregate=aggregate,
            action=fields_obj.NotificationAction.ADD_HOST,
            phase=fields_obj.NotificationPhase.START)
        # Make sure adding the host does not create AZ conflicts.
        self.is_safe_to_update_az(context, aggregate.metadata,
                                  hosts=[host_name], aggregate=aggregate)
        aggregate.add_host(host_name)
        self.scheduler_client.update_aggregates(context, [aggregate])
        self._update_az_cache_for_host(context, host_name, aggregate.metadata)
        # NOTE(jogo): Send message to host to support resource pools
        self.compute_rpcapi.add_aggregate_host(context,
            aggregate=aggregate, host_param=host_name, host=host_name)
        aggregate_payload.update({'name': aggregate.name})
        compute_utils.notify_about_aggregate_update(context,
                                                    "addhost.end",
                                                    aggregate_payload)
        compute_utils.notify_about_aggregate_action(
            context=context,
            aggregate=aggregate,
            action=fields_obj.NotificationAction.ADD_HOST,
            phase=fields_obj.NotificationPhase.END)
        return aggregate

    @wrap_exception()
    def remove_host_from_aggregate(self, context, aggregate_id, host_name):
        """Removes host from the aggregate."""
        aggregate_payload = {'aggregate_id': aggregate_id,
                             'host_name': host_name}
        compute_utils.notify_about_aggregate_update(context,
                                                    "removehost.start",
                                                    aggregate_payload)
        # validates the host; HostMappingNotFound or ComputeHostNotFound
        # is raised if invalid
        mapping = objects.HostMapping.get_by_host(context, host_name)
        nova_context.set_target_cell(context, mapping.cell_mapping)
        objects.Service.get_by_compute_host(context, host_name)
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        compute_utils.notify_about_aggregate_action(
            context=context,
            aggregate=aggregate,
            action=fields_obj.NotificationAction.REMOVE_HOST,
            phase=fields_obj.NotificationPhase.START)
        aggregate.delete_host(host_name)
        self.scheduler_client.update_aggregates(context, [aggregate])
        self._update_az_cache_for_host(context, host_name, aggregate.metadata)
        self.compute_rpcapi.remove_aggregate_host(context,
            aggregate=aggregate, host_param=host_name, host=host_name)
        compute_utils.notify_about_aggregate_update(context,
                                                    "removehost.end",
                                                    aggregate_payload)
        compute_utils.notify_about_aggregate_action(
            context=context,
            aggregate=aggregate,
            action=fields_obj.NotificationAction.REMOVE_HOST,
            phase=fields_obj.NotificationPhase.END)
        return aggregate
class KeypairAPI(base.Base):
    """Subset of the Compute Manager API for managing key pairs."""

    # Notifier/exception-wrapper configured so keypair operations are
    # reported under the 'nova-api' binary.
    get_notifier = functools.partial(rpc.get_notifier, service='api')
    wrap_exception = functools.partial(exception_wrapper.wrap_exception,
                                       get_notifier=get_notifier,
                                       binary='nova-api')

    def _notify(self, context, event_suffix, keypair_name):
        """Emit a legacy 'keypair.<event_suffix>' notification."""
        payload = {
            'tenant_id': context.project_id,
            'user_id': context.user_id,
            'key_name': keypair_name,
        }
        notify = self.get_notifier()
        notify.info(context, 'keypair.%s' % event_suffix, payload)

    def _validate_new_key_pair(self, context, user_id, key_name, key_type):
        """Validate the name and quota headroom for a new keypair.

        :raises: InvalidKeypair if the name contains unsafe characters
            or has a bad length
        :raises: KeypairLimitExceeded if the user is over quota
        """
        # Only allow names built from a whitelist of safe characters.
        safe_chars = "_- " + string.digits + string.ascii_letters
        clean_value = "".join(x for x in key_name if x in safe_chars)
        if clean_value != key_name:
            raise exception.InvalidKeypair(
                reason=_("Keypair name contains unsafe characters"))
        try:
            utils.check_string_length(key_name, min_length=1, max_length=255)
        except exception.InvalidInput:
            raise exception.InvalidKeypair(
                reason=_('Keypair name must be string and between '
                         '1 and 255 characters long'))
        try:
            objects.Quotas.check_deltas(context, {'key_pairs': 1}, user_id)
        except exception.OverQuota:
            raise exception.KeypairLimitExceeded()

    @wrap_exception()
    def import_key_pair(self, context, user_id, key_name, public_key,
                        key_type=keypair_obj.KEYPAIR_TYPE_SSH):
        """Import a key pair using an existing public key."""
        self._validate_new_key_pair(context, user_id, key_name, key_type)
        self._notify(context, 'import.start', key_name)
        keypair = objects.KeyPair(context)
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.type = key_type
        keypair.fingerprint = None
        keypair.public_key = public_key
        compute_utils.notify_about_keypair_action(
            context=context,
            keypair=keypair,
            action=fields_obj.NotificationAction.IMPORT,
            phase=fields_obj.NotificationPhase.START)
        # Derive the fingerprint from the supplied public key.
        fingerprint = self._generate_fingerprint(public_key, key_type)
        keypair.fingerprint = fingerprint
        keypair.create()
        compute_utils.notify_about_keypair_action(
            context=context,
            keypair=keypair,
            action=fields_obj.NotificationAction.IMPORT,
            phase=fields_obj.NotificationPhase.END)
        self._notify(context, 'import.end', key_name)
        return keypair

    @wrap_exception()
    def create_key_pair(self, context, user_id, key_name,
                        key_type=keypair_obj.KEYPAIR_TYPE_SSH):
        """Create a new key pair.

        :returns: a (keypair, private_key) tuple
        """
        self._validate_new_key_pair(context, user_id, key_name, key_type)
        keypair = objects.KeyPair(context)
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.type = key_type
        keypair.fingerprint = None
        keypair.public_key = None
        self._notify(context, 'create.start', key_name)
        compute_utils.notify_about_keypair_action(
            context=context,
            keypair=keypair,
            action=fields_obj.NotificationAction.CREATE,
            phase=fields_obj.NotificationPhase.START)
        private_key, public_key, fingerprint = self._generate_key_pair(
            user_id, key_type)
        keypair.fingerprint = fingerprint
        keypair.public_key = public_key
        keypair.create()
        # NOTE(melwitt): We recheck the quota after creating the object to
        # prevent users from allocating more resources than their allowed quota
        # in the event of a race. This is configurable because it can be
        # expensive if strict quota limits are not required in a deployment.
        if CONF.quota.recheck_quota:
            try:
                objects.Quotas.check_deltas(context, {'key_pairs': 0}, user_id)
            except exception.OverQuota:
                keypair.destroy()
                raise exception.KeypairLimitExceeded()
        compute_utils.notify_about_keypair_action(
            context=context,
            keypair=keypair,
            action=fields_obj.NotificationAction.CREATE,
            phase=fields_obj.NotificationPhase.END)
        self._notify(context, 'create.end', key_name)
        return keypair, private_key

    def _generate_fingerprint(self, public_key, key_type):
        # Falls through (returns None) for unrecognized key types.
        if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
            return crypto.generate_fingerprint(public_key)
        elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
            return crypto.generate_x509_fingerprint(public_key)

    def _generate_key_pair(self, user_id, key_type):
        # Returns a (private_key, public_key, fingerprint) tuple (see
        # create_key_pair); falls through (None) for unknown types.
        if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
            return crypto.generate_key_pair()
        elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
            return crypto.generate_winrm_x509_cert(user_id)

    @wrap_exception()
    def delete_key_pair(self, context, user_id, key_name):
        """Delete a keypair by name."""
        self._notify(context, 'delete.start', key_name)
        keypair = self.get_key_pair(context, user_id, key_name)
        compute_utils.notify_about_keypair_action(
            context=context,
            keypair=keypair,
            action=fields_obj.NotificationAction.DELETE,
            phase=fields_obj.NotificationPhase.START)
        objects.KeyPair.destroy_by_name(context, user_id, key_name)
        compute_utils.notify_about_keypair_action(
            context=context,
            keypair=keypair,
            action=fields_obj.NotificationAction.DELETE,
            phase=fields_obj.NotificationPhase.END)
        self._notify(context, 'delete.end', key_name)

    def get_key_pairs(self, context, user_id, limit=None, marker=None):
        """List key pairs."""
        return objects.KeyPairList.get_by_user(
            context, user_id, limit=limit, marker=marker)

    def get_key_pair(self, context, user_id, key_name):
        """Get a keypair by name."""
        return objects.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
    """Sub-set of the Compute API related to managing security groups
    and security group rules
    """

    # The nova security group api does not use a uuid for the id.
    id_is_uuid = False

    def __init__(self, **kwargs):
        super(SecurityGroupAPI, self).__init__(**kwargs)
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()

    def validate_property(self, value, property, allowed):
        """Validate given security group property.

        :param value: the value to validate, as a string or unicode
        :param property: the property, either 'name' or 'description'
        :param allowed: the range of characters allowed
        """
        try:
            val = value.strip()
        except AttributeError:
            # Non-string values have no .strip().
            msg = _("Security group %s is not a string or unicode") % property
            self.raise_invalid_property(msg)
        utils.check_string_length(val, name=property, min_length=1,
                                  max_length=255)
        if allowed and not re.match(allowed, val):
            # Some validation to ensure that values match API spec.
            # - Alphanumeric characters, spaces, dashes, and underscores.
            # TODO(Daviey): LP: #813685 extend beyond group_name checking, and
            # probably create a param validator that can be used elsewhere.
            msg = (_("Value (%(value)s) for parameter Group%(property)s is "
                     "invalid. Content limited to '%(allowed)s'.") %
                   {'value': value, 'allowed': allowed,
                    'property': property.capitalize()})
            self.raise_invalid_property(msg)

    def ensure_default(self, context):
        """Ensure that a context has a security group.

        Creates a security group for the security context if it does not
        already exist.

        :param context: the security context
        """
        self.db.security_group_ensure_default(context)

    def create_security_group(self, context, name, description):
        """Create a security group, enforcing the project/user quota."""
        try:
            objects.Quotas.check_deltas(context, {'security_groups': 1},
                                        context.project_id,
                                        user_id=context.user_id)
        except exception.OverQuota:
            msg = _("Quota exceeded, too many security groups.")
            self.raise_over_quota(msg)
        LOG.info("Create Security Group %s", name)
        self.ensure_default(context)
        group = {'user_id': context.user_id,
                 'project_id': context.project_id,
                 'name': name,
                 'description': description}
        try:
            group_ref = self.db.security_group_create(context, group)
        except exception.SecurityGroupExists:
            msg = _('Security group %s already exists') % name
            self.raise_group_already_exists(msg)
        # NOTE(melwitt): We recheck the quota after creating the object to
        # prevent users from allocating more resources than their allowed quota
        # in the event of a race. This is configurable because it can be
        # expensive if strict quota limits are not required in a deployment.
        if CONF.quota.recheck_quota:
            try:
                objects.Quotas.check_deltas(context, {'security_groups': 0},
                                            context.project_id,
                                            user_id=context.user_id)
            except exception.OverQuota:
                self.db.security_group_destroy(context, group_ref['id'])
                msg = _("Quota exceeded, too many security groups.")
                self.raise_over_quota(msg)
        return group_ref

    def update_security_group(self, context, security_group,
                              name, description):
        """Rename/re-describe a group; system groups are immutable."""
        if security_group['name'] in RO_SECURITY_GROUPS:
            msg = (_("Unable to update system group '%s'") %
                   security_group['name'])
            self.raise_invalid_group(msg)
        group = {'name': name,
                 'description': description}
        columns_to_join = ['rules.grantee_group']
        group_ref = self.db.security_group_update(context,
                                                  security_group['id'],
                                                  group,
                                                  columns_to_join=columns_to_join)
        return group_ref

    def get(self, context, name=None, id=None, map_exception=False):
        """Fetch one security group by name or id, rules included.

        With map_exception=True a NotFound is converted via
        raise_not_found(); otherwise it propagates unchanged.
        """
        self.ensure_default(context)
        cols = ['rules']
        try:
            if name:
                return self.db.security_group_get_by_name(context,
                                                          context.project_id,
                                                          name,
                                                          columns_to_join=cols)
            elif id:
                return self.db.security_group_get(context, id,
                                                  columns_to_join=cols)
        except exception.NotFound as exp:
            if map_exception:
                msg = exp.format_message()
                self.raise_not_found(msg)
            else:
                raise

    def list(self, context, names=None, ids=None, project=None,
             search_opts=None):
        """List groups by names/ids, or by project (admins may list all)."""
        self.ensure_default(context)
        groups = []
        if names or ids:
            if names:
                for name in names:
                    groups.append(self.db.security_group_get_by_name(context,
                                                                     project,
                                                                     name))
            if ids:
                for id in ids:
                    groups.append(self.db.security_group_get(context, id))
        elif context.is_admin:
            # TODO(eglynn): support a wider set of search options than just
            # all_tenants, at least include the standard filters defined for
            # the EC2 DescribeSecurityGroups API for the non-admin case also
            if (search_opts and 'all_tenants' in search_opts):
                groups = self.db.security_group_get_all(context)
            else:
                groups = self.db.security_group_get_by_project(context,
                                                               project)
        elif project:
            groups = self.db.security_group_get_by_project(context, project)
        return groups

    def destroy(self, context, security_group):
        """Delete a group unless it is a system group or still in use."""
        if security_group['name'] in RO_SECURITY_GROUPS:
            msg = _("Unable to delete system group '%s'") % \
                  security_group['name']
            self.raise_invalid_group(msg)
        if self.db.security_group_in_use(context, security_group['id']):
            msg = _("Security group is still in use")
            self.raise_invalid_group(msg)
        LOG.info("Delete security group %s", security_group['name'])
        self.db.security_group_destroy(context, security_group['id'])

    def is_associated_with_server(self, security_group, instance_uuid):
        """Check if the security group is already associated
        with the instance. If Yes, return True.
        """
        if not security_group:
            return False
        instances = security_group.get('instances')
        if not instances:
            return False
        for inst in instances:
            if (instance_uuid == inst['uuid']):
                return True
        return False

    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""
        security_group = self.db.security_group_get_by_name(context,
                                                            context.project_id,
                                                            security_group_name)
        instance_uuid = instance.uuid
        # check if the security group is associated with the server
        if self.is_associated_with_server(security_group, instance_uuid):
            raise exception.SecurityGroupExistsForInstance(
                security_group_id=security_group['id'],
                instance_id=instance_uuid)
        self.db.instance_add_security_group(context.elevated(),
                                            instance_uuid,
                                            security_group['id'])
        if instance.host:
            # Ask the hosting compute node to re-apply its rules.
            self.compute_rpcapi.refresh_instance_security_rules(
                context, instance, instance.host)

    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        security_group = self.db.security_group_get_by_name(context,
                                                            context.project_id,
                                                            security_group_name)
        instance_uuid = instance.uuid
        # check if the security group is associated with the server
        if not self.is_associated_with_server(security_group, instance_uuid):
            raise exception.SecurityGroupNotExistsForInstance(
                security_group_id=security_group['id'],
                instance_id=instance_uuid)
        self.db.instance_remove_security_group(context.elevated(),
                                               instance_uuid,
                                               security_group['id'])
        if instance.host:
            # Ask the hosting compute node to re-apply its rules.
            self.compute_rpcapi.refresh_instance_security_rules(
                context, instance, instance.host)

    def get_rule(self, context, id):
        """Look up a single security group rule by id."""
        self.ensure_default(context)
        try:
            return self.db.security_group_rule_get(context, id)
        except exception.NotFound:
            msg = _("Rule (%s) not found") % id
            self.raise_not_found(msg)

    def add_rules(self, context, id, name, vals):
        """Add security group rule(s) to security group.

        Note: the Nova security group API doesn't support adding multiple
        security group rules at once but the EC2 one does. Therefore,
        this function is written to support both.
        """
        try:
            objects.Quotas.check_deltas(context,
                                        {'security_group_rules': len(vals)},
                                        id)
        except exception.OverQuota:
            msg = _("Quota exceeded, too many security group rules.")
            self.raise_over_quota(msg)
        msg = ("Security group %(name)s added %(protocol)s ingress "
               "(%(from_port)s:%(to_port)s)")
        rules = []
        for v in vals:
            rule = self.db.security_group_rule_create(context, v)
            # NOTE(melwitt): We recheck the quota after creating the object to
            # prevent users from allocating more resources than their allowed
            # quota in the event of a race. This is configurable because it can
            # be expensive if strict quota limits are not required in a
            # deployment.
            if CONF.quota.recheck_quota:
                try:
                    objects.Quotas.check_deltas(context,
                                                {'security_group_rules': 0},
                                                id)
                except exception.OverQuota:
                    self.db.security_group_rule_destroy(context, rule['id'])
                    msg = _("Quota exceeded, too many security group rules.")
                    self.raise_over_quota(msg)
            rules.append(rule)
            LOG.info(msg, {'name': name,
                           'protocol': rule.protocol,
                           'from_port': rule.from_port,
                           'to_port': rule.to_port})
        # Firewalls on affected hosts need to pick up the new rules.
        self.trigger_rules_refresh(context, id=id)
        return rules

    def remove_rules(self, context, security_group, rule_ids):
        """Delete the given rule ids and refresh affected instances."""
        msg = ("Security group %(name)s removed %(protocol)s ingress "
               "(%(from_port)s:%(to_port)s)")
        for rule_id in rule_ids:
            rule = self.get_rule(context, rule_id)
            LOG.info(msg, {'name': security_group['name'],
                           'protocol': rule.protocol,
                           'from_port': rule.from_port,
                           'to_port': rule.to_port})
            self.db.security_group_rule_destroy(context, rule_id)
        # NOTE(vish): we removed some rules, so refresh
        self.trigger_rules_refresh(context, id=security_group['id'])

    def remove_default_rules(self, context, rule_ids):
        """Delete default security group rules by id."""
        for rule_id in rule_ids:
            self.db.security_group_default_rule_destroy(context, rule_id)

    def add_default_rules(self, context, vals):
        """Create default security group rules from the given dicts."""
        rules = [self.db.security_group_default_rule_create(context, v)
                 for v in vals]
        return rules

    def default_rule_exists(self, context, values):
        """Indicates whether the specified rule values are already
        defined in the default security group rules.
        """
        for rule in self.db.security_group_default_rule_list(context):
            keys = ('cidr', 'from_port', 'to_port', 'protocol')
            for key in keys:
                if rule.get(key) != values.get(key):
                    break
            else:
                # All fields matched: return the matching rule's id (or
                # True when the stored rule has no id).
                return rule.get('id') or True
        return False

    def get_all_default_rules(self, context):
        """List all default security group rules."""
        try:
            rules = self.db.security_group_default_rule_list(context)
        except Exception:
            msg = 'cannot get default security group rules'
            raise exception.SecurityGroupDefaultRuleNotFound(msg)
        return rules

    def get_default_rule(self, context, id):
        """Fetch one default security group rule by id."""
        return self.db.security_group_default_rule_get(context, id)

    def validate_id(self, id):
        """Coerce a security group id to int; raise if not numeric."""
        try:
            return int(id)
        except ValueError:
            msg = _("Security group id should be integer")
            self.raise_invalid_property(msg)

    def _refresh_instance_security_rules(self, context, instances):
        # Only instances that are placed on a host can be refreshed.
        for instance in instances:
            if instance.host is not None:
                self.compute_rpcapi.refresh_instance_security_rules(
                    context, instance, instance.host)

    def trigger_rules_refresh(self, context, id):
        """Called when a rule is added to or removed from a security_group."""
        instances = objects.InstanceList.get_by_security_group_id(context, id)
        self._refresh_instance_security_rules(context, instances)

    def trigger_members_refresh(self, context, group_ids):
        """Called when a security group gains a new or loses a member.

        Sends an update request to each compute node for each instance for
        which this is relevant.
        """
        instances = objects.InstanceList.get_by_grantee_security_group_ids(
            context, group_ids)
        self._refresh_instance_security_rules(context, instances)

    def get_instance_security_groups(self, context, instance, detailed=False):
        """Return the groups attached to an instance.

        With detailed=True full DB records are returned; otherwise just
        a list of {'name': ...} dicts.
        """
        if detailed:
            return self.db.security_group_get_by_instance(context,
                                                          instance.uuid)
        return [{'name': group.name} for group in instance.security_groups]
| 46.289962 | 79 | 0.601188 |
02c6bb6b11ddd767194ce8051729f7c09c0d65d8 | 3,922 | py | Python | registryparse.py | mike-bailey/registry-csvdump | abab0572986cb0c074a21c56e7905b281522dd5e | [
"MIT"
] | null | null | null | registryparse.py | mike-bailey/registry-csvdump | abab0572986cb0c074a21c56e7905b281522dd5e | [
"MIT"
] | null | null | null | registryparse.py | mike-bailey/registry-csvdump | abab0572986cb0c074a21c56e7905b281522dd5e | [
"MIT"
] | null | null | null | import sys
import os
import base64
from Registry import Registry
import argparse
import calendar
import time
import binascii
# Python 2 idiom: reload() restores the setdefaultencoding() hook that
# site.py removes, so registry strings with non-ASCII bytes can be
# written without UnicodeEncodeError. There is no Python 3 equivalent.
reload(sys)
# Sets encoding
sys.setdefaultencoding('utf-8')

parser = argparse.ArgumentParser(description='Processes Registry Hives into relatively somewhat ok CSVs.')
parser.add_argument('-f','--filename', help='Parse a specific registry file')
parser.add_argument('-d','--directory', help='Parse everything in a folder')
args = parser.parse_args()

# At least one input source is required; otherwise show usage and bail.
# NOTE(review): '== None' would idiomatically be 'is None'.
if args.filename == None and args.directory == None:
    parser.print_help()
    exit(1)
def determine_type(valtype):
    """Map a python-registry value-type constant to its display name.

    Unrecognized types are reported as "N/A".
    """
    type_names = {
        Registry.RegSZ: "RegSZ",
        Registry.RegExpandSZ: "RegExSZ",
        Registry.RegBin: "RegBin",
        Registry.RegDWord: "RegDWord",
        Registry.RegNone: "RegNone",
        Registry.RegLink: "RegLink",
        Registry.RegFullResourceDescriptor: "RegFullResourceDescriptor",
        Registry.RegQWord: "RegQWord",
        Registry.RegResourceRequirementsList: "RegResourceRequirementsList",
        Registry.RegResourceList: "RegResourceList",
        Registry.RegMultiSZ: "RegMultiSZ",
    }
    return type_names.get(valtype, "N/A")
def clean(valuedata, valtype):
    """Normalize registry value data for CSV output.

    String-like/structured and numeric value types pass through
    unchanged; any other type is base64-encoded when possible
    (b2a_base64 raises TypeError on non-buffer data such as ints, in
    which case the raw value is returned).
    """
    type_name = determine_type(valtype)
    # BUG FIX: the original conditions read
    #     determine_type(valtype) is "RegSZ" or "RegBin" or ...
    # which parses as (x is "RegSZ") or ("RegBin") or ... and is always
    # truthy, so every value hit the first branch and the base64
    # fallback was dead code. Use real membership tests instead.
    if type_name in ("RegSZ", "RegLink", "RegResourceRequirementsList",
                     "RegResourceList", "RegFullResourceDescriptor",
                     "RegExSZ", "RegMultiSZ"):
        return valuedata
    elif type_name in ("RegDWord", "RegQWord"):
        return valuedata
    # If I don't know what to do, try to base64 it and if it's not ok
    # with that (it'd be bc it's an int), return it
    else:
        try:
            return binascii.b2a_base64(valuedata)
        except TypeError:
            return valuedata
def change(timestamp):
    """Format a registry key's datetime timestamp for CSV output."""
    pattern = "%Y-%m-%d %H:%M:%S"
    return timestamp.strftime(pattern)
def rec(key, depth=0, resultname="output_default.csv"):
    """Recursively write one CSV row per value under `key`.

    Writes to the module-level file handle ``f`` opened by the CLI code
    below; ``resultname`` is only threaded through the recursion.
    NOTE(review): fields are not CSV-escaped, so values containing commas
    produce malformed rows (pre-existing limitation).
    """
    # These value types carry raw binary payloads and must be base64-encoded.
    # (The original had three byte-identical branches; merged here.)
    binary_types = (Registry.RegBin,
                    Registry.RegResourceRequirementsList,
                    Registry.RegResourceList)
    for value in key.values():
        if value.value_type() in binary_types:
            # Strip the trailing newline that b2a_base64 appends.
            data = binascii.b2a_base64(value.value()).replace('\n', '')
        else:
            data = clean(value.value(), value.value_type())
        f.write("{},{},{},{},{}\n".format(key.path(),
                                          change(key.timestamp()),
                                          value.name(),
                                          determine_type(value.value_type()),
                                          data))
    for subkey in key.subkeys():
        rec(subkey, depth + 1, resultname)
if args.filename != None:
resultname = "{}_{}.csv".format(os.path.basename(args.filename),str(calendar.timegm(time.gmtime())))
f = open(resultname, 'a+')
f.write("Path, Timestamp, Key Name, Key Type, Key Data\n")
reg = Registry.Registry(args.filename)
rec(reg.root(), 0, resultname)
print "{} {}".format("Written data to",str(resultname))
if args.directory != None:
directory = "{}{}".format("output_",calendar.timegm(time.gmtime()))
if not os.path.exists(directory):
os.makedirs(directory)
(_, _, filenames) = os.walk(args.directory).next()
for file in filenames:
reg = Registry.Registry(args.directory+"/"+file)
resultname = directory+"/"+str(file+"_"+str(calendar.timegm(time.gmtime()))+".csv")
f = open(resultname, 'a+')
f.write("Path, Timestamp, Key Name, Key Type, Key Data")
rec(reg.root(), 0, resultname)
f.close()
print "Written data to "+str(resultname)
| 38.45098 | 190 | 0.717746 |
2636f847a25892fd83f3e055e7ae55ae507d5099 | 4,192 | py | Python | lib/uart/tb/test_uart_rx.py | bmindur/xfcp | dd2eef2cb23a17209180b278b1e2640fd09fda28 | [
"MIT"
] | 234 | 2015-05-12T11:40:21.000Z | 2022-03-29T12:27:40.000Z | lib/uart/tb/test_uart_rx.py | bmindur/xfcp | dd2eef2cb23a17209180b278b1e2640fd09fda28 | [
"MIT"
] | 7 | 2017-03-25T15:31:43.000Z | 2022-01-06T19:33:51.000Z | lib/uart/tb/test_uart_rx.py | bmindur/xfcp | dd2eef2cb23a17209180b278b1e2640fd09fda28 | [
"MIT"
] | 89 | 2015-11-06T02:04:13.000Z | 2022-03-22T14:50:39.000Z | #!/usr/bin/env python
"""
Copyright (c) 2014-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import uart_ep
# Name of the DUT module and of this testbench.
module = 'uart_rx'
testbench = 'test_%s' % module

# Verilog sources compiled into the simulation binary.
srcs = ["../rtl/%s.v" % module, "%s.v" % testbench]
src = ' '.join(srcs)

# Icarus Verilog command that builds the cosimulation executable.
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
    """Build the MyHDL/Icarus cosimulation testbench for uart_rx.

    Wires a UART byte source onto the DUT's rxd input and an AXI-Stream
    sink onto its output, then drives two walking-bit byte sequences and
    checks that the receiver recovers them exactly.
    """

    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])

    m_axis_tready = Signal(bool(0))
    rxd = Signal(bool(1))  # serial line idles high
    prescale = Signal(intbv(0)[16:])

    # Outputs
    m_axis_tdata = Signal(intbv(0)[8:])
    m_axis_tvalid = Signal(bool(0))
    busy = Signal(bool(0))
    overrun_error = Signal(bool(0))
    frame_error = Signal(bool(0))

    # sources and sinks
    sink_pause = Signal(bool(0))

    # UART transmitter model that feeds serial bytes into the DUT's rxd.
    source = uart_ep.UARTSource()

    source_logic = source.create_logic(
        clk,
        rst,
        txd=rxd,
        prescale=prescale,
        name='source'
    )

    # AXI-Stream sink collecting the bytes the DUT recovers.
    sink = axis_ep.AXIStreamSink()

    sink_logic = sink.create_logic(
        clk,
        rst,
        tdata=m_axis_tdata,
        tvalid=m_axis_tvalid,
        tready=m_axis_tready,
        pause=sink_pause,
        name='sink'
    )

    # DUT: compile the Verilog sources, then attach via MyHDL cosimulation.
    if os.system(build_cmd):
        raise Exception("Error running build command")

    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        m_axis_tdata=m_axis_tdata,
        m_axis_tvalid=m_axis_tvalid,
        m_axis_tready=m_axis_tready,
        rxd=rxd,
        busy=busy,
        overrun_error=overrun_error,
        frame_error=frame_error,
        prescale=prescale
    )

    # Free-running clock: toggles every 4 simulation time units.
    @always(delay(4))
    def clkgen():
        clk.next = not clk

    @instance
    def check():
        # Apply a reset pulse, then release and let things settle.
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge

        yield clk.posedge
        yield clk.posedge
        # Smallest prescale value — presumably the fastest bit rate, which
        # keeps the simulation short (confirm against the DUT's spec).
        prescale.next = 1;
        yield clk.posedge
        yield clk.posedge

        print("test 1: walk")
        current_test.next = 1

        # Walking-one pattern: each byte has a single bit set.
        source.write(b'\x00\x01\x02\x04\x08\x10\x20\x40\x80')
        yield clk.posedge

        yield delay(5000)

        yield delay(1000)

        yield clk.posedge
        rx_data = bytearray(sink.read())
        print(rx_data)
        assert rx_data == b'\x00\x01\x02\x04\x08\x10\x20\x40\x80'

        yield clk.posedge

        print("test 2: walk 2")
        current_test.next = 2

        # Filling-ones pattern: 0x00, 0x01, 0x03, ... up to 0xFF.
        source.write(b'\x00\x01\x03\x07\x0F\x1F\x3F\x7F\xFF')
        yield clk.posedge

        yield delay(5000)

        yield delay(1000)

        yield clk.posedge
        rx_data = bytearray(sink.read())
        print(rx_data)
        assert rx_data == b'\x00\x01\x03\x07\x0F\x1F\x3F\x7F\xFF'

        yield delay(100)

        raise StopSimulation

    return instances()
def test_bench():
    """Entry point used by the test runner: execute the simulation to completion."""
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    Simulation(bench()).run()


if __name__ == '__main__':
    print("Running test...")
    test_bench()
| 22.907104 | 77 | 0.635258 |
48ed515b7ba36228eec595c8e919946d3371e1bd | 8,005 | py | Python | auto_ge.py | AIESECMX/AUTO_GT | ab6b64d1d65cd161142f1967a3c718243944874c | [
"MIT"
] | null | null | null | auto_ge.py | AIESECMX/AUTO_GT | ab6b64d1d65cd161142f1967a3c718243944874c | [
"MIT"
] | null | null | null | auto_ge.py | AIESECMX/AUTO_GT | ab6b64d1d65cd161142f1967a3c718243944874c | [
"MIT"
] | null | null | null | #this mprogram is an miplementation that will look in the opportunitities for GT that were
#open in the last week and will consult the list of active gt eps in GetRepose for GT
import requests
import config
import json
import datetime
import urllib
import urllib2
import cookielib
import logging
from ep import EP
from opp import OP
import sys
sys.path.append("../")
from gis_token_generator import GIS
from get_response import GetResponse
# --- CONFIG VARS start ---
# Module-level clients and API tokens, created once at import time.
# GetResponse client instance used by every function below.
gr = GetResponse()
token_genrator = GIS()
# EXPA token (people/profile endpoints) and YOP token (opportunity search).
expa_token = token_genrator.generate_token(config.user_mail,config.user_pass)
yop_token = token_genrator.generate_op_token(config.user_mail,config.user_pass)
# NOTE(review): token generation performs network calls on import.
# --- CONFIG VARS end ---
# This method gets the GE opportunities opened in the last week and sends
# them to GetResponse for the appropriate profiles.
# It is supposed to run once a week and then notify GetResponse.
def notify_new_opps(expa_token):
    """Fetch one recent GE opportunity per partner country and push them to GetResponse.

    :param expa_token: OAuth token used for the opportunities API
    """
    # NOTE(review): `programs` is loaded but never used afterwards; kept
    # because opening backgrounds.json also validates that it exists.
    with open('backgrounds.json', 'r') as backgrounds:
        programs = json.loads(backgrounds.read())['data']

    url = 'https://gis-api.aiesec.org/v2/opportunities/'
    # One opportunity per partner MC, all fetched with the same query.
    # (The original repeated the fetch/expand pair five times; folded into
    # a single loop for consistency.)
    countries = {
        'peru_op': config.PERU,
        'colombia_op': config.COLOMBIA,
        'argentina_op': config.ARGENTINA,
        'costarica_op': config.COSTARICA,
        'brasil_op': config.Brasil,
    }
    opps = {}
    for key, mc in countries.items():
        opp_id = get_opps(expa_token, mc)
        # Expand the bare id into the full opportunity payload.
        opps[key] = json.loads(requests.get(url + str(opp_id) + '.json?access_token=' + expa_token).text)

    get_eps_gr_1(opps)
# This gets opportunities from EXPA using the YOP token.
def get_opps(aiesec_token, country=config.PERU):
    """Return the id of the newest GE (programme 5) opportunity for `country`.

    :param aiesec_token: OAuth token passed as the access_token query param
    :param country: home MC id to filter on (defaults to Peru)
    :return: the opportunity id of the first (newest) result
    """
    # (Removed unused `headersx` and `yesterday` locals from the original.)
    url = "https://gis-api.aiesec.org/v2/opportunities.json"
    params = {
        "access_token": aiesec_token,
        'filters[programmes][]': [5],  # GE programme
        "filters[home_mcs][]": [country],
        "per_page": 2,
        # "filters[work_fields][]": [724, 742],
        "filters[created][to]": datetime.date.today().strftime('%Y-%m-%d'),
        "sort": "filters[created][to]"
    }
    q = requests.get(url, params=params)
    ops_expa = json.loads(q.text)['data']
    # Only the first (newest) opportunity is used.
    return ops_expa[0]['id']
# This method gets EPs from GetResponse to match them with the opportunities
# and then updates their profiles.
def get_eps_gr_1(opps):
    """Walk recently-created GetResponse contacts and push `opps` to eligible non-applicants."""
    eps = None  # NOTE(review): unused
    # Sample one creation date per week over the last ~3 months
    # (3, 10, 17, ... days ago) to limit the number of API requests.
    day = 3
    while day < 90 :
        created = datetime.date.today()-datetime.timedelta(day)
        day += 7
        params = {
            'query[campaignId]':config.oge_gr_campaign_id,
            # from == to: only contacts created exactly on that day.
            'query[createdOn][from]':created.strftime('%Y-%m-%d'),
            'query[createdOn][to]':created.strftime('%Y-%m-%d'),
            'fields':''
        }
        query = 'contacts'
        contacts = gr.get_request(query,params = params)
        l = json.loads(contacts)
        non_applicants = []  # NOTE(review): unused
        for ep in l :
            # Fetch the full contact record and flatten its custom fields
            # into a name -> first-value dict.
            ep_aux = json.loads(gr.get_request(query+'/'+ep['contactId']))
            custom_fields = {}
            for cf in ep_aux['customFieldValues']:
                custom_fields[cf['name']] = cf['value'][0]
            # Skip contacts explicitly flagged as incomplete in GetResponse.
            if 'perfil_completo' in custom_fields:
                if custom_fields['perfil_completo'] != 'yes':
                    continue
            # Verify completeness against EXPA (also syncs the GR flag).
            if not is_profile_complete(custom_fields['expa_id'],ep['contactId']):
                continue
            # Only non-applicants (per GR flag, else per EXPA status) get
            # the new opportunities pushed to their profile.
            if 'aplicante' in custom_fields:
                if custom_fields['aplicante'] != 'yes' :
                    # Send the new opportunities to GetResponse.
                    send_opps(gr_id = ep['contactId'],opps = opps)
            elif not is_applicant(custom_fields['expa_id'],ep['contactId']):
                send_opps(gr_id = ep['contactId'],opps= opps)
def send_opps(gr_id , opps):
    """Write the five country opportunities into a contact's GetResponse custom fields.

    Each country gets a URL, a title and a 250-character description
    excerpt. The ``customFieldId`` values are the GetResponse ids of those
    fields (the original field names suggest they were repurposed from an
    older industry-based layout — the comments below preserve that mapping).
    """
    # Unpack the full opportunity payloads built by notify_new_opps().
    peru_op = opps['peru_op']
    colombia_op = opps['colombia_op']
    argentina_op = opps['argentina_op']
    costarica_op = opps['costarica_op']
    brasil_op = opps['brasil_op']
    params = {
        "customFieldValues": [
            # opportunity URL: Costa Rica
            {"customFieldId": 'zDY7G',"value": ['https://opportunities.aiesec.org/opportunity/'+str(costarica_op['id'])]},
            # opportunity URL: Colombia
            {"customFieldId": 'zDY7L',"value": ['https://opportunities.aiesec.org/opportunity/'+str(colombia_op['id'])]},
            # opportunity URL: Brasil
            {"customFieldId": 'zDY7o',"value": ['https://opportunities.aiesec.org/opportunity/'+str(brasil_op['id'])]},
            # opportunity URL: Argentina
            {"customFieldId": 'zDY74',"value": ['https://opportunities.aiesec.org/opportunity/'+str(argentina_op['id'])]},
            # opportunity URL: Peru
            {"customFieldId": 'zDY72',"value": ['https://opportunities.aiesec.org/opportunity/'+str(peru_op['id'])]},
            # title: Costa Rica
            {"customFieldId": 'zDY7q',"value": [costarica_op['title']]},
            # title: Colombia
            {"customFieldId": 'zDY7V',"value": [colombia_op['title']]},
            # title: Brasil
            {"customFieldId": 'zDY70',"value": [brasil_op['title']]},
            # title: Argentina
            {"customFieldId": 'zDY7X',"value": [argentina_op['title']]},
            # title: Peru
            {"customFieldId": 'zDY7m',"value": [peru_op['title']]},
            # description (first 250 chars): Costa Rica
            {"customFieldId": 'zDY7v',"value": [costarica_op['description'][:250]]},
            # description: Colombia
            {"customFieldId": 'zDY7B',"value": [colombia_op['description'][:250]]},
            # description: Brasil
            {"customFieldId": 'zDY7k',"value": [brasil_op['description'][:250]]},
            # description: Argentina
            {"customFieldId": 'zDY7i',"value": [argentina_op['description'][:250]]},
            # description: Peru
            {"customFieldId": 'zDY73',"value": [peru_op['description'][:250]]}
        ]
    }
    # Push all fields for this contact in a single POST.
    test = gr.post_requests('/contacts/'+str(gr_id)+'/custom-fields',data=params)
# This method checks if the profile is complete in EXPA and, if it is,
# notifies GetResponse by setting the perfil_completo flag.
def is_profile_complete(expa_id,gr_id,):
    """Return True when the EXPA person has no missing profile fields.

    Side effect: on completeness, sets the GetResponse custom field
    'zDY1V' (perfil_completo) to 'yes' for the contact.
    """
    print 'checando perfil completo de :'+str(expa_id)
    url = 'https://gis-api.aiesec.org/v2/people/'+str(expa_id)+'.json?access_token='+expa_token
    q = requests.get(url)
    ep = json.loads(q.text)
    if 'missing_profile_fields' in ep:
        if len(ep['missing_profile_fields']) == 0:
            # Mark the profile as complete in GetResponse.
            params = {
                "customFieldValues": [
                    {"customFieldId": 'zDY1V',"value": ['yes']}
                ]
            }
            test = gr.post_requests('/contacts/'+str(gr_id)+'/custom-fields',data=params)
            print 'perfil completo'
            print 'yes'
            return True
    # Either the key is absent or some fields are still missing.
    print 'no'
    return False
# This method checks if the EP is already applying to something in EXPA and,
# if so, marks them as an applicant in GetResponse.
def is_applicant(expa_id,gr_id):
    """Return True when the EXPA person's status is anything other than 'open'.

    Side effect: when True, sets the GetResponse custom field 'zDYEt'
    (aplicante) to 'yes' for the contact.
    """
    url = 'https://gis-api.aiesec.org/v2/people/'+str(expa_id)+'.json?access_token='+expa_token
    q = requests.get(url)
    ep = json.loads(q.text)
    if ep['status'] != 'open':
        # Mark the contact as an applicant in GetResponse.
        params = {
            "customFieldValues": [
                {"customFieldId": 'zDYEt',"value": ['yes']}
            ]
        }
        test = gr.post_requests('/contacts/'+str(gr_id)+'/custom-fields',data=params)
        print 'lo de si ya es palicante'
        return True
    return False
# the main method
def main():
    """Run the weekly AUTO_GE job: push the newest partner opportunities to GetResponse."""
    # Starts the full execution of the job using the YOP token generated above.
    notify_new_opps(yop_token)
    # print gr.get_request('custom-fields')


if __name__ == "__main__":
    main()
| 34.65368 | 122 | 0.686446 |
802fb945c0f8f4ed4235b1861da22a7843274442 | 10,814 | py | Python | src/oci/apm_synthetics/models/update_script_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/apm_synthetics/models/update_script_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/apm_synthetics/models/update_script_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateScriptDetails(object):
    """
    Details of the request body used to update a script.
    Only Side or JavaScript content types are supported and content should be in Side or JavaScript formats only.
    """

    #: A constant which can be used with the content_type property of a UpdateScriptDetails.
    #: This constant has a value of "SIDE"
    CONTENT_TYPE_SIDE = "SIDE"

    #: A constant which can be used with the content_type property of a UpdateScriptDetails.
    #: This constant has a value of "JS"
    CONTENT_TYPE_JS = "JS"

    def __init__(self, **kwargs):
        """
        Initializes a new UpdateScriptDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param display_name:
            The value to assign to the display_name property of this UpdateScriptDetails.
        :type display_name: str

        :param content_type:
            The value to assign to the content_type property of this UpdateScriptDetails.
            Allowed values for this property are: "SIDE", "JS"
        :type content_type: str

        :param content:
            The value to assign to the content property of this UpdateScriptDetails.
        :type content: str

        :param content_file_name:
            The value to assign to the content_file_name property of this UpdateScriptDetails.
        :type content_file_name: str

        :param parameters:
            The value to assign to the parameters property of this UpdateScriptDetails.
        :type parameters: list[oci.apm_synthetics.models.ScriptParameter]

        :param freeform_tags:
            The value to assign to the freeform_tags property of this UpdateScriptDetails.
        :type freeform_tags: dict(str, str)

        :param defined_tags:
            The value to assign to the defined_tags property of this UpdateScriptDetails.
        :type defined_tags: dict(str, dict(str, object))

        """
        # Declared OCI types of each attribute; used by the SDK's
        # (de)serialization machinery.
        self.swagger_types = {
            'display_name': 'str',
            'content_type': 'str',
            'content': 'str',
            'content_file_name': 'str',
            'parameters': 'list[ScriptParameter]',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))'
        }

        # Python attribute name -> JSON (wire) field name.
        self.attribute_map = {
            'display_name': 'displayName',
            'content_type': 'contentType',
            'content': 'content',
            'content_file_name': 'contentFileName',
            'parameters': 'parameters',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags'
        }

        # Backing fields; populated from kwargs by @init_model_state_from_kwargs.
        self._display_name = None
        self._content_type = None
        self._content = None
        self._content_file_name = None
        self._parameters = None
        self._freeform_tags = None
        self._defined_tags = None

    @property
    def display_name(self):
        """
        Gets the display_name of this UpdateScriptDetails.
        Unique name that can be edited. The name should not contain any confidential information.


        :return: The display_name of this UpdateScriptDetails.
        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        """
        Sets the display_name of this UpdateScriptDetails.
        Unique name that can be edited. The name should not contain any confidential information.


        :param display_name: The display_name of this UpdateScriptDetails.
        :type: str
        """
        self._display_name = display_name

    @property
    def content_type(self):
        """
        Gets the content_type of this UpdateScriptDetails.
        Content type of script.

        Allowed values for this property are: "SIDE", "JS"


        :return: The content_type of this UpdateScriptDetails.
        :rtype: str
        """
        return self._content_type

    @content_type.setter
    def content_type(self, content_type):
        """
        Sets the content_type of this UpdateScriptDetails.
        Content type of script.


        :param content_type: The content_type of this UpdateScriptDetails.
        :type: str
        """
        # Unlike most setters, this one validates against the allowed set;
        # None (or the none-sentinel) is also accepted.
        allowed_values = ["SIDE", "JS"]
        if not value_allowed_none_or_none_sentinel(content_type, allowed_values):
            raise ValueError(
                "Invalid value for `content_type`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._content_type = content_type

    @property
    def content(self):
        """
        Gets the content of this UpdateScriptDetails.
        The content of the script. It may contain custom-defined tags that can be used for setting dynamic parameters.
        The format to set dynamic parameters is: `<ORAP><ON>param name</ON><OV>param value</OV><OS>isParamValueSecret(true/false)</OS></ORAP>`.
        Param value and isParamValueSecret are optional, the default value for isParamValueSecret is false.

        Examples:
        With mandatory param name : `<ORAP><ON>param name</ON></ORAP>`
        With parameter name and value : `<ORAP><ON>param name</ON><OV>param value</OV></ORAP>`

        Note that the content is valid if it matches the given content type. For example, if the content type is SIDE, then the content should be in Side script format. If the content type is JS, then the content should be in JavaScript format.


        :return: The content of this UpdateScriptDetails.
        :rtype: str
        """
        return self._content

    @content.setter
    def content(self, content):
        """
        Sets the content of this UpdateScriptDetails.
        The content of the script. It may contain custom-defined tags that can be used for setting dynamic parameters.
        The format to set dynamic parameters is: `<ORAP><ON>param name</ON><OV>param value</OV><OS>isParamValueSecret(true/false)</OS></ORAP>`.
        Param value and isParamValueSecret are optional, the default value for isParamValueSecret is false.

        Examples:
        With mandatory param name : `<ORAP><ON>param name</ON></ORAP>`
        With parameter name and value : `<ORAP><ON>param name</ON><OV>param value</OV></ORAP>`

        Note that the content is valid if it matches the given content type. For example, if the content type is SIDE, then the content should be in Side script format. If the content type is JS, then the content should be in JavaScript format.


        :param content: The content of this UpdateScriptDetails.
        :type: str
        """
        self._content = content

    @property
    def content_file_name(self):
        """
        Gets the content_file_name of this UpdateScriptDetails.
        File name of uploaded script content.


        :return: The content_file_name of this UpdateScriptDetails.
        :rtype: str
        """
        return self._content_file_name

    @content_file_name.setter
    def content_file_name(self, content_file_name):
        """
        Sets the content_file_name of this UpdateScriptDetails.
        File name of uploaded script content.


        :param content_file_name: The content_file_name of this UpdateScriptDetails.
        :type: str
        """
        self._content_file_name = content_file_name

    @property
    def parameters(self):
        """
        Gets the parameters of this UpdateScriptDetails.
        List of script parameters. Example: `[{\"paramName\": \"userid\", \"paramValue\":\"testuser\", \"isSecret\": false}]`


        :return: The parameters of this UpdateScriptDetails.
        :rtype: list[oci.apm_synthetics.models.ScriptParameter]
        """
        return self._parameters

    @parameters.setter
    def parameters(self, parameters):
        """
        Sets the parameters of this UpdateScriptDetails.
        List of script parameters. Example: `[{\"paramName\": \"userid\", \"paramValue\":\"testuser\", \"isSecret\": false}]`


        :param parameters: The parameters of this UpdateScriptDetails.
        :type: list[oci.apm_synthetics.models.ScriptParameter]
        """
        self._parameters = parameters

    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this UpdateScriptDetails.
        Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
        Example: `{\"bar-key\": \"value\"}`


        :return: The freeform_tags of this UpdateScriptDetails.
        :rtype: dict(str, str)
        """
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this UpdateScriptDetails.
        Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
        Example: `{\"bar-key\": \"value\"}`


        :param freeform_tags: The freeform_tags of this UpdateScriptDetails.
        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags

    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this UpdateScriptDetails.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`


        :return: The defined_tags of this UpdateScriptDetails.
        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags

    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this UpdateScriptDetails.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`


        :param defined_tags: The defined_tags of this UpdateScriptDetails.
        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Two models are equal when every attribute (and backing field) matches.
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 37.161512 | 245 | 0.65369 |
2105000ebfc6d58fbb1923328dcb9f3d31663341 | 1,082 | py | Python | src/models/model_exploration.py | Quentindcf-grindstone/member_value_estimator | 0b32087d50723173d7d52956c8e4b05bcbe21f64 | [
"MIT"
] | null | null | null | src/models/model_exploration.py | Quentindcf-grindstone/member_value_estimator | 0b32087d50723173d7d52956c8e4b05bcbe21f64 | [
"MIT"
] | null | null | null | src/models/model_exploration.py | Quentindcf-grindstone/member_value_estimator | 0b32087d50723173d7d52956c8e4b05bcbe21f64 | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import r2_score
from datetime import datetime
from sklearn.svm import SVR
def print_now():
    """Print the current wall-clock time as HH:MM:SS (used as a progress marker)."""
    stamp = datetime.now().strftime("%H:%M:%S")
    print("Current Time =", stamp)
print_now()

# Target columns that must not leak into the feature matrix.
targets_list = ['customer_value', 'fraud_indicator', 'chargebacks', 'amount_paid']
y = pd.read_csv('../../data/interim/post_encoding.csv').drop(columns='Unnamed: 0')
print(y.columns)
# Subsample to keep the experiments fast.
y = y.head(100000)
X = y.drop(columns = targets_list)
# Model only the chargebacks target for now.
y = y[['chargebacks']]
print_now()

# Baseline: ordinary least squares, R^2 scored on the training data itself.
reg = LinearRegression().fit(X, y)
y_hat = reg.predict(X)
print(r2_score(y,y_hat))
print_now()

# Support-vector regression with default hyper-parameters.
svr = SVR().fit(X, y)
y_hat = svr.predict(X)
print(r2_score(y,y_hat))
print_now()

# SGD: switch the target to a 1-D array; warm_start=True lets the loop
# below keep refining the same model across repeated fit() calls.
y = y['chargebacks'].values
sgd = SGDRegressor(random_state=0, max_iter=1000, tol=0.000001, warm_start=True)
sgd.fit(X, y)
y_hat= sgd.predict(X)
print(r2_score(y,y_hat))
for i in range(100):
    sgd.fit(X, y)
    y_hat = sgd.predict(X)
    print(r2_score(y,y_hat))
print_now() | 25.761905 | 82 | 0.719963 |
1506cd2434be659858cf6c0c6c0195866070a951 | 761 | py | Python | client.py | MustafaTheCoder/chat-app | ef6c9bce32ff46d4cdf8866dc0314e9d5207bd2f | [
"MIT"
] | null | null | null | client.py | MustafaTheCoder/chat-app | ef6c9bce32ff46d4cdf8866dc0314e9d5207bd2f | [
"MIT"
] | null | null | null | client.py | MustafaTheCoder/chat-app | ef6c9bce32ff46d4cdf8866dc0314e9d5207bd2f | [
"MIT"
] | null | null | null | import socket
import time
# Server endpoint the client connects to.
PORT = 5050
SERVER = "localhost"
ADDR = (SERVER, PORT)
# Wire encoding for outgoing messages.
FORMAT = "utf-8"
# Sentinel message telling the server this client is leaving.
DISCONNECT_MESSAGE = "!DISCONNECT"
def connect():
    """Open a TCP connection to the chat server and return the connected socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(ADDR)
    return sock
def send(client, msg, encoding="utf-8"):
    """Encode `msg` and send it on the `client` socket.

    :param client: a connected socket-like object with a ``send`` method
    :param msg: text message to transmit
    :param encoding: text encoding for the wire (default "utf-8", matching
        the module-level FORMAT; parameterized for flexibility)
    """
    client.send(msg.encode(encoding))
def start():
    """Interactive client loop: prompt for messages and send them until 'q'."""
    answer = input('Would you like to connect (yes/no)? ')
    if answer.lower() != 'yes':
        return

    connection = connect()
    while True:
        msg = input("Message (q for quit): ")

        if msg == 'q':
            break

        send(connection, msg)

    # Tell the server we are leaving, give it a moment to process, then exit.
    send(connection, DISCONNECT_MESSAGE)
    time.sleep(1)
    print('Disconnected')

# Runs immediately: this module is meant to be executed as a script.
start()
5f6092276c766eac45ee7cc94ce826f79bfb29ae | 10,527 | py | Python | aiogram/bot/api.py | altvod/aiogram | bb1c774bccd084248bf47338255d1d375949e40b | [
"MIT"
] | null | null | null | aiogram/bot/api.py | altvod/aiogram | bb1c774bccd084248bf47338255d1d375949e40b | [
"MIT"
] | 1 | 2022-02-28T13:11:36.000Z | 2022-02-28T13:11:48.000Z | aiogram/bot/api.py | altvod/aiogram | bb1c774bccd084248bf47338255d1d375949e40b | [
"MIT"
] | null | null | null | import logging
import os
from dataclasses import dataclass
from http import HTTPStatus
import aiohttp
from .. import types
from ..utils import exceptions, json
from ..utils.helper import Helper, HelperMode, Item
# Main aiogram logger, shared by the helpers in this module.
log = logging.getLogger('aiogram')
@dataclass(frozen=True)
class TelegramAPIServer:
    """Endpoint templates for a Telegram Bot API server.

    ``base`` and ``file`` are format strings containing ``{token}``,
    ``{method}`` and ``{path}`` placeholders.
    """

    # Template for API method calls.
    base: str
    # Template for file downloads.
    file: str

    def api_url(self, token: str, method: str) -> str:
        """Build the URL for calling API ``method`` with bot ``token``.

        :param token: Bot token
        :param method: API method name (case insensitive)
        :return: URL
        """
        return self.base.format(token=token, method=method)

    def file_url(self, token: str, path: str) -> str:
        """Build the download URL for the file at ``path`` with bot ``token``.

        :param token: Bot token
        :param path: file path
        :return: URL
        """
        return self.file.format(token=token, path=path)

    @classmethod
    def from_base(cls, base: str) -> 'TelegramAPIServer':
        """Create endpoint templates from a server root URL (trailing slash ignored)."""
        root = base.rstrip("/")
        return cls(
            base=root + "/bot{token}/{method}",
            file=root + "/file/bot{token}/{path}",
        )


# Default: the official production Bot API server.
TELEGRAM_PRODUCTION = TelegramAPIServer.from_base("https://api.telegram.org")
def check_token(token: str) -> bool:
    """Validate the syntactic shape of a bot token ("<digits>:<secret>").

    :param token: candidate token
    :return: True when the token is well-formed
    :raises exceptions.ValidationError: on any violation
    """
    if not isinstance(token, str):
        raise exceptions.ValidationError(
            f"Token is invalid! "
            f"It must be 'str' type instead of {type(token)} type."
        )

    if any(ch.isspace() for ch in token):
        raise exceptions.ValidationError("Token is invalid! It can't contains spaces.")

    bot_id, sep, secret = token.partition(':')
    if not (sep and bot_id.isdigit() and secret):
        raise exceptions.ValidationError('Token is invalid!')

    return True
def check_result(method_name: str, content_type: str, status_code: int, body: str):
    """
    Checks whether `result` is a valid API response.
    A result is considered invalid if:
    - The server returned an HTTP response code other than 200
    - The content of the result is invalid JSON.
    - The method call was unsuccessful (The JSON 'ok' field equals False)

    :param method_name: The name of the method called
    :param status_code: status code
    :param content_type: content type of result
    :param body: result body
    :return: The result parsed to a JSON dictionary
    :raises ApiException: if one of the above listed cases is applicable
    """
    log.debug('Response for %s: [%d] "%r"', method_name, status_code, body)

    if content_type != 'application/json':
        raise exceptions.NetworkError(f"Invalid response with content type {content_type}: \"{body}\"")

    # Tolerate non-JSON bodies: fall back to an empty dict and use the raw
    # body as the error description below.
    try:
        result_json = json.loads(body)
    except ValueError:
        result_json = {}

    description = result_json.get('description') or body
    parameters = types.ResponseParameters(**result_json.get('parameters', {}) or {})

    # Success range (200..226): unwrap and return the 'result' payload.
    if HTTPStatus.OK <= status_code <= HTTPStatus.IM_USED:
        return result_json.get('result')
    # Rate-limit / migration hints take priority over the status code.
    elif parameters.retry_after:
        raise exceptions.RetryAfter(parameters.retry_after)
    elif parameters.migrate_to_chat_id:
        raise exceptions.MigrateToChat(parameters.migrate_to_chat_id)
    # The *.detect() helpers raise a more specific subclass based on the
    # description text.
    elif status_code == HTTPStatus.BAD_REQUEST:
        exceptions.BadRequest.detect(description)
    elif status_code == HTTPStatus.NOT_FOUND:
        exceptions.NotFound.detect(description)
    elif status_code == HTTPStatus.CONFLICT:
        exceptions.ConflictError.detect(description)
    elif status_code in (HTTPStatus.UNAUTHORIZED, HTTPStatus.FORBIDDEN):
        exceptions.Unauthorized.detect(description)
    elif status_code == HTTPStatus.REQUEST_ENTITY_TOO_LARGE:
        raise exceptions.NetworkError('File too large for uploading. '
                                      'Check telegram api limits https://core.telegram.org/bots/api#senddocument')
    elif status_code >= HTTPStatus.INTERNAL_SERVER_ERROR:
        if 'restart' in description:
            raise exceptions.RestartingTelegram()
        raise exceptions.TelegramAPIError(description)

    # Fallback for anything the branches above did not raise on.
    raise exceptions.TelegramAPIError(f"{description} [{status_code}]")
async def make_request(session, server, token, method, data=None, files=None, **kwargs):
    """POST one Bot API call and return its parsed 'result' payload.

    :param session: aiohttp client session used for the request
    :param server: TelegramAPIServer providing the endpoint templates
    :param token: bot token
    :param method: Bot API method name
    :param data: form fields (dict) or None
    :param files: files to upload (dict) or None
    :param kwargs: extra arguments forwarded to ``session.post``
    :raises exceptions.NetworkError: on aiohttp transport errors
    """
    log.debug('Make request: "%s" with data: "%r" and files "%r"', method, data, files)

    url = server.api_url(token=token, method=method)
    req = compose_data(data, files)
    try:
        async with session.post(url, data=req, **kwargs) as response:
            # check_result unwraps the payload or raises a typed exception.
            return check_result(method, response.content_type, response.status, await response.text())
    except aiohttp.ClientError as e:
        raise exceptions.NetworkError(f"aiohttp client throws an error: {e.__class__.__name__}: {e}")
def guess_filename(obj):
    """Best-effort file name for a file-like object.

    Returns the basename of the object's ``name`` attribute when it looks
    like a real path; ``None`` for missing or placeholder names such as
    ``"<stdin>"``.
    """
    candidate = getattr(obj, 'name', None)
    if not (candidate and isinstance(candidate, str)):
        return None
    if candidate.startswith('<') or candidate.endswith('>'):
        # Placeholder names used by pseudo-files are not real paths.
        return None
    return os.path.basename(candidate)
def compose_data(params=None, files=None):
    """
    Prepare multipart request data for the Bot API.

    :param params: plain form fields (dict); values are stringified
    :param files: files to attach, as a dict of key -> one of:
        (filename, fileobj) tuple, types.InputFile, or a raw file object
    :return: an aiohttp FormData instance
    :raises ValueError: when a tuple entry does not have exactly 2 elements
    """
    # quote_fields=False keeps field names as-is on the wire.
    data = aiohttp.formdata.FormData(quote_fields=False)

    if params:
        for key, value in params.items():
            data.add_field(key, str(value))

    if files:
        for key, f in files.items():
            # Accept three shapes: explicit (filename, fileobj) tuple,
            # aiogram InputFile, or a bare file object.
            if isinstance(f, tuple):
                if len(f) == 2:
                    filename, fileobj = f
                else:
                    raise ValueError('Tuple must have exactly 2 elements: filename, fileobj')
            elif isinstance(f, types.InputFile):
                filename, fileobj = f.filename, f.file
            else:
                # Fall back to the field key when no real name can be guessed.
                filename, fileobj = guess_filename(f) or key, f

            data.add_field(key, fileobj, filename=filename)

    return data
class Methods(Helper):
    """
    Helper for Telegram API Methods listed on https://core.telegram.org/bots/api

    Each ``Item`` resolves to the camelCase method name used on the wire
    (e.g. ``Methods.SEND_MESSAGE`` -> ``sendMessage``); the expected wire
    name is recorded in the trailing comment of every entry.
    """
    # Items are rendered in lowerCamelCase to match Bot API naming.
    mode = HelperMode.lowerCamelCase
    # Getting Updates
    GET_UPDATES = Item()  # getUpdates
    SET_WEBHOOK = Item()  # setWebhook
    DELETE_WEBHOOK = Item()  # deleteWebhook
    GET_WEBHOOK_INFO = Item()  # getWebhookInfo
    # Available methods
    GET_ME = Item()  # getMe
    LOG_OUT = Item()  # logOut
    CLOSE = Item()  # close
    SEND_MESSAGE = Item()  # sendMessage
    FORWARD_MESSAGE = Item()  # forwardMessage
    COPY_MESSAGE = Item()  # copyMessage
    SEND_PHOTO = Item()  # sendPhoto
    SEND_AUDIO = Item()  # sendAudio
    SEND_DOCUMENT = Item()  # sendDocument
    SEND_VIDEO = Item()  # sendVideo
    SEND_ANIMATION = Item()  # sendAnimation
    SEND_VOICE = Item()  # sendVoice
    SEND_VIDEO_NOTE = Item()  # sendVideoNote
    SEND_MEDIA_GROUP = Item()  # sendMediaGroup
    SEND_LOCATION = Item()  # sendLocation
    EDIT_MESSAGE_LIVE_LOCATION = Item()  # editMessageLiveLocation
    STOP_MESSAGE_LIVE_LOCATION = Item()  # stopMessageLiveLocation
    SEND_VENUE = Item()  # sendVenue
    SEND_CONTACT = Item()  # sendContact
    SEND_POLL = Item()  # sendPoll
    SEND_DICE = Item()  # sendDice
    SEND_CHAT_ACTION = Item()  # sendChatAction
    GET_USER_PROFILE_PHOTOS = Item()  # getUserProfilePhotos
    GET_FILE = Item()  # getFile
    KICK_CHAT_MEMBER = Item()  # kickChatMember
    BAN_CHAT_MEMBER = Item()  # banChatMember
    UNBAN_CHAT_MEMBER = Item()  # unbanChatMember
    RESTRICT_CHAT_MEMBER = Item()  # restrictChatMember
    PROMOTE_CHAT_MEMBER = Item()  # promoteChatMember
    SET_CHAT_ADMINISTRATOR_CUSTOM_TITLE = Item()  # setChatAdministratorCustomTitle
    BAN_CHAT_SENDER_CHAT = Item()  # banChatSenderChat
    UNBAN_CHAT_SENDER_CHAT = Item()  # unbanChatSenderChat
    SET_CHAT_PERMISSIONS = Item()  # setChatPermissions
    EXPORT_CHAT_INVITE_LINK = Item()  # exportChatInviteLink
    CREATE_CHAT_INVITE_LINK = Item()  # createChatInviteLink
    EDIT_CHAT_INVITE_LINK = Item()  # editChatInviteLink
    REVOKE_CHAT_INVITE_LINK = Item()  # revokeChatInviteLink
    APPROVE_CHAT_JOIN_REQUEST = Item()  # approveChatJoinRequest
    DECLINE_CHAT_JOIN_REQUEST = Item()  # declineChatJoinRequest
    SET_CHAT_PHOTO = Item()  # setChatPhoto
    DELETE_CHAT_PHOTO = Item()  # deleteChatPhoto
    SET_CHAT_TITLE = Item()  # setChatTitle
    SET_CHAT_DESCRIPTION = Item()  # setChatDescription
    PIN_CHAT_MESSAGE = Item()  # pinChatMessage
    UNPIN_CHAT_MESSAGE = Item()  # unpinChatMessage
    UNPIN_ALL_CHAT_MESSAGES = Item()  # unpinAllChatMessages
    LEAVE_CHAT = Item()  # leaveChat
    GET_CHAT = Item()  # getChat
    GET_CHAT_ADMINISTRATORS = Item()  # getChatAdministrators
    GET_CHAT_MEMBER_COUNT = Item()  # getChatMemberCount
    # Deprecated alias kept for backward compatibility with older Bot API.
    GET_CHAT_MEMBERS_COUNT = Item()  # getChatMembersCount (renamed to getChatMemberCount)
    GET_CHAT_MEMBER = Item()  # getChatMember
    SET_CHAT_STICKER_SET = Item()  # setChatStickerSet
    DELETE_CHAT_STICKER_SET = Item()  # deleteChatStickerSet
    ANSWER_CALLBACK_QUERY = Item()  # answerCallbackQuery
    SET_MY_COMMANDS = Item()  # setMyCommands
    DELETE_MY_COMMANDS = Item()  # deleteMyCommands
    GET_MY_COMMANDS = Item()  # getMyCommands
    # Updating messages
    EDIT_MESSAGE_TEXT = Item()  # editMessageText
    EDIT_MESSAGE_CAPTION = Item()  # editMessageCaption
    EDIT_MESSAGE_MEDIA = Item()  # editMessageMedia
    EDIT_MESSAGE_REPLY_MARKUP = Item()  # editMessageReplyMarkup
    STOP_POLL = Item()  # stopPoll
    DELETE_MESSAGE = Item()  # deleteMessage
    # Stickers
    SEND_STICKER = Item()  # sendSticker
    GET_STICKER_SET = Item()  # getStickerSet
    UPLOAD_STICKER_FILE = Item()  # uploadStickerFile
    CREATE_NEW_STICKER_SET = Item()  # createNewStickerSet
    ADD_STICKER_TO_SET = Item()  # addStickerToSet
    SET_STICKER_POSITION_IN_SET = Item()  # setStickerPositionInSet
    DELETE_STICKER_FROM_SET = Item()  # deleteStickerFromSet
    SET_STICKER_SET_THUMB = Item()  # setStickerSetThumb
    # Inline mode
    ANSWER_INLINE_QUERY = Item()  # answerInlineQuery
    # Payments
    SEND_INVOICE = Item()  # sendInvoice
    ANSWER_SHIPPING_QUERY = Item()  # answerShippingQuery
    ANSWER_PRE_CHECKOUT_QUERY = Item()  # answerPreCheckoutQuery
    # Telegram Passport
    SET_PASSPORT_DATA_ERRORS = Item()  # setPassportDataErrors
    # Games
    SEND_GAME = Item()  # sendGame
    SET_GAME_SCORE = Item()  # setGameScore
    GET_GAME_HIGH_SCORES = Item()  # getGameHighScores
| 35.928328 | 114 | 0.675501 |
196f5c48f49be6e2504dda4d7f64a70204e60623 | 246 | py | Python | portfolio/apps.py | rkisdp/rkisdp.django.backend | 771481cdeea6a101305c4819b06b839266ce6921 | [
"MIT"
] | null | null | null | portfolio/apps.py | rkisdp/rkisdp.django.backend | 771481cdeea6a101305c4819b06b839266ce6921 | [
"MIT"
] | null | null | null | portfolio/apps.py | rkisdp/rkisdp.django.backend | 771481cdeea6a101305c4819b06b839266ce6921 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# python imports
from __future__ import unicode_literals
# lib imports
from django.apps import AppConfig
class PortfolioConfig(AppConfig):
    """Django application configuration for the ``portfolio`` app."""
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'portfolio'
| 20.5 | 56 | 0.747967 |
da3620971b796a1a88b26608482dcd657362ff66 | 4,084 | py | Python | tests/integration/services/ticketing/test_user_check_in.py | byceps/byceps | ca9874f9b4f2a76daacb37c4135bbfb97e468040 | [
"BSD-3-Clause"
] | 33 | 2018-01-16T02:04:51.000Z | 2022-03-22T22:57:29.000Z | tests/integration/services/ticketing/test_user_check_in.py | byceps/byceps | ca9874f9b4f2a76daacb37c4135bbfb97e468040 | [
"BSD-3-Clause"
] | 7 | 2019-06-16T22:02:03.000Z | 2021-10-02T13:45:31.000Z | tests/integration/services/ticketing/test_user_check_in.py | byceps/byceps | ca9874f9b4f2a76daacb37c4135bbfb97e468040 | [
"BSD-3-Clause"
] | 14 | 2019-06-01T21:39:24.000Z | 2022-03-14T17:56:43.000Z | """
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
import pytest
from pytest import raises
from byceps.database import db
from byceps.services.party import service as party_service
from byceps.events.ticketing import TicketCheckedIn
from byceps.services.ticketing import (
event_service,
ticket_creation_service,
ticket_service,
ticket_user_checkin_service,
)
from byceps.services.ticketing.exceptions import (
TicketBelongsToDifferentParty,
TicketIsRevoked,
TicketLacksUser,
UserAccountSuspended,
UserAlreadyCheckedIn,
)
from tests.helpers import create_party
@pytest.fixture
def ticket(admin_app, category, ticket_owner):
    """Provide a freshly created ticket; delete it again on teardown."""
    ticket = ticket_creation_service.create_ticket(
        category.party_id, category.id, ticket_owner.id
    )
    yield ticket
    # Teardown: remove the ticket so tests do not leak database state.
    ticket_service.delete_ticket(ticket.id)
def test_check_in_user(admin_app, party, ticket, ticketing_admin, make_user):
    """Happy path: checking in a ticket's user flips the flag, returns a
    TicketCheckedIn event, and records a 'user-checked-in' ticket event."""
    # Arrange: assign a user to the ticket; it must not be checked in yet.
    ticket_user = make_user()
    ticket_before = ticket
    ticket_before.used_by_id = ticket_user.id
    db.session.commit()
    assert not ticket_before.user_checked_in
    events_before = event_service.get_events_for_ticket(ticket_before.id)
    assert len(events_before) == 0
    # -------------------------------- #
    # Act: perform the check-in.
    ticket_id = ticket_before.id
    event = check_in_user(party.id, ticket_id, ticketing_admin.id)
    # -------------------------------- #
    # Assert: flag set and returned event carries the expected data.
    ticket_after = ticket_service.get_ticket(ticket_id)
    assert ticket_after.user_checked_in
    assert event.__class__ is TicketCheckedIn
    assert event.occurred_at is not None
    assert event.initiator_id == ticketing_admin.id
    assert event.initiator_screen_name == ticketing_admin.screen_name
    assert event.ticket_id == ticket.id
    assert event.ticket_code == ticket.code
    assert event.occupied_seat_id is None
    assert event.user_id == ticket_user.id
    assert event.user_screen_name == ticket_user.screen_name
    # Exactly one persisted ticket event with the check-in details.
    ticket_events_after = event_service.get_events_for_ticket(ticket_after.id)
    assert len(ticket_events_after) == 1
    ticket_checked_in_event = ticket_events_after[0]
    assert ticket_checked_in_event.event_type == 'user-checked-in'
    assert ticket_checked_in_event.data == {
        'checked_in_user_id': str(ticket_user.id),
        'initiator_id': str(ticketing_admin.id),
    }
def test_check_in_user_with_ticket_for_another_party(
    admin_app, brand, ticket, ticketing_admin
):
    """Checking in against a different party than the ticket's must fail."""
    other_party = create_party(brand.id, 'next-party', 'Next Party')
    with raises(TicketBelongsToDifferentParty):
        check_in_user(other_party.id, ticket.id, ticketing_admin.id)
    # Clean up.
    party_service.delete_party(other_party.id)
def test_check_in_user_with_ticket_without_assigned_user(
    admin_app, party, ticket, ticketing_admin
):
    """A ticket with no assigned user cannot be checked in."""
    with raises(TicketLacksUser):
        check_in_user(party.id, ticket.id, ticketing_admin.id)
def test_check_in_user_with_revoked_ticket(
    admin_app, party, ticket, ticketing_admin, make_user
):
    """A revoked ticket cannot be checked in, even with a user assigned."""
    ticket_user = make_user()
    ticket.revoked = True
    ticket.used_by_id = ticket_user.id
    db.session.commit()
    with raises(TicketIsRevoked):
        check_in_user(party.id, ticket.id, ticketing_admin.id)
def test_check_in_user_with_ticket_user_already_checked_in(
    admin_app, party, ticket, ticketing_admin, make_user
):
    """A second check-in attempt for an already checked-in user must fail."""
    ticket_user = make_user()
    ticket.used_by_id = ticket_user.id
    ticket.user_checked_in = True
    db.session.commit()
    with raises(UserAlreadyCheckedIn):
        check_in_user(party.id, ticket.id, ticketing_admin.id)
def test_check_in_suspended_user(
    admin_app, party, ticket, ticketing_admin, make_user
):
    """A suspended user account must be rejected at check-in."""
    ticket_user = make_user(suspended=True)
    ticket.used_by_id = ticket_user.id
    db.session.commit()
    with raises(UserAccountSuspended):
        check_in_user(party.id, ticket.id, ticketing_admin.id)
# helpers
def check_in_user(party_id, ticket_id, admin_id):
    """Shorthand used by the tests above; delegates to the check-in service."""
    return ticket_user_checkin_service.check_in_user(party_id, ticket_id, admin_id)
1dca548c39324b805aa31cf858970ffda01e49f0 | 1,162 | py | Python | setup.py | startupcodebr/django-datatables-pagination | 4193ee920287e2a13dfd811013189a44752fbffd | [
"MIT"
] | 2 | 2020-08-17T13:37:40.000Z | 2021-08-12T22:43:26.000Z | setup.py | startupcodebr/django-datatables-pagination | 4193ee920287e2a13dfd811013189a44752fbffd | [
"MIT"
] | null | null | null | setup.py | startupcodebr/django-datatables-pagination | 4193ee920287e2a13dfd811013189a44752fbffd | [
"MIT"
] | 1 | 2021-03-08T15:22:20.000Z | 2021-03-08T15:22:20.000Z | from distutils.core import setup
from setuptools import find_packages
setup(
packages=find_packages(),
include_package_data=True,
name='django-datatables-pagination',
version='0.1.5',
license='MIT',
description='A Django ListView integration with datatables library.',
author='Matheus Zickuhr',
author_email='matheuszickuhr97@gmail.com',
url='https://github.com/MatheusZickuhr/django-datatables-pagination',
keywords=['django', 'datatables', 'pagination'],
install_requires=['django'],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content'
],
)
| 35.212121 | 73 | 0.634251 |
63326a11c47339b10fe5f5d9de8bd11ec19661c9 | 1,759 | py | Python | src/kedro_tutorial/__main__.py | DavidMG01/KedroSpaceflightsTutorial | ce610e2dded722b65ffb35eb85f21db119b51b2e | [
"MIT"
] | null | null | null | src/kedro_tutorial/__main__.py | DavidMG01/KedroSpaceflightsTutorial | ce610e2dded722b65ffb35eb85f21db119b51b2e | [
"MIT"
] | null | null | null | src/kedro_tutorial/__main__.py | DavidMG01/KedroSpaceflightsTutorial | ce610e2dded722b65ffb35eb85f21db119b51b2e | [
"MIT"
] | null | null | null | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kedro Tutorial file for ensuring the package is executable
as `kedro-tutorial` and `python -m kedro_tutorial`
"""
from pathlib import Path
from kedro.framework.project import configure_project
from .cli import run
def main():
    """Configure the Kedro project (named after this package's directory) and run the CLI."""
    configure_project(Path(__file__).parent.name)
    run()
# Entry point for both `python -m kedro_tutorial` and the console script.
if __name__ == "__main__":
    main()
| 39.088889 | 77 | 0.770893 |
aa62a5d6d6f4bf9b24708a8aac94813421a5d7ce | 480 | py | Python | ports/stm32/boards/manifest.py | psnergy/micropython | f57c526115a4b16fdda436065e0dfda7f2916fd5 | [
"MIT"
] | null | null | null | ports/stm32/boards/manifest.py | psnergy/micropython | f57c526115a4b16fdda436065e0dfda7f2916fd5 | [
"MIT"
] | null | null | null | ports/stm32/boards/manifest.py | psnergy/micropython | f57c526115a4b16fdda436065e0dfda7f2916fd5 | [
"MIT"
] | null | null | null | include("$(MPY_DIR)/extmod/uasyncio/manifest.py")
freeze("$(MPY_DIR)/drivers/dht", "dht.py")
freeze("$(MPY_DIR)/drivers/display", ("lcd160cr.py", "lcd160cr_test.py"))
freeze("$(MPY_DIR)/drivers/onewire", "onewire.py")
freeze("../../../frozen", "main.py")
freeze("../../../frozen", "config.py")
freeze("../../../frozen", "fram.py")
freeze("../../../frozen", "ssc.py")
freeze("../../../frozen", "crc8.py")
freeze("../../../frozen", "nextion.py")
freeze("../../../frozen", "adc.py")
| 40 | 73 | 0.589583 |
8fff6ba4d8bd2b5a48ddc7693baf4c341c575436 | 179 | py | Python | museum_management/gadget/doctype/gadget_product_revision/test_gadget_product_revision.py | neung2542/museum_management1 | 0cd31f28d390cca758bf2e77b59a584828d24507 | [
"MIT"
] | null | null | null | museum_management/gadget/doctype/gadget_product_revision/test_gadget_product_revision.py | neung2542/museum_management1 | 0cd31f28d390cca758bf2e77b59a584828d24507 | [
"MIT"
] | null | null | null | museum_management/gadget/doctype/gadget_product_revision/test_gadget_product_revision.py | neung2542/museum_management1 | 0cd31f28d390cca758bf2e77b59a584828d24507 | [
"MIT"
] | null | null | null | # Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
import unittest
class TestGadgetProductRevision(unittest.TestCase):
    """Placeholder test case for the Gadget Product Revision doctype (no tests yet)."""
    pass
| 19.888889 | 68 | 0.793296 |
c32b5bbd07f079a18c8ecb48a012b70a0953a62e | 6,652 | py | Python | kw_tests/test_data_storage.py | alex-kalanis/upload-per-partes | 639df2df326c9b8231acef4cd5fc001d6395b71b | [
"BSD-3-Clause"
] | null | null | null | kw_tests/test_data_storage.py | alex-kalanis/upload-per-partes | 639df2df326c9b8231acef4cd5fc001d6395b71b | [
"BSD-3-Clause"
] | null | null | null | kw_tests/test_data_storage.py | alex-kalanis/upload-per-partes | 639df2df326c9b8231acef4cd5fc001d6395b71b | [
"BSD-3-Clause"
] | null | null | null | from kw_tests.common_class import CommonTestClass
from kw_tests.support import Files, Dirs, DataRam, InfoRam
from kw_upload.data_storage import VolumeBasic
from kw_upload.data_storage import AStorage as DataStorage
from kw_upload.uploader.essentials import Calculates, Hashed, TargetSearch
from kw_upload.exceptions import UploadException
from kw_upload.uploader.translations import Translations
class ADataStorageTest(CommonTestClass):
    """Shared base for data-storage tests: cleans up the mock test file/dir
    and provides a volume-backed storage instance."""
    def tearDown(self):
        """Remove any leftover mock test file or directory after each test."""
        if Files.is_file(self._mock_test_file()):
            Files.unlink(self._mock_test_file())
        if Dirs.is_dir(self._mock_test_file()):
            Dirs.rmdir(self._mock_test_file())
        super().tearDown()
    def _mock_storage(self) -> DataStorage:
        """Return a fresh volume-based storage with default translations."""
        return VolumeBasic(Translations())
class VolumeTest(ADataStorageTest):
    """Exercise the volume-backed storage: write/read/truncate/remove and
    the error paths for unreadable, unwritable and missing files."""
    def test_thru(self):
        """Round trip: write, read back, truncate, then remove."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz')
        assert b'abcdefghijklmnopqrstuvwxyz' == storage.get_part(file, 0)
        storage.truncate(file, 16)
        assert b'abcdefghijklmnop' == storage.get_part(file, 0)
        storage.remove(file)
        assert not Files.is_file(file)
    def test_unreadable(self):
        """Writing to a path occupied by a directory must raise."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        # A directory at the target path makes it unopenable as a file.
        Dirs.mkdir(file)
        try:
            storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz')  # fail
            assert False, 'Accessing unreadable!'
        except UploadException as ex:
            assert 'CANNOT OPEN FILE' == ex.get_message()
        finally:
            Dirs.rmdir(file)
    def test_unreadable_seek(self):
        """Seek-write to a path occupied by a directory must raise too."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        Dirs.mkdir(file)
        try:
            storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz', 10)  # fail
            assert False, 'Accessing unreadable!'
        except UploadException as ex:
            assert 'CANNOT OPEN FILE' == ex.get_message()
        finally:
            Dirs.rmdir(file)
    def test_unwriteable(self):
        """Writing to a read-only file must raise a write error."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz')
        # Drop write permission to provoke the failure.
        Files.chmod(file, 0o444)
        try:
            storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz')  # fail
            assert False, 'Writing to locked file!'
        except UploadException as ex:
            assert 'CANNOT WRITE FILE' == ex.get_message()
        finally:
            # Restore permissions so cleanup can delete the file.
            Files.chmod(file, 0o666)
            storage.remove(self._mock_test_file())
    def test_unwriteable_seek(self):
        """Seek-write to a read-only file must raise a write error."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz', 0)
        Files.chmod(file, 0o444)
        try:
            storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz', 26)  # fail
            assert False, 'Writing to non-available seek in file!'
        except UploadException as ex:
            assert 'CANNOT WRITE FILE' == ex.get_message()
        finally:
            Files.chmod(file, 0o666)
            storage.remove(self._mock_test_file())
    def test_deleted(self):
        """exists() tracks the file; removing twice must raise."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        assert not storage.exists(file)
        storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz', 0)
        assert storage.exists(file)
        try:
            storage.remove(file)
            storage.remove(file)  # fail
            assert False, 'Deleting non-existent file!'
        except UploadException as ex:
            assert 'CANNOT REMOVE DATA' == ex.get_message()
class TargetTest(CommonTestClass):
    """Exercise TargetSearch: required inputs, name sanitizing, and free
    target-name lookup when files already exist."""
    def test_fail_no_remote(self):
        """process() without a remote file name must raise."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang))
        try:
            lib.process()
            assert False, 'No remote and passed'
        except UploadException as ex:
            assert 'SENT FILE NAME IS EMPTY' == ex.get_message()
    def test_fail_no_target(self):
        """process() without a target directory must raise."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang))
        lib.set_remote_file_name('abcdefg')
        try:
            lib.process()
            assert False, 'No target and passed'
        except UploadException as ex:
            assert 'TARGET DIR IS NOT SET' == ex.get_message()
    def test_fail_no_base(self):
        """Asking for the final name before processing must raise."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang))
        try:
            lib.get_final_target_name()
            assert False, 'No final target name and passed'
        except UploadException as ex:
            assert 'UPLOAD FILE NAME IS EMPTY' == ex.get_message()
    def test_process_clear(self):
        """With sanitizing enabled, unsafe characters are normalized."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang))
        lib.set_target_dir(self._get_test_dir()).set_remote_file_name('what can be found$.here').process()
        assert 'what_can_be_found.here' == lib.get_final_target_name()
        assert self._get_test_dir() + 'what_can_be_found' + TargetSearch.FILE_DRIVER_SUFF == lib.get_driver_location()
        assert self._get_test_dir() + 'what_can_be_found.here' + TargetSearch.FILE_UPLOAD_SUFF == lib.get_temporary_target_location()
    def test_process_no_clear(self):
        """With sanitizing disabled, the remote name is used verbatim."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang), False, False)
        lib.set_target_dir(self._get_test_dir()).set_remote_file_name('what el$e can be found').process()
        assert 'what el$e can be found' == lib.get_final_target_name()
        assert self._get_test_dir() + 'what el$e can be found' + TargetSearch.FILE_DRIVER_SUFF == lib.get_driver_location()
        assert self._get_test_dir() + 'what el$e can be found' + TargetSearch.FILE_UPLOAD_SUFF == lib.get_temporary_target_location()
    def test_process_name_lookup(self):
        """When base and numbered names exist, the next free index is chosen."""
        lang = Translations()
        data_ram = DataRam(lang)
        # Occupy the base name and indices 0..2 so index 3 is the next free one.
        data_ram.add_part(self._get_test_dir() + 'dummyFile.tst', 'asdfghjklqwertzuiopyxcvbnm')
        data_ram.add_part(self._get_test_dir() + 'dummyFile.0.tst', 'asdfghjklqwertzuiopyxcvbnm')
        data_ram.add_part(self._get_test_dir() + 'dummyFile.1.tst', 'asdfghjklqwertzuiopyxcvbnm')
        data_ram.add_part(self._get_test_dir() + 'dummyFile.2.tst', 'asdfghjklqwertzuiopyxcvbnm')
        lib = TargetSearch(lang, InfoRam(lang), data_ram, False, False)
        lib.set_target_dir(self._get_test_dir()).set_remote_file_name('dummyFile.tst').process()
        assert self._get_test_dir() + 'dummyFile.3.tst' + TargetSearch.FILE_UPLOAD_SUFF == lib.get_temporary_target_location()
| 42.369427 | 133 | 0.661455 |
135bcea46e0d8b121d087066a3791d4635555127 | 206 | py | Python | redock/__init__.py | xolox/python-redock | 12f8282d7e6199468992c5ee558513cc2a5cc536 | [
"MIT"
] | 8 | 2015-05-20T23:06:54.000Z | 2020-12-03T03:45:23.000Z | redock/__init__.py | xolox/python-redock | 12f8282d7e6199468992c5ee558513cc2a5cc536 | [
"MIT"
] | null | null | null | redock/__init__.py | xolox/python-redock | 12f8282d7e6199468992c5ee558513cc2a5cc536 | [
"MIT"
] | 2 | 2016-06-21T20:44:09.000Z | 2017-08-29T21:11:49.000Z | # Semi-standard module versioning.
# Semi-standard module versioning (PEP 396 style).
__version__ = '0.5.8'

# Silence the noisy connection-pool logger of an external dependency
# (urllib3 as vendored by requests). Use logging.WARNING, the documented
# constant; logging.WARN is only an undocumented alias of the same value.
import logging
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
| 29.428571 | 84 | 0.800971 |
be621af6798871247d9e874fa0afe1f7a2b335bd | 81,166 | py | Python | code/tmp_rtrip/test/test_pathlib.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 24 | 2018-01-23T05:28:40.000Z | 2021-04-13T20:52:59.000Z | code/tmp_rtrip/test/test_pathlib.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 17 | 2017-12-21T18:32:31.000Z | 2018-12-18T17:09:50.000Z | code/tmp_rtrip/test/test_pathlib.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | null | null | null | import collections
import io
import os
import errno
import pathlib
import pickle
import socket
import stat
import tempfile
import unittest
from unittest import mock
from test import support
android_not_root = support.android_not_root
TESTFN = support.TESTFN
try:
import grp, pwd
except ImportError:
grp = pwd = None
class _BaseFlavourTest(object):
    """Shared checks for path flavours; subclasses set ``flavour``."""
    def _check_parse_parts(self, arg, expected):
        """Run ``parse_parts`` on *arg* (with '/' swapped for the flavour's
        sep, and again for altsep if present) and compare to *expected*."""
        f = self.flavour.parse_parts
        sep = self.flavour.sep
        altsep = self.flavour.altsep
        actual = f([x.replace('/', sep) for x in arg])
        self.assertEqual(actual, expected)
        if altsep:
            actual = f([x.replace('/', altsep) for x in arg])
            self.assertEqual(actual, expected)
    def test_parse_parts_common(self):
        """Parsing behaviour shared by both flavours."""
        check = self._check_parse_parts
        sep = self.flavour.sep
        # Relative paths: unanchored, separators collapsed.
        check([], ('', '', []))
        check(['a'], ('', '', ['a']))
        check(['a/'], ('', '', ['a']))
        check(['a', 'b'], ('', '', ['a', 'b']))
        check(['a/b'], ('', '', ['a', 'b']))
        check(['a/b/'], ('', '', ['a', 'b']))
        check(['a', 'b/c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
        check(['a', 'b//c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
        check(['a', 'b/c/', 'd'], ('', '', ['a', 'b', 'c', 'd']))
        # '.' components are dropped during parsing.
        check(['.'], ('', '', []))
        check(['.', '.', 'b'], ('', '', ['b']))
        check(['a', '.', 'b'], ('', '', ['a', 'b']))
        check(['a', '.', '.'], ('', '', ['a']))
        # An anchored part resets everything before it.
        check(['/a/b'], ('', sep, [sep, 'a', 'b']))
        check(['/a', 'b'], ('', sep, [sep, 'a', 'b']))
        check(['/a/', 'b'], ('', sep, [sep, 'a', 'b']))
        check(['a', '/b', 'c'], ('', sep, [sep, 'b', 'c']))
        check(['a', '/b', '/c'], ('', sep, [sep, 'c']))
class PosixFlavourTest(_BaseFlavourTest, unittest.TestCase):
    """POSIX-specific flavour behaviour (no drives, '//' root is special)."""
    flavour = pathlib._posix_flavour
    def test_parse_parts(self):
        """POSIX quirks: '//' is preserved, 3+ slashes collapse to '/',
        and Windows-style drives/backslashes are plain characters."""
        check = self._check_parse_parts
        check(['//a', 'b'], ('', '//', ['//', 'a', 'b']))
        check(['///a', 'b'], ('', '/', ['/', 'a', 'b']))
        check(['////a', 'b'], ('', '/', ['/', 'a', 'b']))
        # Paths which look like NT paths aren't treated specially.
        check(['c:a'], ('', '', ['c:a']))
        check(['c:\\a'], ('', '', ['c:\\a']))
        check(['\\a'], ('', '', ['\\a']))
    def test_splitroot(self):
        """splitroot() yields (drive, root, rest); drive is always empty."""
        f = self.flavour.splitroot
        self.assertEqual(f(''), ('', '', ''))
        self.assertEqual(f('a'), ('', '', 'a'))
        self.assertEqual(f('a/b'), ('', '', 'a/b'))
        self.assertEqual(f('a/b/'), ('', '', 'a/b/'))
        self.assertEqual(f('/a'), ('', '/', 'a'))
        self.assertEqual(f('/a/b'), ('', '/', 'a/b'))
        self.assertEqual(f('/a/b/'), ('', '/', 'a/b/'))
        # The root is considered '//' only when there are exactly two slashes.
        self.assertEqual(f('//a'), ('', '//', 'a'))
        self.assertEqual(f('///a'), ('', '/', 'a'))
        self.assertEqual(f('///a/b'), ('', '/', 'a/b'))
        # Paths which look like NT paths aren't treated specially.
        self.assertEqual(f('c:/a/b'), ('', '', 'c:/a/b'))
        self.assertEqual(f('\\/a/b'), ('', '', '\\/a/b'))
        self.assertEqual(f('\\a\\b'), ('', '', '\\a\\b'))
class NTFlavourTest(_BaseFlavourTest, unittest.TestCase):
    """Windows-specific flavour behaviour (drives, UNC shares, extended paths)."""
    flavour = pathlib._windows_flavour
    def test_parse_parts(self):
        """NT quirks: drive letters, UNC '\\\\host\\share' anchors, and
        '\\\\?\\' extended-length prefixes."""
        check = self._check_parse_parts
        # First part is anchored.
        check(['c:'], ('c:', '', ['c:']))
        check(['c:/'], ('c:', '\\', ['c:\\']))
        check(['/'], ('', '\\', ['\\']))
        check(['c:a'], ('c:', '', ['c:', 'a']))
        check(['c:/a'], ('c:', '\\', ['c:\\', 'a']))
        check(['/a'], ('', '\\', ['\\', 'a']))
        # UNC paths.
        check(['//a/b'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
        check(['//a/b/'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
        check(['//a/b/c'], ('\\\\a\\b', '\\', ['\\\\a\\b\\', 'c']))
        # Second part is anchored, so that the first part is ignored.
        check(['a', 'Z:b', 'c'], ('Z:', '', ['Z:', 'b', 'c']))
        check(['a', 'Z:/b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
        # UNC paths anchored by a later part.
        check(['a', '//b/c', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
        # Collapsing and stripping excess slashes.
        check(['a', 'Z://b//c/', 'd/'], ('Z:', '\\', ['Z:\\', 'b', 'c', 'd']))
        # UNC paths don't collapse the share's trailing slash.
        check(['a', '//b/c//', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
        # Extended-length paths.
        check(['//?/c:/'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\']))
        check(['//?/c:/a'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'a']))
        check(['//?/c:/a', '/b'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'b']))
        # Extended UNC paths.
        check(['//?/UNC/b/c'], ('\\\\?\\UNC\\b\\c', '\\', [
            '\\\\?\\UNC\\b\\c\\']))
        check(['//?/UNC/b/c/d'], ('\\\\?\\UNC\\b\\c', '\\', [
            '\\\\?\\UNC\\b\\c\\', 'd']))
        # Second part has a root but not a drive: drive is kept.
        check(['a', '/b', 'c'], ('', '\\', ['\\', 'b', 'c']))
        check(['Z:/a', '/b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
        check(['//?/Z:/a', '/b', 'c'], ('\\\\?\\Z:', '\\', ['\\\\?\\Z:\\',
            'b', 'c']))
    def test_splitroot(self):
        """splitroot() yields (drive, root, rest) with NT drive/UNC handling."""
        f = self.flavour.splitroot
        self.assertEqual(f(''), ('', '', ''))
        self.assertEqual(f('a'), ('', '', 'a'))
        self.assertEqual(f('a\\b'), ('', '', 'a\\b'))
        self.assertEqual(f('\\a'), ('', '\\', 'a'))
        self.assertEqual(f('\\a\\b'), ('', '\\', 'a\\b'))
        self.assertEqual(f('c:a\\b'), ('c:', '', 'a\\b'))
        self.assertEqual(f('c:\\a\\b'), ('c:', '\\', 'a\\b'))
        # Redundant slashes in the root are collapsed.
        self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
        self.assertEqual(f('\\\\\\a/b'), ('', '\\', 'a/b'))
        self.assertEqual(f('c:\\\\a'), ('c:', '\\', 'a'))
        self.assertEqual(f('c:\\\\\\a/b'), ('c:', '\\', 'a/b'))
        # Valid UNC paths.
        self.assertEqual(f('\\\\a\\b'), ('\\\\a\\b', '\\', ''))
        self.assertEqual(f('\\\\a\\b\\'), ('\\\\a\\b', '\\', ''))
        self.assertEqual(f('\\\\a\\b\\c\\d'), ('\\\\a\\b', '\\', 'c\\d'))
        # These are non-UNC paths (according to ntpath.py and test_ntpath).
        self.assertEqual(f('\\\\\\a\\b'), ('', '\\', 'a\\b'))
        self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
class _BasePurePathTest(object):
equivalences = {'a/b': [('a', 'b'), ('a/', 'b'), ('a', 'b/'), ('a/',
'b/'), ('a/b/',), ('a//b',), ('a//b//',), ('', 'a', 'b'), ('a', '',
'b'), ('a', 'b', '')], '/b/c/d': [('a', '/b/c', 'd'), ('a',
'///b//c', 'd/'), ('/a', '/b/c', 'd'), ('/', 'b', '', 'c/d'), ('/',
'', 'b/c/d'), ('', '/b/c/d')]}
    def setUp(self):
        """Cache the flavour's separator characters for the checks below."""
        p = self.cls('a')
        self.flavour = p._flavour
        self.sep = self.flavour.sep
        self.altsep = self.flavour.altsep
    def test_constructor_common(self):
        """Constructor accepts strings, path-like objects and other paths."""
        P = self.cls
        p = P('a')
        self.assertIsInstance(p, P)
        class PathLike:
            def __fspath__(self):
                return 'a/b/c'
        P('a', 'b', 'c')
        P('/a', 'b', 'c')
        P('a/b/c')
        P('/a/b/c')
        P(PathLike())
        # Passing path objects joins them like string segments.
        self.assertEqual(P(P('a')), P('a'))
        self.assertEqual(P(P('a'), 'b'), P('a/b'))
        self.assertEqual(P(P('a'), P('b')), P('a/b'))
        self.assertEqual(P(P('a'), P('b'), P('c')), P(PathLike()))
    def _check_str_subclass(self, *args):
        """Construct from str subclasses; parts must be plain str."""
        class StrSubclass(str):
            pass
        P = self.cls
        p = P(*(StrSubclass(x) for x in args))
        self.assertEqual(p, P(*args))
        for part in p.parts:
            self.assertIs(type(part), str)
    def test_str_subclass_common(self):
        """str subclasses are accepted for all path shapes."""
        self._check_str_subclass('')
        self._check_str_subclass('.')
        self._check_str_subclass('a')
        self._check_str_subclass('a/b.txt')
        self._check_str_subclass('/a/b.txt')
    def test_join_common(self):
        """joinpath() appends segments; an absolute segment resets the path."""
        P = self.cls
        p = P('a/b')
        pp = p.joinpath('c')
        self.assertEqual(pp, P('a/b/c'))
        self.assertIs(type(pp), type(p))
        pp = p.joinpath('c', 'd')
        self.assertEqual(pp, P('a/b/c/d'))
        pp = p.joinpath(P('c'))
        self.assertEqual(pp, P('a/b/c'))
        pp = p.joinpath('/c')
        self.assertEqual(pp, P('/c'))
    def test_div_common(self):
        """The '/' operator mirrors joinpath(), including the reflected form."""
        P = self.cls
        p = P('a/b')
        pp = p / 'c'
        self.assertEqual(pp, P('a/b/c'))
        self.assertIs(type(pp), type(p))
        pp = p / 'c/d'
        self.assertEqual(pp, P('a/b/c/d'))
        pp = p / 'c' / 'd'
        self.assertEqual(pp, P('a/b/c/d'))
        # str / Path uses the reflected __rtruediv__.
        pp = 'c' / p / 'd'
        self.assertEqual(pp, P('c/a/b/d'))
        pp = p / P('c')
        self.assertEqual(pp, P('a/b/c'))
        # An absolute right-hand side resets the path.
        pp = p / '/c'
        self.assertEqual(pp, P('/c'))
    def _check_str(self, expected, args):
        """Assert str(cls(*args)) equals *expected* with flavour separators."""
        p = self.cls(*args)
        self.assertEqual(str(p), expected.replace('/', self.sep))
    def test_str_common(self):
        """str() round-trips canonical paths; the empty path prints as '.'."""
        for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
            self._check_str(pathstr, (pathstr,))
        self._check_str('.', ('',))
    def test_as_posix_common(self):
        """as_posix() yields forward-slash form for canonical paths."""
        P = self.cls
        for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
            self.assertEqual(P(pathstr).as_posix(), pathstr)
    def test_as_bytes_common(self):
        """bytes() encodes the path with the flavour's separator."""
        sep = os.fsencode(self.sep)
        P = self.cls
        self.assertEqual(bytes(P('a/b')), b'a' + sep + b'b')
    def test_as_uri_common(self):
        """as_uri() is only defined for absolute paths; relative ones raise."""
        P = self.cls
        with self.assertRaises(ValueError):
            P('a').as_uri()
        with self.assertRaises(ValueError):
            P().as_uri()
    def test_repr_common(self):
        """repr() is 'ClassName(<posix form>)' and eval()s back to an equal path."""
        for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
            p = self.cls(pathstr)
            clsname = p.__class__.__name__
            r = repr(p)
            # The repr() is in the form ClassName("forward-slashes path").
            self.assertTrue(r.startswith(clsname + '('), r)
            self.assertTrue(r.endswith(')'), r)
            inner = r[len(clsname) + 1:-1]
            self.assertEqual(eval(inner), p.as_posix())
            # The repr() roundtrips.
            q = eval(r, pathlib.__dict__)
            self.assertIs(q.__class__, p.__class__)
            self.assertEqual(q, p)
            self.assertEqual(repr(q), r)
    def test_eq_common(self):
        """Equality compares normalized parts; paths never equal non-paths."""
        P = self.cls
        self.assertEqual(P('a/b'), P('a/b'))
        self.assertEqual(P('a/b'), P('a', 'b'))
        self.assertNotEqual(P('a/b'), P('a'))
        self.assertNotEqual(P('a/b'), P('/a/b'))
        self.assertNotEqual(P('a/b'), P())
        self.assertNotEqual(P('/a/b'), P('/'))
        self.assertNotEqual(P(), P('/'))
        self.assertNotEqual(P(), '')
        self.assertNotEqual(P(), {})
        self.assertNotEqual(P(), int)
    def test_match_common(self):
        """match(): relative patterns match from the right, absolute ones fully."""
        P = self.cls
        self.assertRaises(ValueError, P('a').match, '')
        self.assertRaises(ValueError, P('a').match, '.')
        # Simple relative pattern.
        self.assertTrue(P('b.py').match('b.py'))
        self.assertTrue(P('a/b.py').match('b.py'))
        self.assertTrue(P('/a/b.py').match('b.py'))
        self.assertFalse(P('a.py').match('b.py'))
        self.assertFalse(P('b/py').match('b.py'))
        self.assertFalse(P('/a.py').match('b.py'))
        self.assertFalse(P('b.py/c').match('b.py'))
        # Wildcard relative pattern.
        self.assertTrue(P('b.py').match('*.py'))
        self.assertTrue(P('a/b.py').match('*.py'))
        self.assertTrue(P('/a/b.py').match('*.py'))
        self.assertFalse(P('b.pyc').match('*.py'))
        self.assertFalse(P('b./py').match('*.py'))
        self.assertFalse(P('b.py/c').match('*.py'))
        # Multi-part relative pattern.
        self.assertTrue(P('ab/c.py').match('a*/*.py'))
        self.assertTrue(P('/d/ab/c.py').match('a*/*.py'))
        self.assertFalse(P('a.py').match('a*/*.py'))
        self.assertFalse(P('/dab/c.py').match('a*/*.py'))
        self.assertFalse(P('ab/c.py/d').match('a*/*.py'))
        # Absolute pattern.
        self.assertTrue(P('/b.py').match('/*.py'))
        self.assertFalse(P('b.py').match('/*.py'))
        self.assertFalse(P('a/b.py').match('/*.py'))
        self.assertFalse(P('/a/b.py').match('/*.py'))
        # Multi-part absolute pattern.
        self.assertTrue(P('/a/b.py').match('/a/*.py'))
        self.assertFalse(P('/ab.py').match('/a/*.py'))
        self.assertFalse(P('/a/b/c.py').match('/a/*.py'))
    def test_ordering_common(self):
        """Ordering is lexicographic per-part; unrelated types don't compare."""
        def assertLess(a, b):
            self.assertLess(a, b)
            self.assertGreater(b, a)
        P = self.cls
        a = P('a')
        b = P('a/b')
        c = P('abc')
        d = P('b')
        assertLess(a, b)
        assertLess(a, c)
        assertLess(a, d)
        assertLess(b, c)
        assertLess(c, d)
        P = self.cls
        a = P('/a')
        b = P('/a/b')
        c = P('/abc')
        d = P('/b')
        assertLess(a, b)
        assertLess(a, c)
        assertLess(a, d)
        assertLess(b, c)
        assertLess(c, d)
        # Comparing with a non-path type raises TypeError.
        with self.assertRaises(TypeError):
            P() < {}
    def test_parts_common(self):
        """parts exposes the path components as a tuple, cached on the instance."""
        sep = self.sep
        P = self.cls
        p = P('a/b')
        parts = p.parts
        self.assertEqual(parts, ('a', 'b'))
        # The tuple is computed once and reused (identity check).
        self.assertIs(parts, p.parts)
        # An absolute path includes the root as its first part.
        p = P('/a/b')
        parts = p.parts
        self.assertEqual(parts, (sep, 'a', 'b'))
    def test_fspath_common(self):
        """__fspath__() and os.fspath() return the string form of the path."""
        P = self.cls
        p = P('a/b')
        self._check_str(p.__fspath__(), ('a/b',))
        self._check_str(os.fspath(p), ('a/b',))
    def test_equivalences(self):
        """All argument spellings in self.equivalences parse to the same path (eq, hash, str, as_posix)."""
        for k, tuples in self.equivalences.items():
            canon = k.replace('/', self.sep)
            posix = k.replace(self.sep, '/')
            if canon != posix:
                # On flavours with a non-'/' separator, also test the
                # native-separator spellings of every argument tuple.
                tuples = tuples + [tuple(part.replace('/', self.sep) for
                    part in t) for t in tuples]
                tuples.append((posix,))
            pcanon = self.cls(canon)
            for t in tuples:
                p = self.cls(*t)
                self.assertEqual(p, pcanon, 'failed with args {}'.format(t))
                self.assertEqual(hash(p), hash(pcanon))
                self.assertEqual(str(p), canon)
                self.assertEqual(p.as_posix(), posix)
    def test_parent_common(self):
        """parent walks up one level and saturates at the anchor ('' or '/')."""
        P = self.cls
        p = P('a/b/c')
        self.assertEqual(p.parent, P('a/b'))
        self.assertEqual(p.parent.parent, P('a'))
        self.assertEqual(p.parent.parent.parent, P())
        # Walking past the top is a no-op, not an error.
        self.assertEqual(p.parent.parent.parent.parent, P())
        p = P('/a/b/c')
        self.assertEqual(p.parent, P('/a/b'))
        self.assertEqual(p.parent.parent, P('/a'))
        self.assertEqual(p.parent.parent.parent, P('/'))
        self.assertEqual(p.parent.parent.parent.parent, P('/'))
    def test_parents_common(self):
        """parents is an immutable, indexable sequence of ancestors (nearest first)."""
        P = self.cls
        p = P('a/b/c')
        par = p.parents
        self.assertEqual(len(par), 3)
        self.assertEqual(par[0], P('a/b'))
        self.assertEqual(par[1], P('a'))
        self.assertEqual(par[2], P('.'))
        self.assertEqual(list(par), [P('a/b'), P('a'), P('.')])
        # Negative indexing is not supported at this point.
        with self.assertRaises(IndexError):
            par[-1]
        with self.assertRaises(IndexError):
            par[3]
        # The sequence is read-only.
        with self.assertRaises(TypeError):
            par[0] = p
        p = P('/a/b/c')
        par = p.parents
        self.assertEqual(len(par), 3)
        self.assertEqual(par[0], P('/a/b'))
        self.assertEqual(par[1], P('/a'))
        self.assertEqual(par[2], P('/'))
        self.assertEqual(list(par), [P('/a/b'), P('/a'), P('/')])
        with self.assertRaises(IndexError):
            par[3]
    def test_drive_common(self):
        """Generic (POSIX-style) pure paths have no drive component."""
        P = self.cls
        self.assertEqual(P('a/b').drive, '')
        self.assertEqual(P('/a/b').drive, '')
        self.assertEqual(P('').drive, '')
    def test_root_common(self):
        """root is the separator for absolute paths and empty otherwise."""
        P = self.cls
        sep = self.sep
        self.assertEqual(P('').root, '')
        self.assertEqual(P('a/b').root, '')
        self.assertEqual(P('/').root, sep)
        self.assertEqual(P('/a/b').root, sep)
    def test_anchor_common(self):
        """anchor is drive + root; with no drive it equals root."""
        P = self.cls
        sep = self.sep
        self.assertEqual(P('').anchor, '')
        self.assertEqual(P('a/b').anchor, '')
        self.assertEqual(P('/').anchor, sep)
        self.assertEqual(P('/a/b').anchor, sep)
    def test_name_common(self):
        """name is the final component; the root and '.' components have no name."""
        P = self.cls
        self.assertEqual(P('').name, '')
        self.assertEqual(P('.').name, '')
        self.assertEqual(P('/').name, '')
        self.assertEqual(P('a/b').name, 'b')
        self.assertEqual(P('/a/b').name, 'b')
        # A trailing '.' component is stripped during parsing.
        self.assertEqual(P('/a/b/.').name, 'b')
        self.assertEqual(P('a/b.py').name, 'b.py')
        self.assertEqual(P('/a/b.py').name, 'b.py')
    def test_suffix_common(self):
        """suffix is the last extension of the final component; a leading dot or trailing dot is not a suffix."""
        P = self.cls
        self.assertEqual(P('').suffix, '')
        self.assertEqual(P('.').suffix, '')
        self.assertEqual(P('..').suffix, '')
        self.assertEqual(P('/').suffix, '')
        self.assertEqual(P('a/b').suffix, '')
        self.assertEqual(P('/a/b').suffix, '')
        self.assertEqual(P('/a/b/.').suffix, '')
        self.assertEqual(P('a/b.py').suffix, '.py')
        self.assertEqual(P('/a/b.py').suffix, '.py')
        # Hidden files ('.hgrc') have no suffix.
        self.assertEqual(P('a/.hgrc').suffix, '')
        self.assertEqual(P('/a/.hgrc').suffix, '')
        self.assertEqual(P('a/.hg.rc').suffix, '.rc')
        self.assertEqual(P('/a/.hg.rc').suffix, '.rc')
        # Only the last extension counts.
        self.assertEqual(P('a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('/a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('a/Some name. Ending with a dot.').suffix, '')
        self.assertEqual(P('/a/Some name. Ending with a dot.').suffix, '')
    def test_suffixes_common(self):
        """suffixes lists every extension of the final component, in order."""
        P = self.cls
        self.assertEqual(P('').suffixes, [])
        self.assertEqual(P('.').suffixes, [])
        self.assertEqual(P('/').suffixes, [])
        self.assertEqual(P('a/b').suffixes, [])
        self.assertEqual(P('/a/b').suffixes, [])
        self.assertEqual(P('/a/b/.').suffixes, [])
        self.assertEqual(P('a/b.py').suffixes, ['.py'])
        self.assertEqual(P('/a/b.py').suffixes, ['.py'])
        # A hidden file's leading dot does not start a suffix.
        self.assertEqual(P('a/.hgrc').suffixes, [])
        self.assertEqual(P('/a/.hgrc').suffixes, [])
        self.assertEqual(P('a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('/a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('a/b.tar.gz').suffixes, ['.tar', '.gz'])
        self.assertEqual(P('/a/b.tar.gz').suffixes, ['.tar', '.gz'])
        # A trailing dot does not produce an (empty) suffix.
        self.assertEqual(P('a/Some name. Ending with a dot.').suffixes, [])
        self.assertEqual(P('/a/Some name. Ending with a dot.').suffixes, [])
    def test_stem_common(self):
        """stem is the final component without its last suffix."""
        P = self.cls
        self.assertEqual(P('').stem, '')
        self.assertEqual(P('.').stem, '')
        self.assertEqual(P('..').stem, '..')
        self.assertEqual(P('/').stem, '')
        self.assertEqual(P('a/b').stem, 'b')
        self.assertEqual(P('a/b.py').stem, 'b')
        # A hidden file keeps its whole name as the stem.
        self.assertEqual(P('a/.hgrc').stem, '.hgrc')
        self.assertEqual(P('a/.hg.rc').stem, '.hg')
        self.assertEqual(P('a/b.tar.gz').stem, 'b.tar')
        self.assertEqual(P('a/Some name. Ending with a dot.').stem,
            'Some name. Ending with a dot.')
    def test_with_name_common(self):
        """with_name() replaces the final component; paths without a name, and invalid replacement names, raise ValueError."""
        P = self.cls
        self.assertEqual(P('a/b').with_name('d.xml'), P('a/d.xml'))
        self.assertEqual(P('/a/b').with_name('d.xml'), P('/a/d.xml'))
        self.assertEqual(P('a/b.py').with_name('d.xml'), P('a/d.xml'))
        self.assertEqual(P('/a/b.py').with_name('d.xml'), P('/a/d.xml'))
        self.assertEqual(P('a/Dot ending.').with_name('d.xml'), P('a/d.xml'))
        self.assertEqual(P('/a/Dot ending.').with_name('d.xml'), P('/a/d.xml'))
        # These paths have no final component to replace.
        self.assertRaises(ValueError, P('').with_name, 'd.xml')
        self.assertRaises(ValueError, P('.').with_name, 'd.xml')
        self.assertRaises(ValueError, P('/').with_name, 'd.xml')
        # A replacement name must be a single, non-empty component.
        self.assertRaises(ValueError, P('a/b').with_name, '')
        self.assertRaises(ValueError, P('a/b').with_name, '/c')
        self.assertRaises(ValueError, P('a/b').with_name, 'c/')
        self.assertRaises(ValueError, P('a/b').with_name, 'c/d')
    def test_with_suffix_common(self):
        """with_suffix() swaps (or, with '', removes) the extension; malformed suffixes raise ValueError."""
        P = self.cls
        self.assertEqual(P('a/b').with_suffix('.gz'), P('a/b.gz'))
        self.assertEqual(P('/a/b').with_suffix('.gz'), P('/a/b.gz'))
        self.assertEqual(P('a/b.py').with_suffix('.gz'), P('a/b.gz'))
        self.assertEqual(P('/a/b.py').with_suffix('.gz'), P('/a/b.gz'))
        # An empty suffix strips the existing one.
        self.assertEqual(P('a/b.py').with_suffix(''), P('a/b'))
        self.assertEqual(P('/a/b').with_suffix(''), P('/a/b'))
        # Paths without a name cannot take a suffix.
        self.assertRaises(ValueError, P('').with_suffix, '.gz')
        self.assertRaises(ValueError, P('.').with_suffix, '.gz')
        self.assertRaises(ValueError, P('/').with_suffix, '.gz')
        # A suffix must start with a dot and contain no separators.
        self.assertRaises(ValueError, P('a/b').with_suffix, 'gz')
        self.assertRaises(ValueError, P('a/b').with_suffix, '/')
        self.assertRaises(ValueError, P('a/b').with_suffix, '.')
        self.assertRaises(ValueError, P('a/b').with_suffix, '/.gz')
        self.assertRaises(ValueError, P('a/b').with_suffix, 'c/d')
        self.assertRaises(ValueError, P('a/b').with_suffix, '.c/.d')
        self.assertRaises(ValueError, P('a/b').with_suffix, './.d')
        self.assertRaises(ValueError, P('a/b').with_suffix, '.d/.')
    def test_relative_to_common(self):
        """relative_to() strips a prefix path; non-prefix arguments raise ValueError."""
        P = self.cls
        p = P('a/b')
        # Requires at least one argument, and it must be str/PurePath (not bytes).
        self.assertRaises(TypeError, p.relative_to)
        self.assertRaises(TypeError, p.relative_to, b'a')
        self.assertEqual(p.relative_to(P()), P('a/b'))
        self.assertEqual(p.relative_to(''), P('a/b'))
        self.assertEqual(p.relative_to(P('a')), P('b'))
        self.assertEqual(p.relative_to('a'), P('b'))
        self.assertEqual(p.relative_to('a/'), P('b'))
        self.assertEqual(p.relative_to(P('a/b')), P())
        self.assertEqual(p.relative_to('a/b'), P())
        # Multiple arguments are joined before stripping.
        self.assertEqual(p.relative_to('a', 'b'), P())
        # Unrelated, longer, or absolute prefixes are rejected.
        self.assertRaises(ValueError, p.relative_to, P('c'))
        self.assertRaises(ValueError, p.relative_to, P('a/b/c'))
        self.assertRaises(ValueError, p.relative_to, P('a/c'))
        self.assertRaises(ValueError, p.relative_to, P('/a'))
        p = P('/a/b')
        self.assertEqual(p.relative_to(P('/')), P('a/b'))
        self.assertEqual(p.relative_to('/'), P('a/b'))
        self.assertEqual(p.relative_to(P('/a')), P('b'))
        self.assertEqual(p.relative_to('/a'), P('b'))
        self.assertEqual(p.relative_to('/a/'), P('b'))
        self.assertEqual(p.relative_to(P('/a/b')), P())
        self.assertEqual(p.relative_to('/a/b'), P())
        self.assertRaises(ValueError, p.relative_to, P('/c'))
        self.assertRaises(ValueError, p.relative_to, P('/a/b/c'))
        self.assertRaises(ValueError, p.relative_to, P('/a/c'))
        # A relative prefix never matches an absolute path.
        self.assertRaises(ValueError, p.relative_to, P())
        self.assertRaises(ValueError, p.relative_to, '')
        self.assertRaises(ValueError, p.relative_to, P('a'))
    def test_pickling_common(self):
        """Paths round-trip through every pickle protocol, preserving class, value, and hash."""
        P = self.cls
        p = P('/a/b')
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            dumped = pickle.dumps(p, proto)
            pp = pickle.loads(dumped)
            self.assertIs(pp.__class__, p.__class__)
            self.assertEqual(pp, p)
            self.assertEqual(hash(pp), hash(p))
            self.assertEqual(str(pp), str(p))
class PurePosixPathTest(_BasePurePathTest, unittest.TestCase):
    """Flavour-specific tests for PurePosixPath (case-sensitive, '/'-rooted)."""
    cls = pathlib.PurePosixPath
    def test_root(self):
        """POSIX: exactly two leading slashes are preserved as the root (POSIX special case)."""
        P = self.cls
        self.assertEqual(P('/a/b').root, '/')
        self.assertEqual(P('///a/b').root, '/')
        # '//' has implementation-defined meaning on POSIX, so it is kept.
        self.assertEqual(P('//a/b').root, '//')
    def test_eq(self):
        """POSIX comparisons are case-sensitive; '/' and '///' collapse but '//' does not."""
        P = self.cls
        self.assertNotEqual(P('a/b'), P('A/b'))
        self.assertEqual(P('/a'), P('///a'))
        self.assertNotEqual(P('/a'), P('//a'))
    def test_as_uri(self):
        """as_uri() produces a file:// URI with percent-encoded special characters."""
        P = self.cls
        self.assertEqual(P('/').as_uri(), 'file:///')
        self.assertEqual(P('/a/b.c').as_uri(), 'file:///a/b.c')
        self.assertEqual(P('/a/b%#c').as_uri(), 'file:///a/b%25%23c')
    def test_as_uri_non_ascii(self):
        """Non-ASCII characters are encoded with the filesystem encoding, then percent-escaped."""
        from urllib.parse import quote_from_bytes
        P = self.cls
        try:
            os.fsencode('é')
        except UnicodeEncodeError:
            self.skipTest('\\xe9 cannot be encoded to the filesystem encoding')
        self.assertEqual(P('/a/bé').as_uri(), 'file:///a/b' +
            quote_from_bytes(os.fsencode('é')))
    def test_match(self):
        """POSIX match() is case-sensitive."""
        P = self.cls
        self.assertFalse(P('A.py').match('a.PY'))
    def test_is_absolute(self):
        """A POSIX path is absolute iff it has a root."""
        P = self.cls
        self.assertFalse(P().is_absolute())
        self.assertFalse(P('a').is_absolute())
        self.assertFalse(P('a/b/').is_absolute())
        self.assertTrue(P('/').is_absolute())
        self.assertTrue(P('/a').is_absolute())
        self.assertTrue(P('/a/b/').is_absolute())
        self.assertTrue(P('//a').is_absolute())
        self.assertTrue(P('//a/b').is_absolute())
    def test_is_reserved(self):
        """No POSIX path is reserved (reserved names are a Windows concept)."""
        P = self.cls
        self.assertIs(False, P('').is_reserved())
        self.assertIs(False, P('/').is_reserved())
        self.assertIs(False, P('/foo/bar').is_reserved())
        self.assertIs(False, P('/dev/con/PRN/NUL').is_reserved())
    def test_join(self):
        """joinpath(): a rooted argument replaces everything up to its root."""
        P = self.cls
        p = P('//a')
        pp = p.joinpath('b')
        self.assertEqual(pp, P('//a/b'))
        pp = P('/a').joinpath('//c')
        self.assertEqual(pp, P('//c'))
        pp = P('//a').joinpath('/c')
        self.assertEqual(pp, P('/c'))
    def test_div(self):
        """The / operator behaves exactly like joinpath()."""
        P = self.cls
        p = P('//a')
        pp = p / 'b'
        self.assertEqual(pp, P('//a/b'))
        pp = P('/a') / '//c'
        self.assertEqual(pp, P('//c'))
        pp = P('//a') / '/c'
        self.assertEqual(pp, P('/c'))
class PureWindowsPathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PureWindowsPath
equivalences = _BasePurePathTest.equivalences.copy()
equivalences.update({'c:a': [('c:', 'a'), ('c:', 'a/'), ('/', 'c:', 'a'
)], 'c:/a': [('c:/', 'a'), ('c:', '/', 'a'), ('c:', '/a'), ('/z',
'c:/', 'a'), ('//x/y', 'c:/', 'a')], '//a/b/': [('//a/b',)],
'//a/b/c': [('//a/b', 'c'), ('//a/b/', 'c')]})
    def test_str(self):
        """str() uses backslashes; a bare UNC share keeps its trailing backslash."""
        p = self.cls('a/b/c')
        self.assertEqual(str(p), 'a\\b\\c')
        p = self.cls('c:/a/b/c')
        self.assertEqual(str(p), 'c:\\a\\b\\c')
        p = self.cls('//a/b')
        self.assertEqual(str(p), '\\\\a\\b\\')
        p = self.cls('//a/b/c')
        self.assertEqual(str(p), '\\\\a\\b\\c')
        p = self.cls('//a/b/c/d')
        self.assertEqual(str(p), '\\\\a\\b\\c\\d')
    def test_str_subclass(self):
        """Constructing from str subclasses works for drive, rooted, and UNC forms."""
        self._check_str_subclass('c:')
        self._check_str_subclass('c:a')
        self._check_str_subclass('c:a\\b.txt')
        self._check_str_subclass('c:\\')
        self._check_str_subclass('c:\\a')
        self._check_str_subclass('c:\\a\\b.txt')
        self._check_str_subclass('\\\\some\\share')
        self._check_str_subclass('\\\\some\\share\\a')
        self._check_str_subclass('\\\\some\\share\\a\\b.txt')
    def test_eq(self):
        """Windows comparisons are case-insensitive, including drives and UNC shares."""
        P = self.cls
        self.assertEqual(P('c:a/b'), P('c:a/b'))
        self.assertEqual(P('c:a/b'), P('c:', 'a', 'b'))
        self.assertNotEqual(P('c:a/b'), P('d:a/b'))
        self.assertNotEqual(P('c:a/b'), P('c:/a/b'))
        self.assertNotEqual(P('/a/b'), P('c:/a/b'))
        # Case-insensitivity.
        self.assertEqual(P('a/B'), P('A/b'))
        self.assertEqual(P('C:a/B'), P('c:A/b'))
        self.assertEqual(P('//Some/SHARE/a/B'), P('//somE/share/A/b'))
    def test_as_uri(self):
        """as_uri() requires an absolute path; drive and UNC forms map to file: URIs."""
        P = self.cls
        # Rootless or drive-relative paths cannot be turned into URIs.
        with self.assertRaises(ValueError):
            P('/a/b').as_uri()
        with self.assertRaises(ValueError):
            P('c:a/b').as_uri()
        self.assertEqual(P('c:/').as_uri(), 'file:///c:/')
        self.assertEqual(P('c:/a/b.c').as_uri(), 'file:///c:/a/b.c')
        self.assertEqual(P('c:/a/b%#c').as_uri(), 'file:///c:/a/b%25%23c')
        self.assertEqual(P('c:/a/bé').as_uri(), 'file:///c:/a/b%C3%A9')
        # UNC paths put the server in the URI authority component.
        self.assertEqual(P('//some/share/').as_uri(), 'file://some/share/')
        self.assertEqual(P('//some/share/a/b.c').as_uri(),
            'file://some/share/a/b.c')
        self.assertEqual(P('//some/share/a/b%#cé').as_uri(),
            'file://some/share/a/b%25%23c%C3%A9')
    def test_match_common(self):
        """Windows match(): drives/UNC anchors must agree; matching is case-insensitive."""
        P = self.cls
        # Absolute patterns.
        self.assertTrue(P('c:/b.py').match('/*.py'))
        self.assertTrue(P('c:/b.py').match('c:*.py'))
        self.assertTrue(P('c:/b.py').match('c:/*.py'))
        self.assertFalse(P('d:/b.py').match('c:/*.py'))
        self.assertFalse(P('b.py').match('/*.py'))
        self.assertFalse(P('b.py').match('c:*.py'))
        self.assertFalse(P('b.py').match('c:/*.py'))
        self.assertFalse(P('c:b.py').match('/*.py'))
        self.assertFalse(P('c:b.py').match('c:/*.py'))
        self.assertFalse(P('/b.py').match('c:*.py'))
        self.assertFalse(P('/b.py').match('c:/*.py'))
        # UNC patterns.
        self.assertTrue(P('//some/share/a.py').match('/*.py'))
        self.assertTrue(P('//some/share/a.py').match('//some/share/*.py'))
        self.assertFalse(P('//other/share/a.py').match('//some/share/*.py'))
        self.assertFalse(P('//some/share/a/b.py').match('//some/share/*.py'))
        # Case-insensitivity.
        self.assertTrue(P('B.py').match('b.PY'))
        self.assertTrue(P('c:/a/B.Py').match('C:/A/*.pY'))
        self.assertTrue(P('//Some/Share/B.Py').match('//somE/sharE/*.pY'))
    def test_ordering_common(self):
        """Windows ordering is case-insensitive: paths differing only in case compare equal."""
        def assertOrderedEqual(a, b):
            # a and b must be mutually <= and >=, i.e. equal under ordering.
            self.assertLessEqual(a, b)
            self.assertGreaterEqual(b, a)
        P = self.cls
        p = P('c:A/b')
        q = P('C:a/B')
        assertOrderedEqual(p, q)
        self.assertFalse(p < q)
        self.assertFalse(p > q)
        p = P('//some/Share/A/b')
        q = P('//Some/SHARE/a/B')
        assertOrderedEqual(p, q)
        self.assertFalse(p < q)
        self.assertFalse(p > q)
    def test_parts(self):
        """parts: a drive-relative path starts with 'c:'; rooted paths start with the full anchor."""
        P = self.cls
        p = P('c:a/b')
        parts = p.parts
        self.assertEqual(parts, ('c:', 'a', 'b'))
        p = P('c:/a/b')
        parts = p.parts
        self.assertEqual(parts, ('c:\\', 'a', 'b'))
        p = P('//a/b/c/d')
        parts = p.parts
        self.assertEqual(parts, ('\\\\a\\b\\', 'c', 'd'))
    def test_parent(self):
        """parent saturates at the drive, the drive root, or the UNC share root."""
        P = self.cls
        p = P('z:a/b/c')
        self.assertEqual(p.parent, P('z:a/b'))
        self.assertEqual(p.parent.parent, P('z:a'))
        self.assertEqual(p.parent.parent.parent, P('z:'))
        self.assertEqual(p.parent.parent.parent.parent, P('z:'))
        p = P('z:/a/b/c')
        self.assertEqual(p.parent, P('z:/a/b'))
        self.assertEqual(p.parent.parent, P('z:/a'))
        self.assertEqual(p.parent.parent.parent, P('z:/'))
        self.assertEqual(p.parent.parent.parent.parent, P('z:/'))
        p = P('//a/b/c/d')
        self.assertEqual(p.parent, P('//a/b/c'))
        self.assertEqual(p.parent.parent, P('//a/b'))
        # The UNC share itself ('//a/b') is the top; walking further is a no-op.
        self.assertEqual(p.parent.parent.parent, P('//a/b'))
    def test_parents(self):
        """parents stops at the drive / drive root / UNC share anchor."""
        P = self.cls
        p = P('z:a/b/')
        par = p.parents
        self.assertEqual(len(par), 2)
        self.assertEqual(par[0], P('z:a'))
        self.assertEqual(par[1], P('z:'))
        self.assertEqual(list(par), [P('z:a'), P('z:')])
        with self.assertRaises(IndexError):
            par[2]
        p = P('z:/a/b/')
        par = p.parents
        self.assertEqual(len(par), 2)
        self.assertEqual(par[0], P('z:/a'))
        self.assertEqual(par[1], P('z:/'))
        self.assertEqual(list(par), [P('z:/a'), P('z:/')])
        with self.assertRaises(IndexError):
            par[2]
        p = P('//a/b/c/d')
        par = p.parents
        self.assertEqual(len(par), 2)
        self.assertEqual(par[0], P('//a/b/c'))
        self.assertEqual(par[1], P('//a/b'))
        self.assertEqual(list(par), [P('//a/b/c'), P('//a/b')])
        with self.assertRaises(IndexError):
            par[2]
    def test_drive(self):
        """drive is the 'X:' letter, or the '\\\\server\\share' pair for UNC paths."""
        P = self.cls
        self.assertEqual(P('c:').drive, 'c:')
        self.assertEqual(P('c:a/b').drive, 'c:')
        self.assertEqual(P('c:/').drive, 'c:')
        self.assertEqual(P('c:/a/b/').drive, 'c:')
        self.assertEqual(P('//a/b').drive, '\\\\a\\b')
        self.assertEqual(P('//a/b/').drive, '\\\\a\\b')
        self.assertEqual(P('//a/b/c/d').drive, '\\\\a\\b')
    def test_root(self):
        """root is '\\' for rooted and UNC paths, empty for drive-relative ones."""
        P = self.cls
        self.assertEqual(P('c:').root, '')
        self.assertEqual(P('c:a/b').root, '')
        self.assertEqual(P('c:/').root, '\\')
        self.assertEqual(P('c:/a/b/').root, '\\')
        self.assertEqual(P('//a/b').root, '\\')
        self.assertEqual(P('//a/b/').root, '\\')
        self.assertEqual(P('//a/b/c/d').root, '\\')
    def test_anchor(self):
        """anchor concatenates drive and root."""
        P = self.cls
        self.assertEqual(P('c:').anchor, 'c:')
        self.assertEqual(P('c:a/b').anchor, 'c:')
        self.assertEqual(P('c:/').anchor, 'c:\\')
        self.assertEqual(P('c:/a/b/').anchor, 'c:\\')
        self.assertEqual(P('//a/b').anchor, '\\\\a\\b\\')
        self.assertEqual(P('//a/b/').anchor, '\\\\a\\b\\')
        self.assertEqual(P('//a/b/c/d').anchor, '\\\\a\\b\\')
    def test_name(self):
        """name excludes the drive and the UNC server/share components."""
        P = self.cls
        self.assertEqual(P('c:').name, '')
        self.assertEqual(P('c:/').name, '')
        self.assertEqual(P('c:a/b').name, 'b')
        self.assertEqual(P('c:/a/b').name, 'b')
        self.assertEqual(P('c:a/b.py').name, 'b.py')
        self.assertEqual(P('c:/a/b.py').name, 'b.py')
        # The share itself has no name, even though it contains dots.
        self.assertEqual(P('//My.py/Share.php').name, '')
        self.assertEqual(P('//My.py/Share.php/a/b').name, 'b')
    def test_suffix(self):
        """suffix mirrors the common rules; drive and UNC anchors contribute none."""
        P = self.cls
        self.assertEqual(P('c:').suffix, '')
        self.assertEqual(P('c:/').suffix, '')
        self.assertEqual(P('c:a/b').suffix, '')
        self.assertEqual(P('c:/a/b').suffix, '')
        self.assertEqual(P('c:a/b.py').suffix, '.py')
        self.assertEqual(P('c:/a/b.py').suffix, '.py')
        self.assertEqual(P('c:a/.hgrc').suffix, '')
        self.assertEqual(P('c:/a/.hgrc').suffix, '')
        self.assertEqual(P('c:a/.hg.rc').suffix, '.rc')
        self.assertEqual(P('c:/a/.hg.rc').suffix, '.rc')
        self.assertEqual(P('c:a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('c:/a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('c:a/Some name. Ending with a dot.').suffix, '')
        self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffix, '')
        # Dots in the server/share names are not suffixes.
        self.assertEqual(P('//My.py/Share.php').suffix, '')
        self.assertEqual(P('//My.py/Share.php/a/b').suffix, '')
    def test_suffixes(self):
        """suffixes mirrors the common rules; drive and UNC anchors contribute none."""
        P = self.cls
        self.assertEqual(P('c:').suffixes, [])
        self.assertEqual(P('c:/').suffixes, [])
        self.assertEqual(P('c:a/b').suffixes, [])
        self.assertEqual(P('c:/a/b').suffixes, [])
        self.assertEqual(P('c:a/b.py').suffixes, ['.py'])
        self.assertEqual(P('c:/a/b.py').suffixes, ['.py'])
        self.assertEqual(P('c:a/.hgrc').suffixes, [])
        self.assertEqual(P('c:/a/.hgrc').suffixes, [])
        self.assertEqual(P('c:a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('c:/a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('c:a/b.tar.gz').suffixes, ['.tar', '.gz'])
        self.assertEqual(P('c:/a/b.tar.gz').suffixes, ['.tar', '.gz'])
        # Dots in the server/share names are not suffixes.
        self.assertEqual(P('//My.py/Share.php').suffixes, [])
        self.assertEqual(P('//My.py/Share.php/a/b').suffixes, [])
        self.assertEqual(P('c:a/Some name. Ending with a dot.').suffixes, [])
        self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffixes, [])
    def test_stem(self):
        """stem mirrors the common rules with a drive present."""
        P = self.cls
        self.assertEqual(P('c:').stem, '')
        self.assertEqual(P('c:.').stem, '')
        self.assertEqual(P('c:..').stem, '..')
        self.assertEqual(P('c:/').stem, '')
        self.assertEqual(P('c:a/b').stem, 'b')
        self.assertEqual(P('c:a/b.py').stem, 'b')
        self.assertEqual(P('c:a/.hgrc').stem, '.hgrc')
        self.assertEqual(P('c:a/.hg.rc').stem, '.hg')
        self.assertEqual(P('c:a/b.tar.gz').stem, 'b.tar')
        self.assertEqual(P('c:a/Some name. Ending with a dot.').stem,
            'Some name. Ending with a dot.')
    def test_with_name(self):
        """with_name(): replacement names containing a drive or share are rejected."""
        P = self.cls
        self.assertEqual(P('c:a/b').with_name('d.xml'), P('c:a/d.xml'))
        self.assertEqual(P('c:/a/b').with_name('d.xml'), P('c:/a/d.xml'))
        self.assertEqual(P('c:a/Dot ending.').with_name('d.xml'), P(
            'c:a/d.xml'))
        self.assertEqual(P('c:/a/Dot ending.').with_name('d.xml'), P(
            'c:/a/d.xml'))
        # Anchors have no name to replace.
        self.assertRaises(ValueError, P('c:').with_name, 'd.xml')
        self.assertRaises(ValueError, P('c:/').with_name, 'd.xml')
        self.assertRaises(ValueError, P('//My/Share').with_name, 'd.xml')
        # A replacement name may not carry a drive or share.
        self.assertRaises(ValueError, P('c:a/b').with_name, 'd:')
        self.assertRaises(ValueError, P('c:a/b').with_name, 'd:e')
        self.assertRaises(ValueError, P('c:a/b').with_name, 'd:/e')
        self.assertRaises(ValueError, P('c:a/b').with_name, '//My/Share')
    def test_with_suffix(self):
        """with_suffix(): suffixes containing separators or drives are rejected."""
        P = self.cls
        self.assertEqual(P('c:a/b').with_suffix('.gz'), P('c:a/b.gz'))
        self.assertEqual(P('c:/a/b').with_suffix('.gz'), P('c:/a/b.gz'))
        self.assertEqual(P('c:a/b.py').with_suffix('.gz'), P('c:a/b.gz'))
        self.assertEqual(P('c:/a/b.py').with_suffix('.gz'), P('c:/a/b.gz'))
        # Anchors have no name to attach a suffix to.
        self.assertRaises(ValueError, P('').with_suffix, '.gz')
        self.assertRaises(ValueError, P('.').with_suffix, '.gz')
        self.assertRaises(ValueError, P('/').with_suffix, '.gz')
        self.assertRaises(ValueError, P('//My/Share').with_suffix, '.gz')
        # Invalid suffixes: no leading dot, separators (either kind), or drives.
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '/')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '/.gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\.gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:.gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c/d')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c\\d')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c/d')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c\\d')
    def test_relative_to(self):
        """relative_to() compares drives/shares case-insensitively and never crosses them."""
        P = self.cls
        # Drive-relative path.
        p = P('C:Foo/Bar')
        self.assertEqual(p.relative_to(P('c:')), P('Foo/Bar'))
        self.assertEqual(p.relative_to('c:'), P('Foo/Bar'))
        self.assertEqual(p.relative_to(P('c:foO')), P('Bar'))
        self.assertEqual(p.relative_to('c:foO'), P('Bar'))
        self.assertEqual(p.relative_to('c:foO/'), P('Bar'))
        self.assertEqual(p.relative_to(P('c:foO/baR')), P())
        self.assertEqual(p.relative_to('c:foO/baR'), P())
        # A driveless or wrong-drive prefix never matches.
        self.assertRaises(ValueError, p.relative_to, P())
        self.assertRaises(ValueError, p.relative_to, '')
        self.assertRaises(ValueError, p.relative_to, P('d:'))
        self.assertRaises(ValueError, p.relative_to, P('/'))
        self.assertRaises(ValueError, p.relative_to, P('Foo'))
        self.assertRaises(ValueError, p.relative_to, P('/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('C:/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('C:Foo/Bar/Baz'))
        self.assertRaises(ValueError, p.relative_to, P('C:Foo/Baz'))
        # Rooted path on a drive.
        p = P('C:/Foo/Bar')
        self.assertEqual(p.relative_to(P('c:')), P('/Foo/Bar'))
        self.assertEqual(p.relative_to('c:'), P('/Foo/Bar'))
        self.assertEqual(str(p.relative_to(P('c:'))), '\\Foo\\Bar')
        self.assertEqual(str(p.relative_to('c:')), '\\Foo\\Bar')
        self.assertEqual(p.relative_to(P('c:/')), P('Foo/Bar'))
        self.assertEqual(p.relative_to('c:/'), P('Foo/Bar'))
        self.assertEqual(p.relative_to(P('c:/foO')), P('Bar'))
        self.assertEqual(p.relative_to('c:/foO'), P('Bar'))
        self.assertEqual(p.relative_to('c:/foO/'), P('Bar'))
        self.assertEqual(p.relative_to(P('c:/foO/baR')), P())
        self.assertEqual(p.relative_to('c:/foO/baR'), P())
        self.assertRaises(ValueError, p.relative_to, P('C:/Baz'))
        self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Bar/Baz'))
        self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Baz'))
        self.assertRaises(ValueError, p.relative_to, P('C:Foo'))
        self.assertRaises(ValueError, p.relative_to, P('d:'))
        self.assertRaises(ValueError, p.relative_to, P('d:/'))
        self.assertRaises(ValueError, p.relative_to, P('/'))
        self.assertRaises(ValueError, p.relative_to, P('/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('//C/Foo'))
        # UNC path: the share is the anchor and matches case-insensitively.
        p = P('//Server/Share/Foo/Bar')
        self.assertEqual(p.relative_to(P('//sErver/sHare')), P('Foo/Bar'))
        self.assertEqual(p.relative_to('//sErver/sHare'), P('Foo/Bar'))
        self.assertEqual(p.relative_to('//sErver/sHare/'), P('Foo/Bar'))
        self.assertEqual(p.relative_to(P('//sErver/sHare/Foo')), P('Bar'))
        self.assertEqual(p.relative_to('//sErver/sHare/Foo'), P('Bar'))
        self.assertEqual(p.relative_to('//sErver/sHare/Foo/'), P('Bar'))
        self.assertEqual(p.relative_to(P('//sErver/sHare/Foo/Bar')), P())
        self.assertEqual(p.relative_to('//sErver/sHare/Foo/Bar'), P())
        self.assertRaises(ValueError, p.relative_to, P('/Server/Share/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('c:/Server/Share/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('//z/Share/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('//Server/z/Foo'))
    def test_is_absolute(self):
        """On Windows a path is absolute only with BOTH a drive (or share) and a root."""
        P = self.cls
        self.assertFalse(P().is_absolute())
        self.assertFalse(P('a').is_absolute())
        self.assertFalse(P('a/b/').is_absolute())
        # Rooted but driveless: still not absolute.
        self.assertFalse(P('/').is_absolute())
        self.assertFalse(P('/a').is_absolute())
        self.assertFalse(P('/a/b/').is_absolute())
        # Drive-relative: not absolute either.
        self.assertFalse(P('c:').is_absolute())
        self.assertFalse(P('c:a').is_absolute())
        self.assertFalse(P('c:a/b/').is_absolute())
        self.assertTrue(P('c:/').is_absolute())
        self.assertTrue(P('c:/a').is_absolute())
        self.assertTrue(P('c:/a/b/').is_absolute())
        # UNC paths are always absolute.
        self.assertTrue(P('//a/b').is_absolute())
        self.assertTrue(P('//a/b/').is_absolute())
        self.assertTrue(P('//a/b/c').is_absolute())
        self.assertTrue(P('//a/b/c/d').is_absolute())
    def test_join(self):
        """joinpath(): rooted args keep the drive; other-drive args replace it; same-drive relative args extend it."""
        P = self.cls
        p = P('C:/a/b')
        pp = p.joinpath('x/y')
        self.assertEqual(pp, P('C:/a/b/x/y'))
        # A rooted (driveless) argument resets to the current drive's root.
        pp = p.joinpath('/x/y')
        self.assertEqual(pp, P('C:/x/y'))
        # A different drive replaces everything.
        pp = p.joinpath('D:x/y')
        self.assertEqual(pp, P('D:x/y'))
        pp = p.joinpath('D:/x/y')
        self.assertEqual(pp, P('D:/x/y'))
        pp = p.joinpath('//host/share/x/y')
        self.assertEqual(pp, P('//host/share/x/y'))
        # Same drive (case-insensitive), relative part: appended in place.
        pp = p.joinpath('c:x/y')
        self.assertEqual(pp, P('C:/a/b/x/y'))
        pp = p.joinpath('c:/x/y')
        self.assertEqual(pp, P('C:/x/y'))
    def test_div(self):
        """The / operator follows the same drive-aware rules as joinpath()."""
        P = self.cls
        p = P('C:/a/b')
        self.assertEqual(p / 'x/y', P('C:/a/b/x/y'))
        self.assertEqual(p / 'x' / 'y', P('C:/a/b/x/y'))
        self.assertEqual(p / '/x/y', P('C:/x/y'))
        self.assertEqual(p / '/x' / 'y', P('C:/x/y'))
        self.assertEqual(p / 'D:x/y', P('D:x/y'))
        self.assertEqual(p / 'D:' / 'x/y', P('D:x/y'))
        self.assertEqual(p / 'D:/x/y', P('D:/x/y'))
        self.assertEqual(p / 'D:' / '/x/y', P('D:/x/y'))
        self.assertEqual(p / '//host/share/x/y', P('//host/share/x/y'))
        self.assertEqual(p / 'c:x/y', P('C:/a/b/x/y'))
        self.assertEqual(p / 'c:/x/y', P('C:/x/y'))
    def test_is_reserved(self):
        """is_reserved() flags DOS device names (CON, NUL, COM1-9, LPT1-9) as the final stem."""
        P = self.cls
        self.assertIs(False, P('').is_reserved())
        self.assertIs(False, P('/').is_reserved())
        self.assertIs(False, P('/foo/bar').is_reserved())
        self.assertIs(True, P('con').is_reserved())
        self.assertIs(True, P('NUL').is_reserved())
        # A reserved stem is reserved regardless of extension.
        self.assertIs(True, P('NUL.txt').is_reserved())
        self.assertIs(True, P('com1').is_reserved())
        self.assertIs(True, P('com9.bar').is_reserved())
        self.assertIs(False, P('bar.com9').is_reserved())
        self.assertIs(True, P('lpt1').is_reserved())
        self.assertIs(True, P('lpt9.bar').is_reserved())
        self.assertIs(False, P('bar.lpt9').is_reserved())
        # Only the last component matters.
        self.assertIs(False, P('c:/NUL/con/baz').is_reserved())
        self.assertIs(False, P('//my/share/nul/con/aux').is_reserved())
class PurePathTest(_BasePurePathTest, unittest.TestCase):
    """Tests for the flavour-dispatching PurePath factory class."""
    cls = pathlib.PurePath
    def test_concrete_class(self):
        """Instantiating PurePath yields the flavour matching the running OS."""
        p = self.cls('a')
        self.assertIs(type(p), pathlib.PureWindowsPath if os.name == 'nt' else
            pathlib.PurePosixPath)
    def test_different_flavours_unequal(self):
        """Paths of different flavours never compare equal."""
        p = pathlib.PurePosixPath('a')
        q = pathlib.PureWindowsPath('a')
        self.assertNotEqual(p, q)
    def test_different_flavours_unordered(self):
        """Paths of different flavours are unorderable (TypeError)."""
        p = pathlib.PurePosixPath('a')
        q = pathlib.PureWindowsPath('a')
        with self.assertRaises(TypeError):
            p < q
        with self.assertRaises(TypeError):
            p <= q
        with self.assertRaises(TypeError):
            p > q
        with self.assertRaises(TypeError):
            p >= q
# Absolute root of the on-disk tree used by the filesystem-accessing tests.
BASE = os.path.realpath(TESTFN)


def join(*x):
    """Join path components *x* under the absolute test-tree root."""
    return os.path.join(BASE, *x)


def rel_join(*x):
    """Join path components *x* under the relative test-tree root (TESTFN)."""
    return os.path.join(TESTFN, *x)
def symlink_skip_reason():
    """Return a human-readable reason to skip symlink tests, or None if usable."""
    if not pathlib.supports_symlinks:
        return 'no system support for symlinks'
    try:
        # Probe actual permission to create a symlink (e.g. Windows privileges).
        os.symlink(__file__, BASE)
    except OSError as e:
        return str(e)
    else:
        support.unlink(BASE)
        return None
# Evaluated once at import; a truthy value disables all symlink tests below.
symlink_skip_reason = symlink_skip_reason()
# Skip decorators for platform- and feature-dependent tests.
only_nt = unittest.skipIf(os.name != 'nt',
    'test requires a Windows-compatible system')
only_posix = unittest.skipIf(os.name == 'nt',
    'test requires a POSIX-compatible system')
with_symlinks = unittest.skipIf(symlink_skip_reason, symlink_skip_reason)
@only_posix
class PosixPathAsPureTest(PurePosixPathTest):
    """Re-run the pure-POSIX tests against the concrete PosixPath class."""
    cls = pathlib.PosixPath
@only_nt
class WindowsPathAsPureTest(PureWindowsPathTest):
    """Re-run the pure-Windows tests against the concrete WindowsPath class."""
    cls = pathlib.WindowsPath
    def test_owner(self):
        """owner() is unsupported on Windows."""
        P = self.cls
        with self.assertRaises(NotImplementedError):
            P('c:/').owner()
    def test_group(self):
        """group() is unsupported on Windows."""
        P = self.cls
        with self.assertRaises(NotImplementedError):
            P('c:/').group()
class _BasePathTest(object):
"""Tests for the FS-accessing functionalities of the Path classes."""
    def setUp(self):
        """Build the on-disk fixture tree under BASE (dirs, files, mode-0 dirE, symlinks)."""
        def cleanup():
            # dirE is chmod'ed to 0 below; restore access so rmtree can delete it.
            os.chmod(join('dirE'), 511)
            support.rmtree(BASE)
        self.addCleanup(cleanup)
        os.mkdir(BASE)
        os.mkdir(join('dirA'))
        os.mkdir(join('dirB'))
        os.mkdir(join('dirC'))
        os.mkdir(join('dirC', 'dirD'))
        os.mkdir(join('dirE'))
        with open(join('fileA'), 'wb') as f:
            f.write(b'this is file A\n')
        with open(join('dirB', 'fileB'), 'wb') as f:
            f.write(b'this is file B\n')
        with open(join('dirC', 'fileC'), 'wb') as f:
            f.write(b'this is file C\n')
        with open(join('dirC', 'dirD', 'fileD'), 'wb') as f:
            f.write(b'this is file D\n')
        # dirE is made inaccessible to exercise permission-error paths.
        os.chmod(join('dirE'), 0)
        if not symlink_skip_reason:
            # Valid, broken, directory, and cyclic symlinks.
            os.symlink('fileA', join('linkA'))
            os.symlink('non-existing', join('brokenLink'))
            self.dirlink('dirB', join('linkB'))
            self.dirlink(os.path.join('..', 'dirB'), join('dirA', 'linkC'))
            self.dirlink(os.path.join('..', 'dirB'), join('dirB', 'linkD'))
    # Directory symlinks need target_is_directory=True on Windows only.
    if os.name == 'nt':
        def dirlink(self, src, dest):
            """Create a directory symlink (Windows variant)."""
            os.symlink(src, dest, target_is_directory=True)
    else:
        def dirlink(self, src, dest):
            """Create a directory symlink (POSIX variant)."""
            os.symlink(src, dest)
    def assertSame(self, path_a, path_b):
        """Assert both arguments refer to the same file on disk (os.path.samefile)."""
        self.assertTrue(os.path.samefile(str(path_a), str(path_b)),
            "%r and %r don't point to the same file" % (path_a, path_b))
    def assertFileNotFound(self, func, *args, **kwargs):
        """Assert func(*args, **kwargs) raises FileNotFoundError with errno ENOENT."""
        with self.assertRaises(FileNotFoundError) as cm:
            func(*args, **kwargs)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
    def _test_cwd(self, p):
        """Check that *p* equals the current working directory as an absolute path."""
        q = self.cls(os.getcwd())
        self.assertEqual(p, q)
        self.assertEqual(str(p), str(q))
        self.assertIs(type(p), type(q))
        self.assertTrue(p.is_absolute())
    def test_cwd(self):
        """Path.cwd() returns the current working directory."""
        p = self.cls.cwd()
        self._test_cwd(p)
    def _test_home(self, p):
        """Check that *p* equals os.path.expanduser('~') as an absolute path."""
        q = self.cls(os.path.expanduser('~'))
        self.assertEqual(p, q)
        self.assertEqual(str(p), str(q))
        self.assertIs(type(p), type(q))
        self.assertTrue(p.is_absolute())
    def test_home(self):
        """Path.home() returns the user's home directory."""
        p = self.cls.home()
        self._test_home(p)
    def test_samefile(self):
        """samefile() accepts paths and strings; missing files raise FileNotFoundError."""
        fileA_path = os.path.join(BASE, 'fileA')
        fileB_path = os.path.join(BASE, 'dirB', 'fileB')
        p = self.cls(fileA_path)
        pp = self.cls(fileA_path)
        q = self.cls(fileB_path)
        self.assertTrue(p.samefile(fileA_path))
        self.assertTrue(p.samefile(pp))
        self.assertFalse(p.samefile(fileB_path))
        self.assertFalse(p.samefile(q))
        # Either side missing raises, regardless of argument type.
        non_existent = os.path.join(BASE, 'foo')
        r = self.cls(non_existent)
        self.assertRaises(FileNotFoundError, p.samefile, r)
        self.assertRaises(FileNotFoundError, p.samefile, non_existent)
        self.assertRaises(FileNotFoundError, r.samefile, p)
        self.assertRaises(FileNotFoundError, r.samefile, non_existent)
        self.assertRaises(FileNotFoundError, r.samefile, r)
        self.assertRaises(FileNotFoundError, r.samefile, non_existent)
    def test_empty_path(self):
        """An empty path is equivalent to '.' for filesystem access."""
        p = self.cls('')
        self.assertEqual(p.stat(), os.stat('.'))
    def test_expanduser_common(self):
        """expanduser() expands only a LEADING '~' component; elsewhere it is literal."""
        P = self.cls
        p = P('~')
        self.assertEqual(p.expanduser(), P(os.path.expanduser('~')))
        p = P('foo')
        self.assertEqual(p.expanduser(), p)
        p = P('/~')
        self.assertEqual(p.expanduser(), p)
        p = P('../~')
        self.assertEqual(p.expanduser(), p)
        # '~' directly under the filesystem anchor is not expanded either.
        p = P(P('').absolute().anchor) / '~'
        self.assertEqual(p.expanduser(), p)
    def test_exists(self):
        """exists() follows symlinks and returns False (not an error) for bad paths."""
        P = self.cls
        p = P(BASE)
        self.assertIs(True, p.exists())
        self.assertIs(True, (p / 'dirA').exists())
        self.assertIs(True, (p / 'fileA').exists())
        # A path component under a regular file does not exist.
        self.assertIs(False, (p / 'fileA' / 'bah').exists())
        if not symlink_skip_reason:
            self.assertIs(True, (p / 'linkA').exists())
            self.assertIs(True, (p / 'linkB').exists())
            self.assertIs(True, (p / 'linkB' / 'fileB').exists())
            self.assertIs(False, (p / 'linkA' / 'bah').exists())
        self.assertIs(False, (p / 'foo').exists())
        self.assertIs(False, P('/xyzzy').exists())
def test_open_common(self):
p = self.cls(BASE)
with (p / 'fileA').open('r') as f:
self.assertIsInstance(f, io.TextIOBase)
self.assertEqual(f.read(), 'this is file A\n')
with (p / 'fileA').open('rb') as f:
self.assertIsInstance(f, io.BufferedIOBase)
self.assertEqual(f.read().strip(), b'this is file A')
with (p / 'fileA').open('rb', buffering=0) as f:
self.assertIsInstance(f, io.RawIOBase)
self.assertEqual(f.read().strip(), b'this is file A')
def test_read_write_bytes(self):
p = self.cls(BASE)
(p / 'fileA').write_bytes(b'abcdefg')
self.assertEqual((p / 'fileA').read_bytes(), b'abcdefg')
self.assertRaises(TypeError, (p / 'fileA').write_bytes, 'somestr')
self.assertEqual((p / 'fileA').read_bytes(), b'abcdefg')
def test_read_write_text(self):
p = self.cls(BASE)
(p / 'fileA').write_text('äbcdefg', encoding='latin-1')
self.assertEqual((p / 'fileA').read_text(encoding='utf-8', errors=
'ignore'), 'bcdefg')
self.assertRaises(TypeError, (p / 'fileA').write_text, b'somebytes')
self.assertEqual((p / 'fileA').read_text(encoding='latin-1'), 'äbcdefg'
)
def test_iterdir(self):
    """iterdir() yields exactly the direct children of the directory."""
    P = self.cls
    p = P(BASE)
    it = p.iterdir()
    paths = set(it)
    expected = ['dirA', 'dirB', 'dirC', 'dirE', 'fileA']
    # Symlink entries are only present when the platform supports them.
    if not symlink_skip_reason:
        expected += ['linkA', 'linkB', 'brokenLink']
    self.assertEqual(paths, {P(BASE, q) for q in expected})
@with_symlinks
def test_iterdir_symlink(self):
P = self.cls
p = P(BASE, 'linkB')
paths = set(p.iterdir())
expected = {P(BASE, 'linkB', q) for q in ['fileB', 'linkD']}
self.assertEqual(paths, expected)
def test_iterdir_nodir(self):
p = self.cls(BASE, 'fileA')
with self.assertRaises(OSError) as cm:
next(p.iterdir())
self.assertIn(cm.exception.errno, (errno.ENOTDIR, errno.ENOENT,
errno.EINVAL))
def test_glob_common(self):
    """glob() returns an iterator yielding the matching paths."""
    def _check(glob, expected):
        self.assertEqual(set(glob), {P(BASE, q) for q in expected})
    P = self.cls
    p = P(BASE)
    it = p.glob('fileA')
    # The ABC aliases in the bare ``collections`` namespace were removed
    # in Python 3.10; the ABCs live in ``collections.abc``.
    self.assertIsInstance(it, collections.abc.Iterator)
    _check(it, ['fileA'])
    _check(p.glob('fileB'), [])
    _check(p.glob('dir*/file*'), ['dirB/fileB', 'dirC/fileC'])
    # Expected results differ depending on symlink support.
    if symlink_skip_reason:
        _check(p.glob('*A'), ['dirA', 'fileA'])
    else:
        _check(p.glob('*A'), ['dirA', 'fileA', 'linkA'])
    if symlink_skip_reason:
        _check(p.glob('*B/*'), ['dirB/fileB'])
    else:
        _check(p.glob('*B/*'), ['dirB/fileB', 'dirB/linkD',
            'linkB/fileB', 'linkB/linkD'])
    if symlink_skip_reason:
        _check(p.glob('*/fileB'), ['dirB/fileB'])
    else:
        _check(p.glob('*/fileB'), ['dirB/fileB', 'linkB/fileB'])
def test_rglob_common(self):
    """rglob() returns an iterator yielding recursive matches."""
    def _check(glob, expected):
        self.assertEqual(set(glob), {P(BASE, q) for q in expected})
    P = self.cls
    p = P(BASE)
    it = p.rglob('fileA')
    # The ABC aliases in the bare ``collections`` namespace were removed
    # in Python 3.10; the ABCs live in ``collections.abc``.
    self.assertIsInstance(it, collections.abc.Iterator)
    _check(it, ['fileA'])
    _check(p.rglob('fileB'), ['dirB/fileB'])
    _check(p.rglob('*/fileA'), [])
    # Expected results differ depending on symlink support.
    if symlink_skip_reason:
        _check(p.rglob('*/fileB'), ['dirB/fileB'])
    else:
        _check(p.rglob('*/fileB'), ['dirB/fileB', 'dirB/linkD/fileB',
            'linkB/fileB', 'dirA/linkC/fileB'])
    _check(p.rglob('file*'), ['fileA', 'dirB/fileB', 'dirC/fileC',
        'dirC/dirD/fileD'])
    p = P(BASE, 'dirC')
    _check(p.rglob('file*'), ['dirC/fileC', 'dirC/dirD/fileD'])
    _check(p.rglob('*/*'), ['dirC/dirD/fileD'])
@with_symlinks
def test_rglob_symlink_loop(self):
P = self.cls
p = P(BASE)
given = set(p.rglob('*'))
expect = {'brokenLink', 'dirA', 'dirA/linkC', 'dirB', 'dirB/fileB',
'dirB/linkD', 'dirC', 'dirC/dirD', 'dirC/dirD/fileD',
'dirC/fileC', 'dirE', 'fileA', 'linkA', 'linkB'}
self.assertEqual(given, {(p / x) for x in expect})
def test_glob_dotdot(self):
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob('..')), {P(BASE, '..')})
self.assertEqual(set(p.glob('dirA/../file*')), {P(BASE,
'dirA/../fileA')})
self.assertEqual(set(p.glob('../xyzzy')), set())
def _check_resolve(self, p, expected, strict=True):
    """Resolve *p* (strictly by default) and assert it equals *expected*."""
    resolved = p.resolve(strict=strict)
    self.assertEqual(resolved, expected)
# The relative and absolute variants currently share one implementation.
_check_resolve_relative = _check_resolve_absolute = _check_resolve
@with_symlinks
def test_resolve_common(self):
P = self.cls
p = P(BASE, 'foo')
with self.assertRaises(OSError) as cm:
p.resolve(strict=True)
self.assertEqual(cm.exception.errno, errno.ENOENT)
self.assertEqual(str(p.resolve(strict=False)), os.path.join(BASE,
'foo'))
p = P(BASE, 'foo', 'in', 'spam')
self.assertEqual(str(p.resolve(strict=False)), os.path.join(BASE,
'foo', 'in', 'spam'))
p = P(BASE, '..', 'foo', 'in', 'spam')
self.assertEqual(str(p.resolve(strict=False)), os.path.abspath(os.
path.join('foo', 'in', 'spam')))
p = P(BASE, 'dirB', 'fileB')
self._check_resolve_relative(p, p)
p = P(BASE, 'linkA')
self._check_resolve_relative(p, P(BASE, 'fileA'))
p = P(BASE, 'dirA', 'linkC', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
p = P(BASE, 'dirB', 'linkD', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
p = P(BASE, 'dirA', 'linkC', 'fileB', 'foo', 'in', 'spam')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB', 'foo',
'in', 'spam'), False)
p = P(BASE, 'dirA', 'linkC', '..', 'foo', 'in', 'spam')
if os.name == 'nt':
self._check_resolve_relative(p, P(BASE, 'dirA', 'foo', 'in',
'spam'), False)
else:
self._check_resolve_relative(p, P(BASE, 'foo', 'in', 'spam'), False
)
d = tempfile.mkdtemp(suffix='-dirD')
self.addCleanup(support.rmtree, d)
os.symlink(os.path.join(d), join('dirA', 'linkX'))
os.symlink(join('dirB'), os.path.join(d, 'linkY'))
p = P(BASE, 'dirA', 'linkX', 'linkY', 'fileB')
self._check_resolve_absolute(p, P(BASE, 'dirB', 'fileB'))
p = P(BASE, 'dirA', 'linkX', 'linkY', 'foo', 'in', 'spam')
self._check_resolve_relative(p, P(BASE, 'dirB', 'foo', 'in', 'spam'
), False)
p = P(BASE, 'dirA', 'linkX', 'linkY', '..', 'foo', 'in', 'spam')
if os.name == 'nt':
self._check_resolve_relative(p, P(d, 'foo', 'in', 'spam'), False)
else:
self._check_resolve_relative(p, P(BASE, 'foo', 'in', 'spam'), False
)
@with_symlinks
def test_resolve_dot(self):
p = self.cls(BASE)
self.dirlink('.', join('0'))
self.dirlink(os.path.join('0', '0'), join('1'))
self.dirlink(os.path.join('1', '1'), join('2'))
q = p / '2'
self.assertEqual(q.resolve(strict=True), p)
r = q / '3' / '4'
self.assertRaises(FileNotFoundError, r.resolve, strict=True)
self.assertEqual(r.resolve(strict=False), p / '3' / '4')
def test_with(self):
p = self.cls(BASE)
it = p.iterdir()
it2 = p.iterdir()
next(it2)
with p:
pass
self.assertRaises(ValueError, next, it)
self.assertRaises(ValueError, next, it2)
self.assertRaises(ValueError, p.open)
self.assertRaises(ValueError, p.resolve)
self.assertRaises(ValueError, p.absolute)
self.assertRaises(ValueError, p.__enter__)
def test_chmod(self):
    """chmod() sets the mode reported by stat()."""
    p = self.cls(BASE) / 'fileA'
    mode = p.stat().st_mode
    # 146 == 0o222, i.e. the write bits for owner/group/other.
    new_mode = mode & ~146
    p.chmod(new_mode)
    self.assertEqual(p.stat().st_mode, new_mode)
    new_mode = mode | 146
    p.chmod(new_mode)
    self.assertEqual(p.stat().st_mode, new_mode)
def test_stat(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(p.stat(), st)
p.chmod(st.st_mode ^ 146)
self.addCleanup(p.chmod, st.st_mode)
self.assertNotEqual(p.stat(), st)
@with_symlinks
def test_lstat(self):
    # For a symlink, stat() follows the link while lstat() reports the
    # link itself, so the two results must differ.
    p = self.cls(BASE) / 'linkA'
    st = p.stat()
    self.assertNotEqual(st, p.lstat())
def test_lstat_nosymlink(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(st, p.lstat())
@unittest.skipUnless(pwd, 'the pwd module is needed for this test')
def test_owner(self):
p = self.cls(BASE) / 'fileA'
uid = p.stat().st_uid
try:
name = pwd.getpwuid(uid).pw_name
except KeyError:
self.skipTest(
"user %d doesn't have an entry in the system database" % uid)
self.assertEqual(name, p.owner())
@unittest.skipUnless(grp, 'the grp module is needed for this test')
def test_group(self):
p = self.cls(BASE) / 'fileA'
gid = p.stat().st_gid
try:
name = grp.getgrgid(gid).gr_name
except KeyError:
self.skipTest(
"group %d doesn't have an entry in the system database" % gid)
self.assertEqual(name, p.group())
def test_unlink(self):
p = self.cls(BASE) / 'fileA'
p.unlink()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_rmdir(self):
p = self.cls(BASE) / 'dirA'
for q in p.iterdir():
q.unlink()
p.rmdir()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_rename(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
q = P / 'dirA' / 'fileAA'
p.rename(q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
r = rel_join('fileAAA')
q.rename(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_replace(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
q = P / 'dirA' / 'fileAA'
p.replace(q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
r = rel_join('dirB', 'fileB')
q.replace(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_touch_common(self):
P = self.cls(BASE)
p = P / 'newfileA'
self.assertFalse(p.exists())
p.touch()
self.assertTrue(p.exists())
st = p.stat()
old_mtime = st.st_mtime
old_mtime_ns = st.st_mtime_ns
os.utime(str(p), (old_mtime - 10, old_mtime - 10))
p.touch()
st = p.stat()
self.assertGreaterEqual(st.st_mtime_ns, old_mtime_ns)
self.assertGreaterEqual(st.st_mtime, old_mtime)
p = P / 'newfileB'
self.assertFalse(p.exists())
p.touch(mode=448, exist_ok=False)
self.assertTrue(p.exists())
self.assertRaises(OSError, p.touch, exist_ok=False)
def test_touch_nochange(self):
P = self.cls(BASE)
p = P / 'fileA'
p.touch()
with p.open('rb') as f:
self.assertEqual(f.read().strip(), b'this is file A')
def test_mkdir(self):
P = self.cls(BASE)
p = P / 'newdirA'
self.assertFalse(p.exists())
p.mkdir()
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_parents(self):
p = self.cls(BASE, 'newdirB', 'newdirC')
self.assertFalse(p.exists())
with self.assertRaises(OSError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.ENOENT)
p.mkdir(parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
mode = stat.S_IMODE(p.stat().st_mode)
p = self.cls(BASE, 'newdirD', 'newdirE')
p.mkdir(365, parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
if os.name != 'nt':
self.assertEqual(stat.S_IMODE(p.stat().st_mode), 3949 & mode)
self.assertEqual(stat.S_IMODE(p.parent.stat().st_mode), mode)
def test_mkdir_exist_ok(self):
p = self.cls(BASE, 'dirB')
st_ctime_first = p.stat().st_ctime
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(FileExistsError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
p.mkdir(exist_ok=True)
self.assertTrue(p.exists())
self.assertEqual(p.stat().st_ctime, st_ctime_first)
def test_mkdir_exist_ok_with_parent(self):
p = self.cls(BASE, 'dirC')
self.assertTrue(p.exists())
with self.assertRaises(FileExistsError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
p = p / 'newdirC'
p.mkdir(parents=True)
st_ctime_first = p.stat().st_ctime
self.assertTrue(p.exists())
with self.assertRaises(FileExistsError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
p.mkdir(parents=True, exist_ok=True)
self.assertTrue(p.exists())
self.assertEqual(p.stat().st_ctime, st_ctime_first)
def test_mkdir_exist_ok_root(self):
self.cls('/').resolve().mkdir(exist_ok=True)
self.cls('/').resolve().mkdir(parents=True, exist_ok=True)
@only_nt
def test_mkdir_with_unknown_drive(self):
for d in 'ZYXWVUTSRQPONMLKJIHGFEDCBA':
p = self.cls(d + ':\\')
if not p.is_dir():
break
else:
self.skipTest("cannot find a drive that doesn't exist")
with self.assertRaises(OSError):
(p / 'child' / 'path').mkdir(parents=True)
def test_mkdir_with_child_file(self):
p = self.cls(BASE, 'dirB', 'fileB')
self.assertTrue(p.exists())
with self.assertRaises(FileExistsError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
with self.assertRaises(FileExistsError) as cm:
p.mkdir(parents=True, exist_ok=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_no_parents_file(self):
p = self.cls(BASE, 'fileA')
self.assertTrue(p.exists())
with self.assertRaises(FileExistsError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
with self.assertRaises(FileExistsError) as cm:
p.mkdir(exist_ok=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_concurrent_parent_creation(self):
for pattern_num in range(32):
p = self.cls(BASE, 'dirCPC%d' % pattern_num)
self.assertFalse(p.exists())
def my_mkdir(path, mode=511):
path = str(path)
if pattern.pop():
os.mkdir(path, mode)
concurrently_created.add(path)
os.mkdir(path, mode)
pattern = [bool(pattern_num & 1 << n) for n in range(5)]
concurrently_created = set()
p12 = p / 'dir1' / 'dir2'
try:
with mock.patch('pathlib._normal_accessor.mkdir', my_mkdir):
p12.mkdir(parents=True, exist_ok=False)
except FileExistsError:
self.assertIn(str(p12), concurrently_created)
else:
self.assertNotIn(str(p12), concurrently_created)
self.assertTrue(p.exists())
@with_symlinks
def test_symlink_to(self):
P = self.cls(BASE)
target = P / 'fileA'
link = P / 'dirA' / 'linkAA'
link.symlink_to(target)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
link = P / 'dirA' / 'linkAAA'
link.symlink_to(str(target))
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertFalse(link.is_dir())
target = P / 'dirB'
link = P / 'dirA' / 'linkAAAA'
link.symlink_to(target, target_is_directory=True)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertTrue(link.is_dir())
self.assertTrue(list(link.iterdir()))
def test_is_dir(self):
P = self.cls(BASE)
self.assertTrue((P / 'dirA').is_dir())
self.assertFalse((P / 'fileA').is_dir())
self.assertFalse((P / 'non-existing').is_dir())
self.assertFalse((P / 'fileA' / 'bah').is_dir())
if not symlink_skip_reason:
self.assertFalse((P / 'linkA').is_dir())
self.assertTrue((P / 'linkB').is_dir())
self.assertFalse((P / 'brokenLink').is_dir())
def test_is_file(self):
P = self.cls(BASE)
self.assertTrue((P / 'fileA').is_file())
self.assertFalse((P / 'dirA').is_file())
self.assertFalse((P / 'non-existing').is_file())
self.assertFalse((P / 'fileA' / 'bah').is_file())
if not symlink_skip_reason:
self.assertTrue((P / 'linkA').is_file())
self.assertFalse((P / 'linkB').is_file())
self.assertFalse((P / 'brokenLink').is_file())
def test_is_symlink(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_symlink())
self.assertFalse((P / 'dirA').is_symlink())
self.assertFalse((P / 'non-existing').is_symlink())
self.assertFalse((P / 'fileA' / 'bah').is_symlink())
if not symlink_skip_reason:
self.assertTrue((P / 'linkA').is_symlink())
self.assertTrue((P / 'linkB').is_symlink())
self.assertTrue((P / 'brokenLink').is_symlink())
def test_is_fifo_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_fifo())
self.assertFalse((P / 'dirA').is_fifo())
self.assertFalse((P / 'non-existing').is_fifo())
self.assertFalse((P / 'fileA' / 'bah').is_fifo())
@unittest.skipUnless(hasattr(os, 'mkfifo'), 'os.mkfifo() required')
@unittest.skipIf(android_not_root, 'mkfifo not allowed, non root user')
def test_is_fifo_true(self):
P = self.cls(BASE, 'myfifo')
os.mkfifo(str(P))
self.assertTrue(P.is_fifo())
self.assertFalse(P.is_socket())
self.assertFalse(P.is_file())
def test_is_socket_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_socket())
self.assertFalse((P / 'dirA').is_socket())
self.assertFalse((P / 'non-existing').is_socket())
self.assertFalse((P / 'fileA' / 'bah').is_socket())
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'Unix sockets required')
def test_is_socket_true(self):
P = self.cls(BASE, 'mysock')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(sock.close)
try:
sock.bind(str(P))
except OSError as e:
if isinstance(e, PermissionError
) or 'AF_UNIX path too long' in str(e):
self.skipTest('cannot bind Unix socket: ' + str(e))
self.assertTrue(P.is_socket())
self.assertFalse(P.is_fifo())
self.assertFalse(P.is_file())
def test_is_block_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_block_device())
self.assertFalse((P / 'dirA').is_block_device())
self.assertFalse((P / 'non-existing').is_block_device())
self.assertFalse((P / 'fileA' / 'bah').is_block_device())
def test_is_char_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_char_device())
self.assertFalse((P / 'dirA').is_char_device())
self.assertFalse((P / 'non-existing').is_char_device())
self.assertFalse((P / 'fileA' / 'bah').is_char_device())
def test_is_char_device_true(self):
P = self.cls('/dev/null')
if not P.exists():
self.skipTest('/dev/null required')
self.assertTrue(P.is_char_device())
self.assertFalse(P.is_block_device())
self.assertFalse(P.is_file())
def test_pickling_common(self):
p = self.cls(BASE, 'fileA')
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(p, proto)
pp = pickle.loads(dumped)
self.assertEqual(pp.stat(), p.stat())
def test_parts_interning(self):
P = self.cls
p = P('/usr/bin/foo')
q = P('/usr/local/bin')
self.assertIs(p.parts[1], q.parts[1])
self.assertIs(p.parts[2], q.parts[3])
def _check_complex_symlinks(self, link0_target):
P = self.cls(BASE)
self.dirlink(os.path.join('link0', 'link0'), join('link1'))
self.dirlink(os.path.join('link1', 'link1'), join('link2'))
self.dirlink(os.path.join('link2', 'link2'), join('link3'))
self.dirlink(link0_target, join('link0'))
p = (P / 'link0').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = (P / 'link1').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = (P / 'link2').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = (P / 'link3').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
old_path = os.getcwd()
os.chdir(BASE)
try:
p = self.cls('link0').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = self.cls('link1').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = self.cls('link2').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = self.cls('link3').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
finally:
os.chdir(old_path)
@with_symlinks
def test_complex_symlinks_absolute(self):
self._check_complex_symlinks(BASE)
@with_symlinks
def test_complex_symlinks_relative(self):
self._check_complex_symlinks('.')
@with_symlinks
def test_complex_symlinks_relative_dot_dot(self):
self._check_complex_symlinks(os.path.join('dirA', '..'))
class PathTest(_BasePathTest, unittest.TestCase):
    """Concrete-path tests run against the platform-selected pathlib.Path."""
    # Class under test; Path() dispatches to PosixPath/WindowsPath.
    cls = pathlib.Path
    def test_concrete_class(self):
        # Instantiating Path yields the flavour matching the current OS.
        p = self.cls('a')
        self.assertIs(type(p), pathlib.WindowsPath if os.name == 'nt' else
            pathlib.PosixPath)
    def test_unsupported_flavour(self):
        # The concrete class for the *other* OS cannot be instantiated.
        if os.name == 'nt':
            self.assertRaises(NotImplementedError, pathlib.PosixPath)
        else:
            self.assertRaises(NotImplementedError, pathlib.WindowsPath)
    def test_glob_empty_pattern(self):
        # An empty glob pattern is rejected with ValueError.
        p = self.cls()
        with self.assertRaisesRegex(ValueError, 'Unacceptable pattern'):
            list(p.glob(''))
@only_posix
class PosixPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.PosixPath
def _check_symlink_loop(self, *args, strict=True):
path = self.cls(*args)
with self.assertRaises(RuntimeError):
print(path.resolve(strict))
def test_open_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
with (p / 'new_file').open('wb'):
pass
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 438)
os.umask(18)
with (p / 'other_new_file').open('wb'):
pass
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 420)
def test_touch_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
(p / 'new_file').touch()
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 438)
os.umask(18)
(p / 'other_new_file').touch()
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 420)
(p / 'masked_new_file').touch(mode=488)
st = os.stat(join('masked_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 488)
@with_symlinks
def test_resolve_loop(self):
os.symlink('linkX/inside', join('linkX'))
self._check_symlink_loop(BASE, 'linkX')
os.symlink('linkY', join('linkY'))
self._check_symlink_loop(BASE, 'linkY')
os.symlink('linkZ/../linkZ', join('linkZ'))
self._check_symlink_loop(BASE, 'linkZ')
self._check_symlink_loop(BASE, 'linkZ', 'foo', strict=False)
os.symlink(join('linkU/inside'), join('linkU'))
self._check_symlink_loop(BASE, 'linkU')
os.symlink(join('linkV'), join('linkV'))
self._check_symlink_loop(BASE, 'linkV')
os.symlink(join('linkW/../linkW'), join('linkW'))
self._check_symlink_loop(BASE, 'linkW')
self._check_symlink_loop(BASE, 'linkW', 'foo', strict=False)
def test_glob(self):
P = self.cls
p = P(BASE)
given = set(p.glob('FILEa'))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.glob('FILEa*')), set())
def test_rglob(self):
P = self.cls
p = P(BASE, 'dirC')
given = set(p.rglob('FILEd'))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.rglob('FILEd*')), set())
@unittest.skipUnless(hasattr(pwd, 'getpwall'),
'pwd module does not expose getpwall()')
def test_expanduser(self):
P = self.cls
support.import_module('pwd')
import pwd
pwdent = pwd.getpwuid(os.getuid())
username = pwdent.pw_name
userhome = pwdent.pw_dir.rstrip('/') or '/'
for pwdent in pwd.getpwall():
othername = pwdent.pw_name
otherhome = pwdent.pw_dir.rstrip('/')
if othername != username and otherhome:
break
p1 = P('~/Documents')
p2 = P('~' + username + '/Documents')
p3 = P('~' + othername + '/Documents')
p4 = P('../~' + username + '/Documents')
p5 = P('/~' + username + '/Documents')
p6 = P('')
p7 = P('~fakeuser/Documents')
with support.EnvironmentVarGuard() as env:
env.pop('HOME', None)
self.assertEqual(p1.expanduser(), P(userhome) / 'Documents')
self.assertEqual(p2.expanduser(), P(userhome) / 'Documents')
self.assertEqual(p3.expanduser(), P(otherhome) / 'Documents')
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
self.assertRaises(RuntimeError, p7.expanduser)
env['HOME'] = '/tmp'
self.assertEqual(p1.expanduser(), P('/tmp/Documents'))
self.assertEqual(p2.expanduser(), P(userhome) / 'Documents')
self.assertEqual(p3.expanduser(), P(otherhome) / 'Documents')
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
self.assertRaises(RuntimeError, p7.expanduser)
@only_nt
class WindowsPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.WindowsPath
def test_glob(self):
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob('FILEa')), {P(BASE, 'fileA')})
def test_rglob(self):
P = self.cls
p = P(BASE, 'dirC')
self.assertEqual(set(p.rglob('FILEd')), {P(BASE, 'dirC/dirD/fileD')})
def test_expanduser(self):
P = self.cls
with support.EnvironmentVarGuard() as env:
env.pop('HOME', None)
env.pop('USERPROFILE', None)
env.pop('HOMEPATH', None)
env.pop('HOMEDRIVE', None)
env['USERNAME'] = 'alice'
p1 = P('~/My Documents')
p2 = P('~alice/My Documents')
p3 = P('~bob/My Documents')
p4 = P('/~/My Documents')
p5 = P('d:~/My Documents')
p6 = P('')
self.assertRaises(RuntimeError, p1.expanduser)
self.assertRaises(RuntimeError, p2.expanduser)
self.assertRaises(RuntimeError, p3.expanduser)
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
def check():
env.pop('USERNAME', None)
self.assertEqual(p1.expanduser(), P(
'C:/Users/alice/My Documents'))
self.assertRaises(KeyError, p2.expanduser)
env['USERNAME'] = 'alice'
self.assertEqual(p2.expanduser(), P(
'C:/Users/alice/My Documents'))
self.assertEqual(p3.expanduser(), P(
'C:/Users/bob/My Documents'))
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
env['HOME'] = 'C:\\Users\\alice'
check()
env.pop('HOME', None)
env['HOMEPATH'] = 'C:\\Users\\alice'
check()
env['HOMEDRIVE'] = 'C:\\'
env['HOMEPATH'] = 'Users\\alice'
check()
env.pop('HOMEDRIVE', None)
env.pop('HOMEPATH', None)
env['USERPROFILE'] = 'C:\\Users\\alice'
check()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 39.689976 | 79 | 0.536493 |
d069283fdd4b29874e858a93a0d513b85350e070 | 246 | py | Python | .history/50.pow-x-n_20211120025949.py | chenhuiyu/LeetCode-python | 2ceb4af821a9b7344c00549abbfdd5f72a5d595b | [
"Apache-2.0"
] | null | null | null | .history/50.pow-x-n_20211120025949.py | chenhuiyu/LeetCode-python | 2ceb4af821a9b7344c00549abbfdd5f72a5d595b | [
"Apache-2.0"
] | null | null | null | .history/50.pow-x-n_20211120025949.py | chenhuiyu/LeetCode-python | 2ceb4af821a9b7344c00549abbfdd5f72a5d595b | [
"Apache-2.0"
] | null | null | null | #
# @lc app=leetcode.cn id=50 lang=python3
#
# [50] Pow(x, n)
#
# Scratch values left from local experimentation; nothing below reads them.
n = 5
x = 2
# @lc code=start
class Solution:
    def myPow(self, x: float, n: int) -> float:
        """Return x**n using iterative binary exponentiation.

        Handles negative and zero exponents; runs in O(log |n|)
        multiplications. (The previous body bit-shifted a float and
        returned nothing.)
        """
        if n < 0:
            # x**(-n) == (1/x)**n
            x = 1.0 / x
            n = -n
        result = 1.0
        while n:
            if n & 1:          # current low bit set -> multiply in this square
                result *= x
            x *= x             # square the base
            n >>= 1            # move to the next bit of the exponent
        return result
# @lc code=end
| 12.947368 | 47 | 0.46748 |
a5496de9ab2da96e3779ff635a4240c80b164095 | 613 | py | Python | hanbai/hanbai/migrations/0002_auto_20210211_1451.py | Checkroth/kurumahanbai | d7ab0a59161e104cc4701cb6ed3138f394999b9f | [
"MIT"
] | null | null | null | hanbai/hanbai/migrations/0002_auto_20210211_1451.py | Checkroth/kurumahanbai | d7ab0a59161e104cc4701cb6ed3138f394999b9f | [
"MIT"
] | null | null | null | hanbai/hanbai/migrations/0002_auto_20210211_1451.py | Checkroth/kurumahanbai | d7ab0a59161e104cc4701cb6ed3138f394999b9f | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-02-11 05:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relax ExtraField.field_name and set choices/default
    on ExtraField.value_type. Do not edit by hand."""
    dependencies = [
        ('hanbai', '0001_initial'),
    ]
    operations = [
        # field_name becomes optional (nullable and blankable).
        migrations.AlterField(
            model_name='extrafield',
            name='field_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        # value_type gains explicit choices with a default of '1' (String).
        migrations.AlterField(
            model_name='extrafield',
            name='value_type',
            field=models.IntegerField(choices=[('1', 'String'), ('2', 'Integer')], default='1'),
        ),
    ]
| 25.541667 | 96 | 0.574225 |
05ea501a54669c175ece48b8ae02e52889534617 | 778 | py | Python | fytnet/migrations/0016_auto_20210327_1025.py | Code-Institute-Submissions/danielboots-fytletic | 67c3000a4b681d7f76255ab11db841a7f2ba613e | [
"OLDAP-2.3"
] | 1 | 2021-03-31T18:54:25.000Z | 2021-03-31T18:54:25.000Z | fytnet/migrations/0016_auto_20210327_1025.py | Code-Institute-Submissions/danielboots-fytletic | 67c3000a4b681d7f76255ab11db841a7f2ba613e | [
"OLDAP-2.3"
] | null | null | null | fytnet/migrations/0016_auto_20210327_1025.py | Code-Institute-Submissions/danielboots-fytletic | 67c3000a4b681d7f76255ab11db841a7f2ba613e | [
"OLDAP-2.3"
] | 1 | 2021-03-31T11:00:11.000Z | 2021-03-31T11:00:11.000Z | # Generated by Django 3.1.6 on 2021-03-27 10:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add defaults to Fighter contact fields.
    Do not edit by hand."""
    dependencies = [
        ('fytnet', '0015_auto_20210326_2053'),
    ]
    operations = [
        migrations.AlterField(
            model_name='fighter',
            name='email',
            field=models.CharField(default='user@fytletic.com', max_length=50),
        ),
        migrations.AlterField(
            model_name='fighter',
            name='nick_name',
            field=models.CharField(default='Fytletic Fighter', max_length=50),
        ),
        migrations.AlterField(
            model_name='fighter',
            name='whatsapp',
            field=models.CharField(default='07555555555', max_length=50),
        ),
    ]
| 26.827586 | 79 | 0.584833 |
1e440cf9e066f721f10fd116471d0263e011a84c | 18,758 | py | Python | guillotina/tests/fixtures.py | psanlorenzo/guillotina | 0840cf39914d23a9e26e35bd40939511d3ca78d7 | [
"BSD-2-Clause"
] | null | null | null | guillotina/tests/fixtures.py | psanlorenzo/guillotina | 0840cf39914d23a9e26e35bd40939511d3ca78d7 | [
"BSD-2-Clause"
] | null | null | null | guillotina/tests/fixtures.py | psanlorenzo/guillotina | 0840cf39914d23a9e26e35bd40939511d3ca78d7 | [
"BSD-2-Clause"
] | null | null | null | from async_asgi_testclient import TestClient
from guillotina import task_vars
from guillotina import testing
from guillotina.component import get_utility
from guillotina.component import globalregistry
from guillotina.const import ROOT_ID
from guillotina.const import TRASHED_ID
from guillotina.db.interfaces import ICockroachStorage
from guillotina.db.interfaces import IPostgresStorage
from guillotina.db.storages.cockroach import CockroachStorage
from guillotina.factory import make_app
from guillotina.interfaces import IApplication
from guillotina.interfaces import IDatabase
from guillotina.tests import mocks
from guillotina.tests.utils import ContainerRequesterAsyncContextManager
from guillotina.tests.utils import get_mocked_request
from guillotina.tests.utils import login
from guillotina.tests.utils import logout
from guillotina.tests.utils import wrap_request
from guillotina.transactions import get_tm
from guillotina.transactions import transaction
from guillotina.utils import merge_dicts
from unittest import mock
import aiohttp
import asyncio
import json
import os
import pytest
# Directory containing this module.
_dir = os.path.dirname(os.path.realpath(__file__))
# Environment-driven test configuration.
IS_TRAVIS = "TRAVIS" in os.environ
DATABASE = os.environ.get("DATABASE", "DUMMY")
DB_SCHEMA = os.environ.get("DB_SCHEMA", "public")
# Mutable, module-wide state shared between fixtures: the ``db`` fixture
# stores the discovered postgres host/port here as "pg_host"/"pg_port".
annotations = {"testdatabase": DATABASE, "test_dbschema": DB_SCHEMA, "redis": None, "travis": IS_TRAVIS}
def base_settings_configurator(settings):
    """Register the default catalog utility in *settings* (mutated in place)."""
    catalog_utility = {
        "provides": "guillotina.interfaces.ICatalogUtility",
        "factory": "guillotina.catalog.catalog.DefaultSearchUtility",
    }
    settings["load_utilities"]["catalog"] = catalog_utility
# Apply the catalog configuration to guillotina's global test settings.
testing.configure_with(base_settings_configurator)
# ``pytest.yield_fixture`` has been deprecated since pytest 3.0 and removed
# in pytest 6.2; plain ``pytest.fixture`` supports yield fixtures.
@pytest.fixture
def event_loop():
    """Create an instance of the default event loop for each test case."""
    # https://github.com/pytest-dev/pytest-asyncio/issues/30#issuecomment-226947196
    policy = asyncio.get_event_loop_policy()
    res = policy.new_event_loop()
    asyncio.set_event_loop(res)
    # Disable close() during the test so libraries cannot close the loop
    # prematurely; the saved _close() restores real closing at teardown.
    res._close = res.close
    res.close = lambda: None
    yield res
    res._close()
def get_dummy_settings(pytest_node=None):
    """Return app settings wired to the in-memory DUMMY storage.

    ``app_settings`` marker payloads from *pytest_node* (if given) are
    merged on top of the base settings.
    """
    settings = testing.get_settings()
    settings["databases"]["db"]["storage"] = "DUMMY"
    settings["databases"]["db"]["dsn"] = {}
    settings = _update_from_pytest_markers(settings, pytest_node)
    return settings
def configure_db(
    obj,
    scheme="postgres",
    dbname="guillotina",
    user="postgres",
    host="localhost",
    port=5432,
    password="",
    storage="postgresql",
):
    """Populate *obj* (a database settings dict) in place.

    Sets the storage backend, the resource partition interface, and a
    DSN dict built from the keyword arguments.
    """
    obj["storage"] = storage
    obj["partition"] = "guillotina.interfaces.IResource"
    obj["dsn"] = {
        "scheme": scheme,
        "dbname": dbname,
        "user": user,
        "host": host,
        "port": port,
        "password": password,
    }
def _update_from_pytest_markers(settings, pytest_node):
    """Merge ``app_settings`` marker payloads from *pytest_node* into *settings*."""
    if not pytest_node:
        return settings
    try:
        app_settings_marks = list(pytest_node.iter_markers(name="app_settings"))
    except AttributeError:
        # Older pytest versions expose get_marker() instead of iter_markers().
        legacy_mark = pytest_node.get_marker("app_settings")
        app_settings_marks = [] if legacy_mark is None else [legacy_mark]
    for mark in app_settings_marks:
        settings = merge_dicts(settings, mark.args[0])
    return settings
def get_db_settings(pytest_node=None):
    """Return app settings for the database selected via module ``annotations``.

    DUMMY short-circuits to plain settings; otherwise postgres (or, for
    "cockroachdb", cockroach) connection details discovered by the ``db``
    fixture are applied. Marker overrides from *pytest_node* are merged in.
    """
    settings = testing.get_settings()
    # Redis host/port are injected only when a redis fixture registered them.
    if annotations["redis"] is not None:
        if "redis" not in settings:
            settings["redis"] = {}
        settings["redis"]["host"] = annotations["redis"][0]
        settings["redis"]["port"] = annotations["redis"][1]
    if annotations["testdatabase"] == "DUMMY":
        return _update_from_pytest_markers(settings, pytest_node)
    settings["databases"]["db"]["storage"] = "postgresql"
    settings["databases"]["db"]["db_schema"] = annotations["test_dbschema"]
    # "pg_host"/"pg_port" are set by the session-scoped ``db`` fixture.
    settings["databases"]["db"]["dsn"] = {
        "scheme": "postgres",
        "dbname": annotations.get("pg_db", "guillotina"),
        "user": "postgres",
        "host": annotations.get("pg_host", "localhost"),
        "port": annotations.get("pg_port", 5432),
        "password": "",
    }
    options = dict(
        host=annotations.get("pg_host", "localhost"),
        port=annotations.get("pg_port", 5432),
        dbname=annotations.get("pg_db", "guillotina"),
    )
    # Marker overrides are merged before configure_db() writes the DSNs.
    settings = _update_from_pytest_markers(settings, pytest_node)
    if annotations["testdatabase"] == "cockroachdb":
        configure_db(settings["databases"]["db"], **options, user="root", storage="cockroach")
        configure_db(settings["databases"]["db-custom"], **options, user="root", storage="cockroach")
        configure_db(settings["storages"]["db"], **options, user="root", storage="cockroach")
    else:
        configure_db(settings["databases"]["db"], **options)
        configure_db(settings["databases"]["db-custom"], **options)
        configure_db(settings["storages"]["db"], **options)
    return settings
@pytest.fixture(scope="session")
def db():
    """Session-scoped database fixture.

    Detects Travis CI and reuses its postgres service; otherwise starts a
    postgres (or cockroach) docker container. Yields ``(host, port)`` except
    for the DUMMY in-memory backend, which yields ``None``.
    """
    if annotations["testdatabase"] == "DUMMY":
        # No external database needed for the dummy storage.
        yield
    else:
        import pytest_docker_fixtures

        if annotations["testdatabase"] == "cockroachdb":
            host, port = pytest_docker_fixtures.cockroach_image.run()
        else:
            if not annotations["travis"]:
                host, port = pytest_docker_fixtures.pg_image.run()
            else:
                # Travis provides a local postgres service; PGPORT may override 5432.
                host = "localhost"
                port = int(os.environ.get("PGPORT", 5432))

        # Publish connection details for get_db_settings().
        annotations["pg_host"] = host
        annotations["pg_port"] = port

        yield host, port  # provide the fixture value

        # Tear down whichever container we started (Travis postgres is left alone).
        if annotations["testdatabase"] == "cockroachdb":
            pytest_docker_fixtures.cockroach_image.stop()
        elif not annotations["travis"]:
            pytest_docker_fixtures.pg_image.stop()
class GuillotinaDBAsgiRequester(object):
    """Issues requests against an in-process ASGI test client.

    Wraps the test client with authentication headers and JSON decoding,
    mirroring :class:`GuillotinaDBHttpRequester` for the ASGI transport.
    """

    def __init__(self, client):
        self.client = client
        self.root = get_utility(IApplication, name="root")
        self.db = self.root["db"]

    async def __call__(
        self,
        method,
        path,
        params=None,
        data=None,
        authenticated=True,
        auth_type="Basic",
        # NOTE: previously these defaulted to mutable {} literals shared across
        # calls; None defaults (as in GuillotinaDBHttpRequester) are equivalent
        # and safe.
        headers=None,
        cookies=None,
        token=testing.ADMIN_TOKEN,
        accept="application/json",
        allow_redirects=True,
    ):
        """Perform a request and return ``(decoded_body, status_code)``."""
        value, status, _ = await self.make_request(
            method,
            path,
            params=params,
            data=data,
            authenticated=authenticated,
            auth_type=auth_type,
            headers=headers,
            cookies=cookies,
            token=token,
            accept=accept,
            allow_redirects=allow_redirects,
        )
        return value, status

    async def make_request(
        self,
        method,
        path,
        params=None,
        data=None,
        authenticated=True,
        auth_type="Basic",
        headers=None,
        cookies=None,
        token=testing.ADMIN_TOKEN,
        accept="application/json",
        allow_redirects=True,
    ):
        """Perform a request and return ``(decoded_body, status_code, headers)``.

        The body is decoded as JSON unless the response carries a
        ``Content-Range`` header or is not valid JSON, in which case the raw
        content is returned.
        """
        # Fresh dicts per call; copy caller-supplied headers so we never
        # mutate the caller's mapping.
        headers = {} if headers is None else headers.copy()
        cookies = {} if cookies is None else cookies

        settings = {}
        settings["headers"] = headers

        if accept is not None:
            settings["headers"]["ACCEPT"] = accept
        if authenticated and token is not None:
            settings["headers"]["AUTHORIZATION"] = "{} {}".format(auth_type, token)

        settings["query_string"] = params
        settings["data"] = data
        settings["allow_redirects"] = allow_redirects
        settings["cookies"] = cookies
        operation = getattr(self.client, method.lower(), None)
        resp = await operation(path, **settings)
        if "Content-Range" in resp.headers:
            # Partial content (blob downloads): hand back raw bytes.
            value = resp.content
        else:
            try:
                value = resp.json()
            except json.decoder.JSONDecodeError:
                value = resp.content
        status = resp.status_code
        return value, status, resp.headers

    def transaction(self, request=None):
        """Return a request-wrapped transaction bound to the test database."""
        if request is None:
            request = get_mocked_request(db=self.db)
        login()
        return wrap_request(request, transaction(db=self.db, adopt_parent_txn=True))

    async def close(self):
        # The ASGI test client owns its own lifecycle; nothing to release here.
        pass
class GuillotinaDBHttpRequester(object):
    """Issues real HTTP requests against a server listening on host:port.

    Counterpart of ``GuillotinaDBAsgiRequester`` for the spawned-server tests.
    """

    def __init__(self, host, port):
        self.host = host
        self.port = port
        # NOTE(review): this session is never used by make_request (which opens
        # a fresh session per call) and only exists so close() has something to
        # close -- verify whether it is needed at all.
        self.client = aiohttp.ClientSession()
        self.root = get_utility(IApplication, name="root")
        self.db = self.root["db"]

    async def __call__(
        self,
        method,
        path,
        params=None,
        data=None,
        authenticated=True,
        auth_type="Basic",
        headers=None,
        token=testing.ADMIN_TOKEN,
        accept="application/json",
        allow_redirects=True,
    ):
        """Perform a request and return ``(decoded_body, status_code)``."""
        value, status, _ = await self.make_request(
            method,
            path,
            params=params,
            data=data,
            authenticated=authenticated,
            auth_type=auth_type,
            headers=headers,
            token=token,
            accept=accept,
            allow_redirects=allow_redirects,
        )
        return value, status

    async def make_request(
        self,
        method,
        path,
        params=None,
        data=None,
        authenticated=True,
        auth_type="Basic",
        headers=None,
        token=testing.ADMIN_TOKEN,
        accept="application/json",
        allow_redirects=True,
    ):
        """Perform a request; return ``(decoded_body, status, headers)``.

        JSON bodies are decoded; anything else falls back to raw bytes.
        """
        if headers is None:
            headers = {}
        settings = {}
        # Copy so the caller's dict is never mutated by the auth/accept headers.
        headers = headers.copy()
        settings["headers"] = headers

        if accept is not None:
            settings["headers"]["ACCEPT"] = accept
        if authenticated and token is not None:
            settings["headers"]["AUTHORIZATION"] = "{} {}".format(auth_type, token)

        settings["params"] = params
        settings["data"] = data
        settings["allow_redirects"] = allow_redirects

        # A fresh session per request keeps each call independent of loop state.
        async with aiohttp.ClientSession() as session:
            operation = getattr(session, method.lower(), None)
            async with operation(self.make_url(path), **settings) as resp:
                try:
                    value = await resp.json()
                except aiohttp.client_exceptions.ContentTypeError:
                    value = await resp.read()
                status = resp.status
                return value, status, resp.headers

    def transaction(self, request=None):
        """Return a request-wrapped transaction bound to the test database."""
        if request is None:
            request = get_mocked_request(db=self.db)
        login()
        return wrap_request(request, transaction(db=self.db, adopt_parent_txn=True))

    def make_url(self, path):
        """Absolute URL for *path* on the spawned server."""
        return f"http://{self.host}:{self.port}{path}"

    async def close(self):
        # Release the (otherwise unused) session created in __init__.
        if not self.client.closed:
            await self.client.close()
def clear_task_vars():
    """Reset every request-scoped context variable back to ``None``."""
    names = (
        "request",
        "txn",
        "tm",
        "futures",
        "authenticated_user",
        "security_policies",
        "container",
        "registry",
        "db",
    )
    for name in names:
        getattr(task_vars, name).set(None)
@pytest.fixture(scope="function")
async def dummy_guillotina(event_loop, request):
    """Function-scoped app using dummy (in-memory) settings, no real database."""
    # Start from a clean component registry for every test.
    globalregistry.reset()
    app = make_app(settings=get_dummy_settings(request.node), loop=event_loop)
    async with TestClient(app):
        yield app
    # Drop auth + task-local state so it cannot leak into the next test.
    logout()
    clear_task_vars()
class DummyRequestAsyncContextManager(object):
    """Attach a dummy request to the currently running asyncio task.

    Entering the context sets ``task.request`` so code that resolves the
    request from the current task sees *dummy_request*; exiting removes it.
    """

    def __init__(self, dummy_request, loop):
        self.request = dummy_request
        # Retained for signature compatibility; the running task is now
        # resolved via asyncio.current_task() which needs no explicit loop.
        self.loop = loop

    async def __aenter__(self):
        # asyncio.Task.current_task() was deprecated in 3.7 and removed in
        # 3.9; asyncio.current_task() is the supported replacement.
        task = asyncio.current_task()
        if task is not None:
            task.request = self.request
        return self.request

    async def __aexit__(self, exc_type, exc, tb):
        task = asyncio.current_task()
        # Mirror __aenter__'s guard: previously a missing task (or a task
        # without the attribute) raised AttributeError here on exit.
        if task is not None and hasattr(task, "request"):
            del task.request
@pytest.fixture(scope="function")
def dummy_request(dummy_guillotina, monkeypatch):
    """A mocked request bound to the dummy app's database.

    Also installs the request into ``task_vars`` so request-resolution helpers
    find it. NOTE(review): the ``monkeypatch`` parameter is unused here --
    presumably kept for fixture-ordering; confirm before removing.
    """
    from guillotina.interfaces import IApplication
    from guillotina.component import get_utility

    root = get_utility(IApplication, name="root")
    db = root["db"]

    request = get_mocked_request(db=db)
    task_vars.request.set(request)
    return request
class RootAsyncContextManager:
    """Async context manager yielding the DB root inside a transaction.

    The transaction is always aborted on exit, so tests never persist changes.
    """

    def __init__(self, request):
        # NOTE(review): the stored request is never read by this class;
        # presumably kept for API symmetry -- confirm before removing.
        self.request = request
        self.root = None
        self.txn = None

    async def __aenter__(self):
        tm = get_tm()
        self.txn = await tm.begin()
        self.root = await tm.get_root()
        return self.root

    async def __aexit__(self, exc_type, exc, tb):
        # Roll back regardless of success so test state never leaks.
        await self.txn.abort()
@pytest.fixture(scope="function")
async def dummy_txn_root(dummy_request):
    """Context manager giving the DB root in a throwaway (aborted) transaction."""
    return RootAsyncContextManager(dummy_request)


@pytest.fixture(scope="function")
def mock_txn():
    """Install a MockTransaction as the task-local transaction for the test."""
    txn = mocks.MockTransaction()
    task_vars.txn.set(txn)
    yield txn
    # Always clear it again so later tests start without a transaction.
    task_vars.txn.set(None)
async def _clear_dbs(root):
    # make sure to completely clear db before carrying on...
    for _, db in root:
        if not IDatabase.providedBy(db):
            continue
        storage = db.storage
        if IPostgresStorage.providedBy(storage) or ICockroachStorage.providedBy(storage):
            async with storage.pool.acquire() as conn:
                # Delete every stored object except the root and trash sentinels.
                await conn.execute(
                    """
DELETE from {}
WHERE zoid != '{}' AND zoid != '{}'
""".format(
                        storage._objects_table_name, ROOT_ID, TRASHED_ID
                    )
                )
                # NOTE(review): this SELECT only *builds* a 'DROP INDEX ...'
                # statement as a string; it is never executed, so stray indexes
                # are not actually dropped. Confirm whether that is intended.
                await conn.execute(
                    """
SELECT 'DROP INDEX ' || string_agg(indexrelid::regclass::text, ', ')
   FROM pg_index i
   LEFT JOIN pg_depend d ON d.objid = i.indexrelid
        AND d.deptype = 'i'
   WHERE i.indrelid = '{}'::regclass
   AND d.objid IS NULL
""".format(
                        storage._objects_table_name
                    )
                )
@pytest.fixture(scope="function")
async def app(event_loop, db, request):
    """Run the app under a real uvicorn server; yields ``(host, port)``."""
    globalregistry.reset()
    settings = get_db_settings(request.node)

    app = make_app(settings=settings, loop=event_loop)

    server_settings = settings.get("test_server_settings", {})
    host = server_settings.get("host", "127.0.0.1")
    port = int(server_settings.get("port", 8000))

    from uvicorn import Config, Server

    config = Config(app, host=host, port=port, lifespan="on")
    server = Server(config=config)
    task = asyncio.ensure_future(server.serve(), loop=event_loop)

    while app.app is None and not task.done():
        # Wait for app initialization
        await asyncio.sleep(0.05)

    if task.done():
        # Startup failed; re-raise the exception captured by the task.
        task.result()

    await _clear_dbs(app.app.root)

    yield host, port

    # Signal uvicorn to shut down and give it time to do so.
    server.should_exit = True
    await asyncio.sleep(1)  # There is no other way to wait for server shutdown
    clear_task_vars()
@pytest.fixture(scope="function")
async def app_client(event_loop, db, request):
    """In-process ASGI app plus test client; databases wiped before each test."""
    globalregistry.reset()
    app = make_app(settings=get_db_settings(request.node), loop=event_loop)
    async with TestClient(app, timeout=30) as client:
        await _clear_dbs(app.app.root)
        yield app, client
    clear_task_vars()


@pytest.fixture(scope="function")
async def guillotina_main(app_client):
    """Just the app half of ``app_client``."""
    app, _ = app_client
    return app


@pytest.fixture(scope="function")
async def guillotina(app_client):
    """Authenticated ASGI requester around the in-process test client."""
    _, client = app_client
    return GuillotinaDBAsgiRequester(client)


@pytest.fixture(scope="function")
def guillotina_server(app):
    """Authenticated HTTP requester against the real uvicorn server."""
    host, port = app
    return GuillotinaDBHttpRequester(host, port)


@pytest.fixture(scope="function")
def container_requester(guillotina):
    """Requester wrapped so a container exists for the duration of the test."""
    return ContainerRequesterAsyncContextManager(guillotina)


@pytest.fixture(scope="function")
def container_install_requester(guillotina, install_addons):
    """Container requester that additionally installs the requested addons."""
    return ContainerRequesterAsyncContextManager(guillotina, install_addons)


@pytest.fixture(scope="function")
def container_requester_server(guillotina_server):
    """Container requester variant backed by the real HTTP server."""
    return ContainerRequesterAsyncContextManager(guillotina_server)
async def _bomb_shelter(future, timeout=2):
try:
return await asyncio.shield(asyncio.wait_for(future, timeout))
except asyncio.TimeoutError:
pass
class CockroachStorageAsyncContextManager(object):
    """Provide an initialized CockroachStorage; recreate the DB on exit."""

    def __init__(self, request, loop, db):
        self.loop = loop
        # NOTE(review): the stored request is never read by this class --
        # presumably kept for API symmetry; confirm before removing.
        self.request = request
        self.storage = None
        # ``db`` is the (host, port) tuple yielded by the ``db`` fixture.
        self.db = db

    async def __aenter__(self):
        dsn = "postgres://root:@{}:{}/guillotina?sslmode=disable".format(self.db[0], self.db[1])
        self.storage = CockroachStorage(dsn=dsn, name="db", pool_size=25, conn_acquire_timeout=0.1)
        await self.storage.initialize(self.loop)
        return self.storage

    async def __aexit__(self, exc_type, exc, tb):
        # Drop and recreate the database; _bomb_shelter bounds each statement
        # so a wedged cockroach node cannot hang the test teardown.
        async with self.storage.pool.acquire() as conn:
            await _bomb_shelter(conn.execute("DROP DATABASE IF EXISTS guillotina;"))
            await _bomb_shelter(conn.execute("CREATE DATABASE guillotina;"))
        try:
            await self.storage.finalize()
        except asyncio.CancelledError:  # pragma: no cover
            pass
@pytest.fixture(scope="function")
def cockroach_storage(db, dummy_request, event_loop):
    """Context manager yielding a ready CockroachStorage for one test."""
    return CockroachStorageAsyncContextManager(dummy_request, event_loop, db)


@pytest.fixture(scope="function")
def command_arguments():
    """MagicMock of parsed CLI arguments with profiling features disabled."""
    arguments = mock.MagicMock()
    arguments.line_profiler = False
    arguments.monitor = False
    arguments.profile = False
    return arguments
@pytest.fixture(scope="function")
def container_command(db):
    """Create the SQL tables used by container commands; wipe them afterwards.

    Yields a dict with the app ``settings`` wired to the test postgres.
    """
    import psycopg2  # type: ignore

    settings = get_db_settings()
    host = settings["databases"]["db"]["dsn"]["host"]
    port = settings["databases"]["db"]["dsn"]["port"]
    conn = psycopg2.connect(f"dbname=guillotina user=postgres host={host} port={port}")
    cur = conn.cursor()
    # Close the schema file deterministically instead of leaking the handle
    # (previously: cur.execute(open(...).read()) with no close).
    with open(os.path.join(_dir, "data/tables.sql"), "r") as f:
        cur.execute(f.read())
    cur.execute("COMMIT;")
    cur.close()
    conn.close()
    yield {"settings": settings}
    conn = psycopg2.connect(f"dbname=guillotina user=postgres host={host} port={port}")
    cur = conn.cursor()
    cur.execute(
        """
        DELETE FROM objects;
        DELETe FROM blobs;
        COMMIT;"""
    )
    # Previously the teardown cursor/connection were leaked; close them.
    cur.close()
    conn.close()
@pytest.fixture(scope="session")
def redis_container():
    """Start a redis docker container for the session; publish its address."""
    import pytest_docker_fixtures

    host, port = pytest_docker_fixtures.redis_image.run()
    # get_db_settings() reads this to wire the app to the container.
    annotations["redis"] = (host, port)
    yield host, port  # provide the fixture value
    pytest_docker_fixtures.redis_image.stop()
    annotations["redis"] = None


@pytest.fixture(scope="function")
async def dbusers_requester(guillotina):
    """Container requester with the ``dbusers`` addon installed."""
    return ContainerRequesterAsyncContextManager(guillotina, ["dbusers"])
| 29.633491 | 104 | 0.64074 |
c675315aa37b152257cc3d84b886bb13a6e1bd34 | 7,065 | py | Python | official/projects/volumetric_models/modeling/backbones/unet_3d.py | mcasanova1445/models | 37be0fdb4abccca633bb3199a4e6f3f71cd174d9 | [
"Apache-2.0"
] | 1 | 2020-05-20T11:40:56.000Z | 2020-05-20T11:40:56.000Z | official/projects/volumetric_models/modeling/backbones/unet_3d.py | mcasanova1445/models | 37be0fdb4abccca633bb3199a4e6f3f71cd174d9 | [
"Apache-2.0"
] | null | null | null | official/projects/volumetric_models/modeling/backbones/unet_3d.py | mcasanova1445/models | 37be0fdb4abccca633bb3199a4e6f3f71cd174d9 | [
"Apache-2.0"
] | 1 | 2022-03-21T13:47:02.000Z | 2022-03-21T13:47:02.000Z | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of 3D UNet Model encoder part.
[1] Özgün Çiçek, Ahmed Abdulkadir, Soeren S. Lienkamp, Thomas Brox, Olaf
Ronneberger. 3D U-Net: Learning Dense Volumetric Segmentation from Sparse
Annotation. arXiv:1606.06650.
"""
from typing import Any, Mapping, Sequence
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.volumetric_models.modeling import nn_blocks_3d
from official.vision.modeling.backbones import factory
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class UNet3D(tf.keras.Model):
  """Class to build 3D UNet backbone."""

  def __init__(
      self,
      model_id: int,
      # Annotation fixed: this is an InputSpec, not the ``layers`` module.
      input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
          shape=[None, None, None, None, 3]),
      pool_size: Sequence[int] = (2, 2, 2),
      kernel_size: Sequence[int] = (3, 3, 3),
      base_filters: int = 32,
      kernel_regularizer: tf.keras.regularizers.Regularizer = None,
      activation: str = 'relu',
      norm_momentum: float = 0.99,
      norm_epsilon: float = 0.001,
      use_sync_bn: bool = False,
      use_batch_normalization: bool = False,  # type: ignore  # typed-keras
      **kwargs):
    """3D UNet backbone initialization function.

    Args:
      model_id: The depth of UNet3D backbone model. The greater the depth, the
        more max pooling layers will be added to the model. Lowering the depth
        may reduce the amount of memory required for training.
      input_specs: The specs of the input tensor. It specifies a 5D input of
        [batch, height, width, volume, channel] for `channel_last` data format
        or [batch, channel, height, width, volume] for `channel_first` data
        format.
      pool_size: The pooling size for the max pooling operations.
      kernel_size: The kernel size for 3D convolution.
      base_filters: The number of filters that the first layer in the
        convolution network will have. Following layers will contain a multiple
        of this number. Lowering this number will likely reduce the amount of
        memory required to train the model.
      kernel_regularizer: A tf.keras.regularizers.Regularizer object for Conv2D.
        Default to None.
      activation: The name of the activation function.
      norm_momentum: The normalization momentum for the moving average.
      norm_epsilon: A float added to variance to avoid dividing by zero.
      use_sync_bn: If True, use synchronized batch normalization.
      use_batch_normalization: If set to True, use batch normalization after
        convolution and before activation. Default to False.
      **kwargs: Keyword arguments to be passed.
    """
    self._model_id = model_id
    self._input_specs = input_specs
    self._pool_size = pool_size
    self._kernel_size = kernel_size
    self._activation = activation
    self._base_filters = base_filters
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._use_sync_bn = use_sync_bn
    # Sync BN aggregates statistics across replicas in distributed training.
    if use_sync_bn:
      self._norm = layers.experimental.SyncBatchNormalization
    else:
      self._norm = layers.BatchNormalization
    self._kernel_regularizer = kernel_regularizer
    self._use_batch_normalization = use_batch_normalization

    # Build 3D UNet.
    inputs = tf.keras.Input(
        shape=input_specs.shape[1:], dtype=input_specs.dtype)

    x = inputs
    endpoints = {}

    # Add levels with max pooling to downsample input.
    for layer_depth in range(model_id):
      # Two convolutions are applied sequentially without downsampling; the
      # filter count doubles at every depth level.
      filter_num = base_filters * (2**layer_depth)
      x2 = nn_blocks_3d.BasicBlock3DVolume(
          filters=[filter_num, filter_num * 2],
          strides=(1, 1, 1),
          kernel_size=self._kernel_size,
          kernel_regularizer=self._kernel_regularizer,
          activation=self._activation,
          use_sync_bn=self._use_sync_bn,
          norm_momentum=self._norm_momentum,
          norm_epsilon=self._norm_epsilon,
          use_batch_normalization=self._use_batch_normalization)(
              x)
      # No pooling after the deepest level; its output is the bottleneck.
      if layer_depth < model_id - 1:
        x = layers.MaxPool3D(
            pool_size=pool_size,
            strides=(2, 2, 2),
            padding='valid',
            data_format=tf.keras.backend.image_data_format())(
                x2)
      else:
        x = x2
      # Endpoints are keyed '1'..model_id with the pre-pooling features, for
      # use as skip connections by the decoder.
      endpoints[str(layer_depth + 1)] = x2

    self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}

    # Functional-model subclass pattern: super().__init__ is called last, once
    # the graph from `inputs` to `endpoints` is fully wired.
    super(UNet3D, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)

  def get_config(self) -> Mapping[str, Any]:
    # Serializes the constructor arguments for Keras (de)serialization.
    return {
        'model_id': self._model_id,
        'pool_size': self._pool_size,
        'kernel_size': self._kernel_size,
        'activation': self._activation,
        'base_filters': self._base_filters,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
        'use_sync_bn': self._use_sync_bn,
        'kernel_regularizer': self._kernel_regularizer,
        'use_batch_normalization': self._use_batch_normalization
    }

  @classmethod
  def from_config(cls, config: Mapping[str, Any], custom_objects=None):
    return cls(**config)

  @property
  def output_specs(self) -> Mapping[str, tf.TensorShape]:
    """Returns a dict of {level: TensorShape} pairs for the model output."""
    return self._output_specs
@factory.register_backbone_builder('unet_3d')
def build_unet3d(
    input_specs: tf.keras.layers.InputSpec,
    backbone_config: hyperparams.Config,
    norm_activation_config: hyperparams.Config,
    l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:  # pytype: disable=annotation-type-mismatch  # typed-keras
  """Builds 3D UNet backbone from a config."""
  backbone_type = backbone_config.type
  backbone_cfg = backbone_config.get()
  assert backbone_type == 'unet_3d', (f'Inconsistent backbone type '
                                      f'{backbone_type}')

  # NOTE(review): `kernel_size` from the config is not forwarded here, so the
  # constructor default of (3, 3, 3) always applies -- confirm intentional.
  return UNet3D(
      model_id=backbone_cfg.model_id,
      input_specs=input_specs,
      pool_size=backbone_cfg.pool_size,
      base_filters=backbone_cfg.base_filters,
      kernel_regularizer=l2_regularizer,
      activation=norm_activation_config.activation,
      norm_momentum=norm_activation_config.norm_momentum,
      norm_epsilon=norm_activation_config.norm_epsilon,
      use_sync_bn=norm_activation_config.use_sync_bn,
      use_batch_normalization=backbone_cfg.use_batch_normalization)
| 39.915254 | 139 | 0.707006 |
fa642eba3709c73226bda3d5e0a59bb42853e958 | 129 | py | Python | IMC-GAE_one_sample/graph.py | WesleyClode/MBIMC-GAE | fd4d6df3467b57c0fa4d5e6e11f79085be32ed51 | [
"Apache-2.0"
] | null | null | null | IMC-GAE_one_sample/graph.py | WesleyClode/MBIMC-GAE | fd4d6df3467b57c0fa4d5e6e11f79085be32ed51 | [
"Apache-2.0"
] | null | null | null | IMC-GAE_one_sample/graph.py | WesleyClode/MBIMC-GAE | fd4d6df3467b57c0fa4d5e6e11f79085be32ed51 | [
"Apache-2.0"
] | null | null | null | import dgl
g = dgl.graph(([0, 1, 2], [1, 2, 3]))
neg_sampler = dgl.dataloading.negative_sampler.Uniform(2)
neg_sampler(g, [0, 1]) | 32.25 | 57 | 0.674419 |
8acc882bf921f8fb343055797c46c5d540921fde | 729 | py | Python | elemental/model/defaults.py | angstwad/elemental | 26827572a64fbcb02654642ef247669e2188bc10 | [
"Apache-2.0"
] | 15 | 2015-11-07T03:05:03.000Z | 2021-04-28T22:32:18.000Z | elemental/model/defaults.py | ktosiu/elemental | 26827572a64fbcb02654642ef247669e2188bc10 | [
"Apache-2.0"
] | null | null | null | elemental/model/defaults.py | ktosiu/elemental | 26827572a64fbcb02654642ef247669e2188bc10 | [
"Apache-2.0"
] | 7 | 2016-01-08T03:15:28.000Z | 2018-07-29T04:37:34.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Elemental.
#
# Copyright 2014 Paul Durivage <pauldurivage+git@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Recognized account types; 'admin' presumably grants elevated privileges --
# TODO confirm against the user model that consumes this list.
USER_ACCOUNT_TYPES = ['user', 'admin']
| 34.714286 | 74 | 0.742112 |
5bfbded5e1a29e5103e31afc12fcb80efab25018 | 32,364 | py | Python | tests/test_server.py | isaac-philip/rasa | 923db75e03921921a6f1f3489a2c5574138ee685 | [
"Apache-2.0"
] | 5 | 2020-08-28T06:44:31.000Z | 2021-03-14T18:46:01.000Z | tests/test_server.py | isaac-philip/rasa | 923db75e03921921a6f1f3489a2c5574138ee685 | [
"Apache-2.0"
] | 71 | 2020-08-25T02:46:08.000Z | 2022-03-01T13:23:38.000Z | tests/test_server.py | isaac-philip/rasa | 923db75e03921921a6f1f3489a2c5574138ee685 | [
"Apache-2.0"
] | 1 | 2020-07-01T12:07:55.000Z | 2020-07-01T12:07:55.000Z | import os
from multiprocessing.managers import DictProxy
import requests
import time
import tempfile
import uuid
from typing import List, Text, Type, Generator, NoReturn
from contextlib import ExitStack
from _pytest import pathlib
from aioresponses import aioresponses
import pytest
from freezegun import freeze_time
from mock import MagicMock
from multiprocessing import Process, Manager
import rasa
import rasa.constants
import rasa.utils.io
from rasa.core import events, utils
from rasa.core.agent import Agent
from rasa.core.channels import CollectingOutputChannel, RestInput, SlackInput
from rasa.core.channels.slack import SlackBot
from rasa.core.events import Event, UserUttered, SlotSet, BotUttered
from rasa.core.trackers import DialogueStateTracker
from rasa.model import unpack_model
from rasa.utils.endpoints import EndpointConfig
from sanic import Sanic
from sanic.testing import SanicTestClient
from tests.nlu.utilities import ResponseTest
from tests.conftest import get_test_client
# A couple of event instances used for testing: a parsed user utterance, a
# bot utterance with metadata, and slot values of several JSON types
# (int, str, None, mixed list) to exercise (de)serialization.
test_events = [
    Event.from_parameters(
        {
            "event": UserUttered.type_name,
            "text": "/goodbye",
            "parse_data": {
                "intent": {"confidence": 1.0, "name": "greet"},
                "entities": [],
            },
        }
    ),
    BotUttered("Welcome!", {"test": True}),
    SlotSet("cuisine", 34),
    SlotSet("cuisine", "34"),
    SlotSet("location", None),
    SlotSet("location", [34, "34", None]),
]
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicTestClient:
    """Test client for a server started without the ``--enable-api`` flag."""
    return get_test_client(rasa_server_without_api)


@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicTestClient:
    """Test client for a fully loaded (Core + NLU) server."""
    return get_test_client(rasa_server)


@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicTestClient:
    """Test client for a server loaded with an NLU-only model."""
    return get_test_client(rasa_nlu_server)


@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicTestClient:
    """Test client for a server loaded with a Core-only model."""
    return get_test_client(rasa_core_server)


@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicTestClient:
    """Test client for a server with token/JWT authentication enabled."""
    return get_test_client(rasa_server_secured)
def test_root(rasa_app: SanicTestClient):
    """The root endpoint greets regardless of model state."""
    _, response = rasa_app.get("/")
    assert response.status == 200
    assert response.text.startswith("Hello from Rasa:")


def test_root_without_enable_api(rasa_app_without_api: SanicTestClient):
    """The root endpoint works even when the HTTP API is disabled."""
    _, response = rasa_app_without_api.get("/")
    assert response.status == 200
    assert response.text.startswith("Hello from Rasa:")


def test_root_secured(rasa_secured_app: SanicTestClient):
    """The root endpoint is reachable without credentials on a secured server."""
    _, response = rasa_secured_app.get("/")
    assert response.status == 200
    assert response.text.startswith("Hello from Rasa:")


def test_version(rasa_app: SanicTestClient):
    """``/version`` reports the running version and compatibility floor."""
    _, response = rasa_app.get("/version")
    content = response.json
    assert response.status == 200
    assert content.get("version") == rasa.__version__
    assert (
        content.get("minimum_compatible_version")
        == rasa.constants.MINIMUM_COMPATIBLE_VERSION
    )
def test_status(rasa_app: SanicTestClient, trained_rasa_model: Text):
    """``/status`` reports the fingerprint and path of the loaded model."""
    _, response = rasa_app.get("/status")
    model_file = response.json["model_file"]
    assert response.status == 200
    assert "fingerprint" in response.json
    assert os.path.isfile(model_file)
    assert model_file == trained_rasa_model


def test_status_nlu_only(rasa_app_nlu: SanicTestClient, trained_nlu_model: Text):
    """``/status`` also works when only an NLU model is loaded."""
    _, response = rasa_app_nlu.get("/status")
    model_file = response.json["model_file"]
    assert response.status == 200
    assert "fingerprint" in response.json
    assert "model_file" in response.json
    assert model_file == trained_nlu_model


def test_status_secured(rasa_secured_app: SanicTestClient):
    """``/status`` requires authentication on a secured server."""
    _, response = rasa_secured_app.get("/status")
    assert response.status == 401


def test_status_not_ready_agent(rasa_app: SanicTestClient):
    """``/status`` returns 409 Conflict when no agent is loaded."""
    rasa_app.app.agent = None
    _, response = rasa_app.get("/status")
    assert response.status == 409
@pytest.fixture
def shared_statuses() -> DictProxy:
    """A dict shared across processes to coordinate the blocking-train test."""
    return Manager().dict()


@pytest.fixture
def background_server(
    shared_statuses: DictProxy, tmpdir: pathlib.Path
) -> Generator[Process, None, None]:
    """Unstarted subprocess running a Rasa server whose train function blocks."""
    # Create a fake model archive which the mocked train function can return
    from pathlib import Path

    fake_model = Path(tmpdir) / "fake_model.tar.gz"
    fake_model.touch()
    fake_model_path = str(fake_model)

    # Fake training function which blocks until we tell it to stop blocking
    # If we can send a status request while this is blocking, we can be sure that the
    # actual training is also not blocking
    def mocked_training_function(*_, **__) -> Text:
        # Tell the others that we are now blocking
        shared_statuses["started_training"] = True
        # Block until somebody tells us to not block anymore
        while shared_statuses.get("stop_training") is not True:
            time.sleep(1)
        return fake_model_path

    def run_server() -> NoReturn:
        import rasa

        # Patch in the blocking train function before the CLI entry point runs.
        rasa.train = mocked_training_function

        from rasa import __main__
        import sys

        sys.argv = ["rasa", "run", "--enable-api"]
        __main__.main()

    server = Process(target=run_server)
    yield server
    server.terminate()
@pytest.fixture()
def training_request(shared_statuses: DictProxy) -> Generator[Process, None, None]:
    """Unstarted subprocess that POSTs a formbot training request to the server.

    The resulting HTTP status code is published via ``shared_statuses``.
    """

    def send_request() -> None:
        with ExitStack() as stack:
            formbot_data = dict(
                domain="examples/formbot/domain.yml",
                config="examples/formbot/config.yml",
                stories="examples/formbot/data/stories.md",
                nlu="examples/formbot/data/nlu.md",
            )
            # ExitStack guarantees every opened training file is closed.
            payload = {
                key: stack.enter_context(open(path)).read()
                for key, path in formbot_data.items()
            }
        payload["force"] = True
        response = requests.post("http://localhost:5005/model/train", json=payload)
        shared_statuses["training_result"] = response.status_code

    train_request = Process(target=send_request)
    yield train_request
    train_request.terminate()
# due to unknown reasons this test can not be run in pycharm, it
# results in segfaults...will skip in that case - test will still get run on CI
@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault")
def test_train_status_is_not_blocked_by_training(
    background_server: Process, shared_statuses: DictProxy, training_request: Process
):
    """``/status`` stays responsive while a training job runs in the server."""
    background_server.start()

    def is_server_ready() -> bool:
        try:
            return requests.get("http://localhost:5005/status").status_code == 200
        except Exception:
            # Connection refused while the server is still booting.
            return False

    # wait until server is up before sending train request and status test loop
    while not is_server_ready():
        time.sleep(1)

    training_request.start()

    # Wait until the blocking training function was called
    while shared_statuses.get("started_training") is not True:
        time.sleep(1)

    # Check if the number of currently running trainings was incremented
    response = requests.get("http://localhost:5005/status")
    assert response.status_code == 200
    assert response.json()["num_active_training_jobs"] == 1

    # Tell the blocking training function to stop
    shared_statuses["stop_training"] = True

    while shared_statuses.get("training_result") is None:
        time.sleep(1)

    # Check that the training worked correctly
    assert shared_statuses["training_result"] == 200

    # Check if the number of currently running trainings was decremented
    response = requests.get("http://localhost:5005/status")
    assert response.status_code == 200
    assert response.json()["num_active_training_jobs"] == 0
# NOTE(review): the first two parametrized cases are identical; presumably one
# of them was meant to differ (e.g. a different payload) -- worth confirming.
@pytest.mark.parametrize(
    "response_test",
    [
        ResponseTest(
            "/model/parse",
            {
                "entities": [],
                "intent": {"confidence": 1.0, "name": "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        ResponseTest(
            "/model/parse",
            {
                "entities": [],
                "intent": {"confidence": 1.0, "name": "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        ResponseTest(
            "/model/parse",
            {
                "entities": [],
                "intent": {"confidence": 1.0, "name": "greet"},
                "text": "hello ńöñàśçií",
            },
            payload={"text": "hello ńöñàśçií"},
        ),
    ],
)
def test_parse(rasa_app, response_test):
    """``/model/parse`` returns the expected NLU result (incl. non-ASCII text)."""
    _, response = rasa_app.post(response_test.endpoint, json=response_test.payload)
    rjs = response.json
    assert response.status == 200
    assert all(prop in rjs for prop in ["entities", "intent", "text"])
    assert rjs["entities"] == response_test.expected_response["entities"]
    assert rjs["text"] == response_test.expected_response["text"]
    assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
    "response_test",
    [
        ResponseTest(
            "/model/parse?emulation_mode=wit",
            {
                "entities": [],
                "intent": {"confidence": 1.0, "name": "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        ResponseTest(
            "/model/parse?emulation_mode=dialogflow",
            {
                "entities": [],
                "intent": {"confidence": 1.0, "name": "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        ResponseTest(
            "/model/parse?emulation_mode=luis",
            {
                "entities": [],
                "intent": {"confidence": 1.0, "name": "greet"},
                "text": "hello ńöñàśçií",
            },
            payload={"text": "hello ńöñàśçií"},
        ),
    ],
)
def test_parse_with_different_emulation_mode(rasa_app, response_test):
    """Each supported ``emulation_mode`` (wit/dialogflow/luis) parses OK."""
    # Only the status is asserted: the emulated payload shape differs per mode.
    _, response = rasa_app.post(response_test.endpoint, json=response_test.payload)
    assert response.status == 200


def test_parse_without_nlu_model(rasa_app_core: SanicTestClient):
    """Parsing still responds (with a fallback result) on a Core-only model."""
    _, response = rasa_app_core.post("/model/parse", json={"text": "hello"})
    assert response.status == 200
    rjs = response.json
    assert all(prop in rjs for prop in ["entities", "intent", "text"])


def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicTestClient):
    """An unknown ``emulation_mode`` is rejected with 400 Bad Request."""
    _, response = rasa_app_nlu.post(
        "/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
    )
    assert response.status == 400
def test_train_stack_success(
    rasa_app,
    default_domain_path,
    default_stories_file,
    default_stack_config,
    default_nlu_data,
):
    """Full (Core + NLU) training over HTTP returns a valid model archive."""
    with ExitStack() as stack:
        domain_file = stack.enter_context(open(default_domain_path))
        config_file = stack.enter_context(open(default_stack_config))
        stories_file = stack.enter_context(open(default_stories_file))
        nlu_file = stack.enter_context(open(default_nlu_data))

        payload = dict(
            domain=domain_file.read(),
            config=config_file.read(),
            stories=stories_file.read(),
            nlu=nlu_file.read(),
        )

    _, response = rasa_app.post("/model/train", json=payload)
    assert response.status == 200

    assert response.headers["filename"] is not None

    # Use a self-cleaning temporary directory instead of leaking a mkdtemp()
    # directory (and the downloaded archive) after the test run.
    with tempfile.TemporaryDirectory() as tempdir:
        model_path = os.path.join(tempdir, "model.tar.gz")
        with open(model_path, "wb") as f:
            f.write(response.body)

        # unpack model and ensure fingerprint is present
        model_path = unpack_model(model_path)
        assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_nlu_success(
    rasa_app, default_stack_config, default_nlu_data, default_domain_path
):
    """NLU-only training over HTTP returns a valid model archive."""
    with ExitStack() as stack:
        domain_file = stack.enter_context(open(default_domain_path))
        config_file = stack.enter_context(open(default_stack_config))
        nlu_file = stack.enter_context(open(default_nlu_data))

        payload = dict(
            domain=domain_file.read(), config=config_file.read(), nlu=nlu_file.read()
        )

    _, response = rasa_app.post("/model/train", json=payload)
    assert response.status == 200

    # save model to temporary file
    # NOTE(review): mkdtemp() directories are never removed; consider
    # tempfile.TemporaryDirectory for automatic cleanup.
    tempdir = tempfile.mkdtemp()
    model_path = os.path.join(tempdir, "model.tar.gz")
    with open(model_path, "wb") as f:
        f.write(response.body)

    # unpack model and ensure fingerprint is present
    model_path = unpack_model(model_path)
    assert os.path.exists(os.path.join(model_path, "fingerprint.json"))


def test_train_core_success(
    rasa_app, default_stack_config, default_stories_file, default_domain_path
):
    """Core-only training over HTTP returns a valid model archive."""
    with ExitStack() as stack:
        domain_file = stack.enter_context(open(default_domain_path))
        config_file = stack.enter_context(open(default_stack_config))
        core_file = stack.enter_context(open(default_stories_file))

        payload = dict(
            domain=domain_file.read(),
            config=config_file.read(),
            stories=core_file.read(),
        )

    _, response = rasa_app.post("/model/train", json=payload)
    assert response.status == 200

    # save model to temporary file
    tempdir = tempfile.mkdtemp()
    model_path = os.path.join(tempdir, "model.tar.gz")
    with open(model_path, "wb") as f:
        f.write(response.body)

    # unpack model and ensure fingerprint is present
    model_path = unpack_model(model_path)
    assert os.path.exists(os.path.join(model_path, "fingerprint.json"))


def test_train_with_retrieval_events_success(rasa_app, default_stack_config):
    """Training with retrieval intents/responses returns a valid model archive."""
    with ExitStack() as stack:
        domain_file = stack.enter_context(
            open("data/test_domains/default_retrieval_intents.yml")
        )
        config_file = stack.enter_context(open(default_stack_config))
        core_file = stack.enter_context(
            open("data/test_stories/stories_retrieval_intents.md")
        )
        responses_file = stack.enter_context(open("data/test_responses/default.md"))
        nlu_file = stack.enter_context(
            open("data/test_nlu/default_retrieval_intents.md")
        )

        payload = dict(
            domain=domain_file.read(),
            config=config_file.read(),
            stories=core_file.read(),
            responses=responses_file.read(),
            nlu=nlu_file.read(),
        )

    _, response = rasa_app.post("/model/train", json=payload)
    assert response.status == 200

    # save model to temporary file
    tempdir = tempfile.mkdtemp()
    model_path = os.path.join(tempdir, "model.tar.gz")
    with open(model_path, "wb") as f:
        f.write(response.body)

    # unpack model and ensure fingerprint is present
    model_path = unpack_model(model_path)
    assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_missing_config(rasa_app: SanicTestClient):
    """The training endpoint rejects a request that carries no config."""
    _, response = rasa_app.post(
        "/model/train", json={"domain": "domain data", "config": None}
    )
    assert response.status == 400


def test_train_missing_training_data(rasa_app: SanicTestClient):
    """The training endpoint rejects a request with neither NLU nor story data."""
    _, response = rasa_app.post(
        "/model/train", json={"domain": "domain data", "config": "config data"}
    )
    assert response.status == 400


def test_train_internal_error(rasa_app: SanicTestClient):
    """A server-side failure during training surfaces as HTTP 500."""
    _, response = rasa_app.post(
        "/model/train",
        json={"domain": "domain data", "config": "config data", "nlu": "nlu data"},
    )
    assert response.status == 500
def test_evaluate_stories(rasa_app, default_stories_file):
    """POSTing stories to /model/test/stories returns the full evaluation report."""
    stories = rasa.utils.io.read_file(default_stories_file)
    _, response = rasa_app.post("/model/test/stories", data=stories)
    assert response.status == 200
    js = response.json
    assert set(js.keys()) == {
        "report",
        "precision",
        "f1",
        "accuracy",
        "actions",
        "in_training_data_fraction",
        "is_end_to_end_evaluation",
    }
    # a plain story evaluation must not be flagged as end-to-end
    assert not js["is_end_to_end_evaluation"]
    assert set(js["actions"][0].keys()) == {
        "action",
        "predicted",
        "confidence",
        "policy",
    }


def test_evaluate_stories_not_ready_agent(
    rasa_app_nlu: SanicTestClient, default_stories_file
):
    """Story evaluation without a loaded Core model is rejected with 409 Conflict."""
    stories = rasa.utils.io.read_file(default_stories_file)
    _, response = rasa_app_nlu.post("/model/test/stories", data=stories)
    assert response.status == 409


def test_evaluate_stories_end_to_end(rasa_app, end_to_end_story_file):
    """With ``?e2e=true`` the evaluation result is marked as end-to-end."""
    stories = rasa.utils.io.read_file(end_to_end_story_file)
    _, response = rasa_app.post("/model/test/stories?e2e=true", data=stories)
    assert response.status == 200
    js = response.json
    assert set(js.keys()) == {
        "report",
        "precision",
        "f1",
        "accuracy",
        "actions",
        "in_training_data_fraction",
        "is_end_to_end_evaluation",
    }
    assert js["is_end_to_end_evaluation"]
    assert set(js["actions"][0].keys()) == {
        "action",
        "predicted",
        "confidence",
        "policy",
    }


def test_evaluate_intent(rasa_app, default_nlu_data):
    """NLU evaluation returns intent, entity and response-selection sections."""
    nlu_data = rasa.utils.io.read_file(default_nlu_data)
    _, response = rasa_app.post("/model/test/intents", data=nlu_data)
    assert response.status == 200
    assert set(response.json.keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }


def test_evaluate_intent_on_just_nlu_model(
    rasa_app_nlu: SanicTestClient, default_nlu_data
):
    """An NLU-only deployment can still run intent evaluation."""
    nlu_data = rasa.utils.io.read_file(default_nlu_data)
    _, response = rasa_app_nlu.post("/model/test/intents", data=nlu_data)
    assert response.status == 200
    assert set(response.json.keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }


def test_evaluate_intent_with_query_param(
    rasa_app, trained_nlu_model, default_nlu_data
):
    """Evaluating against ``?model=...`` must not permanently swap the loaded model."""
    _, response = rasa_app.get("/status")
    previous_model_file = response.json["model_file"]
    nlu_data = rasa.utils.io.read_file(default_nlu_data)
    _, response = rasa_app.post(
        f"/model/test/intents?model={trained_nlu_model}", data=nlu_data
    )
    assert response.status == 200
    assert set(response.json.keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }
    # the model that was loaded before the evaluation must be restored
    _, response = rasa_app.get("/status")
    assert previous_model_file == response.json["model_file"]
def test_predict(rasa_app: SanicTestClient):
    """POSTing a tracker-event payload to /model/predict yields scored actions."""
    parse_data = {
        "entities": [],
        "intent": {"confidence": 0.57, "name": "greet"},
        "text": "hello",
    }
    tracker_events = [
        {"event": "action", "name": "action_listen"},
        {"event": "user", "text": "hello", "parse_data": parse_data},
    ]
    payload = {"Events": {"value": tracker_events}}

    _, response = rasa_app.post(
        "/model/predict",
        json=payload,
        headers={"Content-Type": "application/json"},
    )
    content = response.json
    assert response.status == 200
    assert "scores" in content
    assert "tracker" in content
    assert "policy" in content
@freeze_time("2018-01-01")
def test_requesting_non_existent_tracker(rasa_app: SanicTestClient):
    """An unknown conversation id yields a freshly initialised tracker.

    Time is frozen to 2018-01-01, so the expected event timestamps are the
    fixed epoch value 1514764800.
    """
    _, response = rasa_app.get("/conversations/madeupid/tracker")
    content = response.json
    assert response.status == 200
    assert content["paused"] is False
    assert content["slots"] == {"name": None}
    assert content["sender_id"] == "madeupid"
    assert content["events"] == [
        {
            "event": "action",
            "name": "action_session_start",
            "policy": None,
            "confidence": None,
            "timestamp": 1514764800,
        },
        {"event": "session_started", "timestamp": 1514764800},
        {
            "event": "action",
            "name": "action_listen",
            "policy": None,
            "confidence": None,
            "timestamp": 1514764800,
        },
    ]
    assert content["latest_message"] == {
        "text": None,
        "intent": {},
        "entities": [],
        "message_id": None,
        "metadata": {},
    }


@pytest.mark.parametrize("event", test_events)
def test_pushing_event(rasa_app: SanicTestClient, event: Event):
    """A single pushed event is stored and assigned a fresh server-side timestamp."""
    sender_id = str(uuid.uuid1())
    conversation = f"/conversations/{sender_id}"
    serialized_event = event.as_dict()
    # Remove timestamp so that a new one is assigned on the server
    serialized_event.pop("timestamp")
    time_before_adding_events = time.time()
    _, response = rasa_app.post(
        f"{conversation}/tracker/events",
        json=serialized_event,
        headers={"Content-Type": "application/json"},
    )
    assert response.json is not None
    assert response.status == 200
    _, tracker_response = rasa_app.get(f"/conversations/{sender_id}/tracker")
    tracker = tracker_response.json
    assert tracker is not None
    assert len(tracker.get("events")) == 1
    evt = tracker.get("events")[0]
    deserialised_event = Event.from_parameters(evt)
    assert deserialised_event == event
    # the server assigned the timestamp, so it must be newer than our snapshot
    assert deserialised_event.timestamp > time_before_adding_events


def test_push_multiple_events(rasa_app: SanicTestClient):
    """A list of events can be appended to a tracker in one request."""
    conversation_id = str(uuid.uuid1())
    conversation = f"/conversations/{conversation_id}"
    events = [e.as_dict() for e in test_events]
    _, response = rasa_app.post(
        f"{conversation}/tracker/events",
        json=events,
        headers={"Content-Type": "application/json"},
    )
    assert response.json is not None
    assert response.status == 200
    _, tracker_response = rasa_app.get(f"/conversations/{conversation_id}/tracker")
    tracker = tracker_response.json
    assert tracker is not None
    # NOTE(review): an earlier comment claimed an extra `ACTION_LISTEN` event at
    # the start, but the strict equality below requires the stored events to
    # match exactly — confirm which behavior is intended.
    assert tracker.get("events") == events


def test_put_tracker(rasa_app: SanicTestClient):
    """PUT replaces a tracker's entire event stream."""
    data = [event.as_dict() for event in test_events]
    _, response = rasa_app.put(
        "/conversations/pushtracker/tracker/events",
        json=data,
        headers={"Content-Type": "application/json"},
    )
    content = response.json
    assert response.status == 200
    assert len(content["events"]) == len(test_events)
    assert content["sender_id"] == "pushtracker"
    _, tracker_response = rasa_app.get("/conversations/pushtracker/tracker")
    tracker = tracker_response.json
    assert tracker is not None
    evts = tracker.get("events")
    # `events` here is the module-level events module, not the local list above
    assert events.deserialise_events(evts) == test_events


def test_sorted_predict(rasa_app: SanicTestClient):
    """Action predictions are returned sorted by score (ties broken by action name)."""
    _create_tracker_for_sender(rasa_app, "sortedpredict")
    _, response = rasa_app.post("/conversations/sortedpredict/predict")
    scores = response.json["scores"]
    sorted_scores = sorted(scores, key=lambda k: (-k["score"], k["action"]))
    assert scores == sorted_scores


def _create_tracker_for_sender(app: SanicTestClient, sender_id: Text) -> None:
    """Seed a tracker for *sender_id* with the first three canned test events."""
    data = [event.as_dict() for event in test_events[:3]]
    _, response = app.put(
        f"/conversations/{sender_id}/tracker/events",
        json=data,
        headers={"Content-Type": "application/json"},
    )
    assert response.status == 200
def test_get_tracker_with_jwt(rasa_secured_app):
    """JWT role checks: admins may read any tracker, plain users only their own."""
    # token generated with secret "core" and algorithm HS256
    # on https://jwt.io/
    # {"user": {"username": "testadmin", "role": "admin"}}
    jwt_header = {
        "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
        "eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
        "m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
        "QRre7IWTuIDrCn5AIw"
    }
    # an admin token can read anybody's tracker
    _, response = rasa_secured_app.get(
        "/conversations/testadmin/tracker", headers=jwt_header
    )
    assert response.status == 200
    _, response = rasa_secured_app.get(
        "/conversations/testuser/tracker", headers=jwt_header
    )
    assert response.status == 200
    # {"user": {"username": "testuser", "role": "user"}}
    jwt_header = {
        "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
        "eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
        "2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
        "HJHOxxC_w7TtwCrs"
    }
    # a plain user token may not read another user's tracker
    _, response = rasa_secured_app.get(
        "/conversations/testadmin/tracker", headers=jwt_header
    )
    assert response.status == 403
    _, response = rasa_secured_app.get(
        "/conversations/testuser/tracker", headers=jwt_header
    )
    assert response.status == 200
def test_list_routes(default_agent: Agent):
    """The server app registers exactly the expected set of named routes."""
    from rasa import server

    app = server.create_app(default_agent, auth_token=None)
    routes = utils.list_routes(app)
    assert set(routes.keys()) == {
        "hello",
        "version",
        "status",
        "retrieve_tracker",
        "append_events",
        "replace_events",
        "retrieve_story",
        "execute_action",
        "trigger_intent",
        "predict",
        "add_message",
        "train",
        "evaluate_stories",
        "evaluate_intents",
        "tracker_predict",
        "parse",
        "load_model",
        "unload_model",
        "get_domain",
    }


def test_unload_model_error(rasa_app: SanicTestClient):
    """DELETE /model unloads the currently loaded model.

    NOTE(review): despite the ``_error`` suffix this asserts the success path
    (204 No Content) — confirm whether the test name is stale.
    """
    _, response = rasa_app.get("/status")
    assert response.status == 200
    assert "model_file" in response.json and response.json["model_file"] is not None
    _, response = rasa_app.delete("/model")
    assert response.status == 204


def test_get_domain(rasa_app: SanicTestClient):
    """GET /domain with a JSON accept header returns all domain sections."""
    _, response = rasa_app.get("/domain", headers={"accept": "application/json"})
    content = response.json
    assert response.status == 200
    assert "config" in content
    assert "intents" in content
    assert "entities" in content
    assert "slots" in content
    assert "responses" in content
    assert "actions" in content


def test_get_domain_invalid_accept_header(rasa_app: SanicTestClient):
    """GET /domain without an accept header is answered with 406 Not Acceptable."""
    _, response = rasa_app.get("/domain")
    assert response.status == 406


def test_load_model(rasa_app: SanicTestClient, trained_core_model):
    """PUT /model with a model file swaps the loaded model (fingerprint changes)."""
    _, response = rasa_app.get("/status")
    assert response.status == 200
    assert "fingerprint" in response.json
    old_fingerprint = response.json["fingerprint"]
    data = {"model_file": trained_core_model}
    _, response = rasa_app.put("/model", json=data)
    assert response.status == 204
    _, response = rasa_app.get("/status")
    assert response.status == 200
    assert "fingerprint" in response.json
    # a different model must produce a different fingerprint
    assert old_fingerprint != response.json["fingerprint"]


def test_load_model_from_model_server(rasa_app: SanicTestClient, trained_core_model):
    """PUT /model can also fetch the model from a remote model-server URL."""
    _, response = rasa_app.get("/status")
    assert response.status == 200
    assert "fingerprint" in response.json
    old_fingerprint = response.json["fingerprint"]
    endpoint = EndpointConfig("https://example.com/model/trained_core_model")
    with open(trained_core_model, "rb") as f:
        # mock the remote model server; requests to the local app pass through
        with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
            # NOTE(review): `headers` is built but never passed to `mocked.get`
            # below — confirm whether it is dead code.
            headers = {}
            fs = os.fstat(f.fileno())
            headers["Content-Length"] = str(fs[6])
            mocked.get(
                "https://example.com/model/trained_core_model",
                content_type="application/x-tar",
                body=f.read(),
            )
            data = {"model_server": {"url": endpoint.url}}
            _, response = rasa_app.put("/model", json=data)
            assert response.status == 204
            _, response = rasa_app.get("/status")
            assert response.status == 200
            assert "fingerprint" in response.json
            assert old_fingerprint != response.json["fingerprint"]
    import rasa.core.jobs

    # reset the module-level background scheduler so later tests start clean
    rasa.core.jobs.__scheduler = None


def test_load_model_invalid_request_body(rasa_app: SanicTestClient):
    """PUT /model without a request body is a 400 Bad Request."""
    _, response = rasa_app.put("/model")
    assert response.status == 400


def test_load_model_invalid_configuration(rasa_app: SanicTestClient):
    """PUT /model pointing at a non-existent file is a 400 Bad Request."""
    data = {"model_file": "some-random-path"}
    _, response = rasa_app.put("/model", json=data)
    assert response.status == 400
def test_execute(rasa_app: SanicTestClient):
    """Executing an existing action on a conversation returns tracker + messages."""
    _create_tracker_for_sender(rasa_app, "test_execute")
    data = {"name": "utter_greet"}
    _, response = rasa_app.post("/conversations/test_execute/execute", json=data)
    assert response.status == 200
    parsed_content = response.json
    assert parsed_content["tracker"]
    assert parsed_content["messages"]


def test_execute_with_missing_action_name(rasa_app: SanicTestClient):
    """The execute endpoint requires a `name` key in the payload."""
    test_sender = "test_execute_with_missing_action_name"
    _create_tracker_for_sender(rasa_app, test_sender)
    data = {"wrong-key": "utter_greet"}
    _, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data)
    assert response.status == 400


def test_execute_with_not_existing_action(rasa_app: SanicTestClient):
    """Executing an unknown action surfaces as a server-side failure (500)."""
    test_sender = "test_execute_with_not_existing_action"
    _create_tracker_for_sender(rasa_app, test_sender)
    data = {"name": "ka[pa[opi[opj[oj[oija"}
    _, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data)
    assert response.status == 500


def test_trigger_intent(rasa_app: SanicTestClient):
    """Triggering a known intent returns the updated tracker and bot messages."""
    data = {"name": "greet"}
    _, response = rasa_app.post("/conversations/test_trigger/trigger_intent", json=data)
    assert response.status == 200
    parsed_content = response.json
    assert parsed_content["tracker"]
    assert parsed_content["messages"]


def test_trigger_intent_with_missing_intent_name(rasa_app: SanicTestClient):
    """The trigger_intent endpoint requires a `name` key in the payload."""
    test_sender = "test_trigger_intent_with_missing_action_name"
    data = {"wrong-key": "greet"}
    _, response = rasa_app.post(
        f"/conversations/{test_sender}/trigger_intent", json=data
    )
    assert response.status == 400


def test_trigger_intent_with_not_existing_intent(rasa_app: SanicTestClient):
    """Triggering an unknown intent is answered with 404."""
    test_sender = "test_trigger_intent_with_not_existing_intent"
    _create_tracker_for_sender(rasa_app, test_sender)
    data = {"name": "ka[pa[opi[opj[oj[oija"}
    _, response = rasa_app.post(
        f"/conversations/{test_sender}/trigger_intent", json=data
    )
    assert response.status == 404
@pytest.mark.parametrize(
    "input_channels, output_channel_to_use, expected_channel",
    [
        (None, "slack", CollectingOutputChannel),
        ([], None, CollectingOutputChannel),
        ([RestInput()], "slack", CollectingOutputChannel),
        ([RestInput()], "rest", CollectingOutputChannel),
        ([RestInput(), SlackInput("test")], "slack", SlackBot),
    ],
)
def test_get_output_channel(
    input_channels: List[Text], output_channel_to_use, expected_channel: Type
):
    """``?output_channel=`` picks a matching input channel, else the collector."""
    request = MagicMock()
    app = MagicMock()
    app.input_channels = input_channels
    request.app = app
    request.args = {"output_channel": output_channel_to_use}
    actual = rasa.server._get_output_channel(request, None)
    assert isinstance(actual, expected_channel)


@pytest.mark.parametrize(
    "input_channels, expected_channel",
    [
        ([], CollectingOutputChannel),
        ([RestInput()], CollectingOutputChannel),
        ([RestInput(), SlackInput("test")], SlackBot),
    ],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
    """``output_channel=latest`` resolves to the channel of the last user message."""
    request = MagicMock()
    app = MagicMock()
    app.input_channels = input_channels
    request.app = app
    request.args = {"output_channel": "latest"}
    # the tracker's last user utterance came in via the "slack" channel
    tracker = DialogueStateTracker.from_events(
        "default", [UserUttered("text", input_channel="slack")]
    )
    actual = rasa.server._get_output_channel(request, tracker)
    assert isinstance(actual, expected_channel)


def test_app_when_app_has_no_input_channels():
    """An app without an `input_channels` attribute falls back to the collector."""
    request = MagicMock()

    class NoInputChannels:
        pass

    request.app = NoInputChannels()
    actual = rasa.server._get_output_channel(
        request, DialogueStateTracker.from_events("default", [])
    )
    assert isinstance(actual, CollectingOutputChannel)
| 30.764259 | 88 | 0.66095 |
919663882be8fb99d61d9c8370e48e948cccaaf5 | 1,574 | py | Python | prepare_input.py | Dani-97/tbcnn | 1e19e20099188dea42c97f09de39a0fadbcba92f | [
"MIT"
] | 30 | 2019-01-26T09:19:17.000Z | 2022-01-27T06:52:56.000Z | prepare_input.py | Dani-97/tbcnn | 1e19e20099188dea42c97f09de39a0fadbcba92f | [
"MIT"
] | 9 | 2019-07-26T07:00:57.000Z | 2021-05-11T10:08:40.000Z | prepare_input.py | Dani-97/tbcnn | 1e19e20099188dea42c97f09de39a0fadbcba92f | [
"MIT"
] | 17 | 2019-08-20T09:46:00.000Z | 2022-02-16T19:44:26.000Z | import os
import numpy as np
import imageio
def prepare(inDir, outFile):
    """Prepare input: convert to float with unit variance and zero mean,
    extract labels and pack everything into a big numpy array to be used
    for training.

    inDir => directory of images named ``<prefix>_<patient>_<label>.<ext>``,
             where label is 1 (tbc) or 0 (nothing)
    outFile => path without extension (more than one file will be created)
    """
    if os.path.exists(outFile + '.npy'):
        # already done on a previous run; nothing to do
        print("Input was already prepared")
        return

    name_list = []
    label_list = []
    image_list = []
    for f in sorted(os.listdir(inDir)):
        in_path = os.path.join(inDir, f)
        filename = os.path.splitext(f)[0]
        pieces = filename.split('_')
        name = pieces[1]
        label = int(pieces[2])  # 1 tbc, 0 nothing
        img = imageio.imread(in_path)
        # Convert to float32 up front so normalization is not done in uint8
        image_list.append(img.astype(np.float32))
        label_list.append(label)
        name_list.append(name)

    # Stack all images into a single (N, H, W) ndarray plus the label vector
    images = np.stack(image_list)
    labels = np.array(label_list, dtype=np.int32)

    # Input normalization: zero mean, unit variance over the whole data set
    images -= np.mean(images)
    images /= np.std(images)

    # Add dummy channel layer -> (N, H, W, 1)
    images = images.reshape(images.shape + (1,))

    # Write data
    np.save(outFile + '.npy', images)
    np.save(outFile + '_labels.npy', labels)
    np.save(outFile + '_patients.npy', name_list)
8475877f9b90584b9383a9666d48bff7e0425e88 | 24,079 | py | Python | pandas/core/reshape/concat.py | RakhithJK/pandas | 0eeda645212c240d6cbdef8e3ba4834c3763553b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 28,899 | 2016-10-13T03:32:12.000Z | 2022-03-31T21:39:05.000Z | pandas/core/reshape/concat.py | RakhithJK/pandas | 0eeda645212c240d6cbdef8e3ba4834c3763553b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 31,004 | 2016-10-12T23:22:27.000Z | 2022-03-31T23:17:38.000Z | pandas/core/reshape/concat.py | RakhithJK/pandas | 0eeda645212c240d6cbdef8e3ba4834c3763553b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 15,149 | 2016-10-13T03:21:31.000Z | 2022-03-31T18:46:47.000Z | """
Concat routines.
"""
from __future__ import annotations
from collections import abc
from typing import (
TYPE_CHECKING,
Hashable,
Iterable,
Literal,
Mapping,
cast,
overload,
)
import numpy as np
from pandas._typing import Axis
from pandas.util._decorators import (
cache_readonly,
deprecate_nonkeyword_arguments,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
from pandas.core.arrays.categorical import (
factorize_from_iterable,
factorize_from_iterables,
)
import pandas.core.common as com
from pandas.core.indexes.api import (
Index,
MultiIndex,
all_indexes_same,
default_index,
ensure_index,
get_objs_combined_axis,
get_unanimous_names,
)
from pandas.core.internals import concatenate_managers
if TYPE_CHECKING:
from pandas import (
DataFrame,
Series,
)
from pandas.core.generic import NDFrame
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
# The overloads below teach type checkers the axis -> return-type relationship:
# concatenating frames (or anything along axis=1/"columns") yields a DataFrame,
# concatenating only Series along the index yields a Series.
@overload
def concat(
    objs: Iterable[DataFrame] | Mapping[Hashable, DataFrame],
    axis: Literal[0, "index"] = ...,
    join: str = ...,
    ignore_index: bool = ...,
    keys=...,
    levels=...,
    names=...,
    verify_integrity: bool = ...,
    sort: bool = ...,
    copy: bool = ...,
) -> DataFrame:
    ...


@overload
def concat(
    objs: Iterable[Series] | Mapping[Hashable, Series],
    axis: Literal[0, "index"] = ...,
    join: str = ...,
    ignore_index: bool = ...,
    keys=...,
    levels=...,
    names=...,
    verify_integrity: bool = ...,
    sort: bool = ...,
    copy: bool = ...,
) -> Series:
    ...


@overload
def concat(
    objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame],
    axis: Literal[0, "index"] = ...,
    join: str = ...,
    ignore_index: bool = ...,
    keys=...,
    levels=...,
    names=...,
    verify_integrity: bool = ...,
    sort: bool = ...,
    copy: bool = ...,
) -> DataFrame | Series:
    ...


@overload
def concat(
    objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame],
    axis: Literal[1, "columns"],
    join: str = ...,
    ignore_index: bool = ...,
    keys=...,
    levels=...,
    names=...,
    verify_integrity: bool = ...,
    sort: bool = ...,
    copy: bool = ...,
) -> DataFrame:
    ...


# NOTE(review): unlike the stubs above, this catch-all overload spells out
# concrete defaults (None/False/True) instead of `...` — harmless to type
# checkers, but inconsistent; confirm whether it should be normalized.
@overload
def concat(
    objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame],
    axis: Axis = ...,
    join: str = ...,
    ignore_index: bool = ...,
    keys=None,
    levels=None,
    names=None,
    verify_integrity: bool = False,
    sort: bool = False,
    copy: bool = True,
) -> DataFrame | Series:
    ...
@deprecate_nonkeyword_arguments(version=None, allowed_args=["objs"])
def concat(
    objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame],
    axis: Axis = 0,
    join: str = "outer",
    ignore_index: bool = False,
    keys=None,
    levels=None,
    names=None,
    verify_integrity: bool = False,
    sort: bool = False,
    copy: bool = True,
) -> DataFrame | Series:
    """
    Concatenate pandas objects along a particular axis with optional set logic
    along the other axes.

    Can also add a layer of hierarchical indexing on the concatenation axis,
    which may be useful if the labels are the same (or overlapping) on
    the passed axis number.

    Parameters
    ----------
    objs : a sequence or mapping of Series or DataFrame objects
        If a mapping is passed, the sorted keys will be used as the `keys`
        argument, unless it is passed, in which case the values will be
        selected (see below). Any None objects will be dropped silently unless
        they are all None in which case a ValueError will be raised.
    axis : {0/'index', 1/'columns'}, default 0
        The axis to concatenate along.
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on other axis (or axes).
    ignore_index : bool, default False
        If True, do not use the index values along the concatenation axis. The
        resulting axis will be labeled 0, ..., n - 1. This is useful if you are
        concatenating objects where the concatenation axis does not have
        meaningful indexing information. Note the index values on the other
        axes are still respected in the join.
    keys : sequence, default None
        If multiple levels passed, should contain tuples. Construct
        hierarchical index using the passed keys as the outermost level.
    levels : list of sequences, default None
        Specific levels (unique values) to use for constructing a
        MultiIndex. Otherwise they will be inferred from the keys.
    names : list, default None
        Names for the levels in the resulting hierarchical index.
    verify_integrity : bool, default False
        Check whether the new concatenated axis contains duplicates. This can
        be very expensive relative to the actual data concatenation.
    sort : bool, default False
        Sort non-concatenation axis if it is not already aligned when `join`
        is 'outer'.
        This has no effect when ``join='inner'``, which already preserves
        the order of the non-concatenation axis.

        .. versionchanged:: 1.0.0

           Changed to not sort by default.

    copy : bool, default True
        If False, do not copy data unnecessarily.

    Returns
    -------
    object, type of objs
        When concatenating all ``Series`` along the index (axis=0), a
        ``Series`` is returned. When ``objs`` contains at least one
        ``DataFrame``, a ``DataFrame`` is returned. When concatenating along
        the columns (axis=1), a ``DataFrame`` is returned.

    See Also
    --------
    Series.append : Concatenate Series.
    DataFrame.append : Concatenate DataFrames.
    DataFrame.join : Join DataFrames using indexes.
    DataFrame.merge : Merge DataFrames by indexes or columns.

    Notes
    -----
    The keys, levels, and names arguments are all optional.

    A walkthrough of how this method fits in with other tools for combining
    pandas objects can be found `here
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html>`__.

    Examples
    --------
    Combine two ``Series``.

    >>> s1 = pd.Series(['a', 'b'])
    >>> s2 = pd.Series(['c', 'd'])
    >>> pd.concat([s1, s2])
    0    a
    1    b
    0    c
    1    d
    dtype: object

    Clear the existing index and reset it in the result
    by setting the ``ignore_index`` option to ``True``.

    >>> pd.concat([s1, s2], ignore_index=True)
    0    a
    1    b
    2    c
    3    d
    dtype: object

    Add a hierarchical index at the outermost level of
    the data with the ``keys`` option.

    >>> pd.concat([s1, s2], keys=['s1', 's2'])
    s1  0    a
        1    b
    s2  0    c
        1    d
    dtype: object

    Label the index keys you create with the ``names`` option.

    >>> pd.concat([s1, s2], keys=['s1', 's2'],
    ...           names=['Series name', 'Row ID'])
    Series name  Row ID
    s1           0         a
                 1         b
    s2           0         c
                 1         d
    dtype: object

    Combine two ``DataFrame`` objects with identical columns.

    >>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
    ...                    columns=['letter', 'number'])
    >>> df1
      letter  number
    0      a       1
    1      b       2
    >>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
    ...                    columns=['letter', 'number'])
    >>> df2
      letter  number
    0      c       3
    1      d       4
    >>> pd.concat([df1, df2])
      letter  number
    0      a       1
    1      b       2
    0      c       3
    1      d       4

    Combine ``DataFrame`` objects with overlapping columns
    and return everything. Columns outside the intersection will
    be filled with ``NaN`` values.

    >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
    ...                    columns=['letter', 'number', 'animal'])
    >>> df3
      letter  number animal
    0      c       3    cat
    1      d       4    dog
    >>> pd.concat([df1, df3], sort=False)
      letter  number animal
    0      a       1    NaN
    1      b       2    NaN
    0      c       3    cat
    1      d       4    dog

    Combine ``DataFrame`` objects with overlapping columns
    and return only those that are shared by passing ``inner`` to
    the ``join`` keyword argument.

    >>> pd.concat([df1, df3], join="inner")
      letter  number
    0      a       1
    1      b       2
    0      c       3
    1      d       4

    Combine ``DataFrame`` objects horizontally along the x axis by
    passing in ``axis=1``.

    >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
    ...                    columns=['animal', 'name'])
    >>> pd.concat([df1, df4], axis=1)
      letter  number  animal    name
    0      a       1    bird   polly
    1      b       2  monkey  george

    Prevent the result from including duplicate index values with the
    ``verify_integrity`` option.

    >>> df5 = pd.DataFrame([1], index=['a'])
    >>> df5
       0
    a  1
    >>> df6 = pd.DataFrame([2], index=['a'])
    >>> df6
       0
    a  2
    >>> pd.concat([df5, df6], verify_integrity=True)
    Traceback (most recent call last):
        ...
    ValueError: Indexes have overlapping values: ['a']
    """
    # All heavy lifting is delegated to _Concatenator, which validates the
    # inputs, computes the result axes and performs the block concatenation.
    op = _Concatenator(
        objs,
        axis=axis,
        ignore_index=ignore_index,
        join=join,
        keys=keys,
        levels=levels,
        names=names,
        verify_integrity=verify_integrity,
        copy=copy,
        sort=sort,
    )

    return op.get_result()
class _Concatenator:
"""
Orchestrates a concatenation operation for BlockManagers
"""
def __init__(
self,
objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame],
axis=0,
join: str = "outer",
keys=None,
levels=None,
names=None,
ignore_index: bool = False,
verify_integrity: bool = False,
copy: bool = True,
sort=False,
):
if isinstance(objs, (ABCSeries, ABCDataFrame, str)):
raise TypeError(
"first argument must be an iterable of pandas "
f'objects, you passed an object of type "{type(objs).__name__}"'
)
if join == "outer":
self.intersect = False
elif join == "inner":
self.intersect = True
else: # pragma: no cover
raise ValueError(
"Only can inner (intersect) or outer (union) join the other axis"
)
if isinstance(objs, abc.Mapping):
if keys is None:
keys = list(objs.keys())
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError("No objects to concatenate")
if keys is None:
objs = list(com.not_none(*objs))
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
if isinstance(keys, MultiIndex):
# TODO: retain levels?
keys = type(keys).from_tuples(clean_keys, names=keys.names)
else:
name = getattr(keys, "name", None)
keys = Index(clean_keys, name=name)
if len(objs) == 0:
raise ValueError("All objects passed were None")
# figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
raise TypeError(msg)
ndims.add(obj.ndim)
# get the sample
# want the highest ndim that we have, and must be non-empty
# unless all objs are empty
sample: NDFrame | None = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
# filter out the empties if we have not multi-index possibilities
# note to keep empty Series as it affect to result columns / name
non_empties = [
obj for obj in objs if sum(obj.shape) > 0 or isinstance(obj, ABCSeries)
]
if len(non_empties) and (
keys is None and names is None and levels is None and not self.intersect
):
objs = non_empties
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
# Standardize axis parameter to int
if isinstance(sample, ABCSeries):
axis = sample._constructor_expanddim._get_axis_number(axis)
else:
axis = sample._get_axis_number(axis)
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, ABCDataFrame)
if self._is_frame:
axis = sample._get_block_manager_axis(axis)
self._is_series = isinstance(sample, ABCSeries)
if not 0 <= axis <= sample.ndim:
raise AssertionError(
f"axis must be between 0 and {sample.ndim}, input was {axis}"
)
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim - 1:
raise ValueError(
"cannot concatenate unaligned mixed "
"dimensional NDFrame objects"
)
else:
name = getattr(obj, "name", None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
# mypy needs to know sample is not an NDFrame
sample = cast("DataFrame | Series", sample)
obj = sample._constructor({name: obj})
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
self.bm_axis = axis
self.axis = 1 - self.bm_axis if self._is_frame else 0
self.keys = keys
self.names = names or getattr(keys, "names", None)
self.levels = levels
self.sort = sort
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
cons: type[DataFrame | Series]
sample: DataFrame | Series
# series only
if self._is_series:
sample = cast("Series", self.objs[0])
# stack blocks
if self.bm_axis == 0:
name = com.consensus_name_attr(self.objs)
cons = sample._constructor
arrs = [ser._values for ser in self.objs]
res = concat_compat(arrs, axis=0)
result = cons(res, index=self.new_axes[0], name=name, dtype=res.dtype)
return result.__finalize__(self, method="concat")
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
# GH28330 Preserves subclassed objects through concat
cons = sample._constructor_expanddim
index, columns = self.new_axes
df = cons(data, index=index, copy=self.copy)
df.columns = columns
return df.__finalize__(self, method="concat")
# combine block managers
else:
sample = cast("DataFrame", self.objs[0])
mgrs_indexers = []
for obj in self.objs:
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
# ::-1 to convert BlockManager ax to DataFrame ax
if ax == self.bm_axis:
# Suppress reindexing on concat axis
continue
# 1-ax to convert BlockManager axis to DataFrame axis
obj_labels = obj.axes[1 - ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.get_indexer(new_labels)
mgrs_indexers.append((obj._mgr, indexers))
new_data = concatenate_managers(
mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy
)
if not self.copy:
new_data._consolidate_inplace()
cons = sample._constructor
return cons(new_data).__finalize__(self, method="concat")
def _get_result_dim(self) -> int:
if self._is_series and self.bm_axis == 1:
return 2
else:
return self.objs[0].ndim
    def _get_new_axes(self) -> list[Index]:
        """Build the full list of result axes (BlockManager order).

        The axis being concatenated along receives the combined concat axis;
        every other axis is the union/intersection of the inputs' axes.
        """
        ndim = self._get_result_dim()
        return [
            # NOTE: _get_concat_axis is a cache_readonly, so plain attribute
            # access (no call parentheses) yields the computed Index.
            self._get_concat_axis if i == self.bm_axis else self._get_comb_axis(i)
            for i in range(ndim)
        ]
    def _get_comb_axis(self, i: int) -> Index:
        """Combine the non-concat axis ``i`` across all input objects.

        ``self.intersect`` selects intersection vs union semantics
        (presumably derived from join="inner"/"outer" — set in __init__,
        outside this view).
        """
        data_axis = self.objs[0]._get_block_manager_axis(i)
        return get_objs_combined_axis(
            self.objs,
            axis=data_axis,
            intersect=self.intersect,
            sort=self.sort,
            copy=self.copy,
        )
    @cache_readonly
    def _get_concat_axis(self) -> Index:
        """
        Return index to be used along concatenation axis.
        """
        if self._is_series:
            if self.bm_axis == 0:
                # Stacking Series vertically: the inputs' own indexes are
                # appended below (or wrapped in a MultiIndex when keys given).
                indexes = [x.index for x in self.objs]
            elif self.ignore_index:
                # Column axis with ignore_index: just number the columns.
                idx = default_index(len(self.objs))
                return idx
            elif self.keys is None:
                # Column axis, no explicit keys: derive column labels from
                # the Series names, numbering the unnamed ones.
                names: list[Hashable] = [None] * len(self.objs)
                num = 0
                has_names = False
                for i, x in enumerate(self.objs):
                    if not isinstance(x, ABCSeries):
                        raise TypeError(
                            f"Cannot concatenate type 'Series' with "
                            f"object of type '{type(x).__name__}'"
                        )
                    if x.name is not None:
                        names[i] = x.name
                        has_names = True
                    else:
                        # Unnamed Series get consecutive integer labels.
                        names[i] = num
                        num += 1
                if has_names:
                    return Index(names)
                else:
                    return default_index(len(self.objs))
            else:
                # Explicit keys become the column labels directly.
                return ensure_index(self.keys).set_names(self.names)
        else:
            indexes = [x.axes[self.axis] for x in self.objs]
        if self.ignore_index:
            idx = default_index(sum(len(i) for i in indexes))
            return idx
        if self.keys is None:
            concat_axis = _concat_indexes(indexes)
        else:
            # keys nest each input index under its key in a MultiIndex.
            concat_axis = _make_concat_multiindex(
                indexes, self.keys, self.levels, self.names
            )
        self._maybe_check_integrity(concat_axis)
        return concat_axis
    def _maybe_check_integrity(self, concat_index: Index):
        # When verify_integrity=True was requested, reject a result axis that
        # contains duplicate labels (e.g. overlapping input indexes).
        if self.verify_integrity:
            if not concat_index.is_unique:
                overlap = concat_index[concat_index.duplicated()].unique()
                raise ValueError(f"Indexes have overlapping values: {overlap}")
def _concat_indexes(indexes) -> Index:
    """Append all remaining indexes onto the first one, preserving order."""
    first, *remaining = indexes
    return first.append(remaining)
def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex:
    """
    Build the MultiIndex for the concat axis when ``keys`` are given.

    Each input index is nested under its key: the key(s) become the outer
    level(s) and the original index values the inner level(s).
    """
    # Tuple keys (or multiple explicit levels) produce multiple outer levels.
    if (levels is None and isinstance(keys[0], tuple)) or (
        levels is not None and len(levels) > 1
    ):
        zipped = list(zip(*keys))
        if names is None:
            names = [None] * len(zipped)
        if levels is None:
            _, levels = factorize_from_iterables(zipped)
        else:
            levels = [ensure_index(x) for x in levels]
    else:
        zipped = [keys]
        if names is None:
            names = [None]
        if levels is None:
            levels = [ensure_index(keys)]
        else:
            levels = [ensure_index(x) for x in levels]
    # Slow path: inputs differ (or levels contain duplicates), so per-level
    # codes must be computed explicitly for every input block.
    if not all_indexes_same(indexes) or not all(level.is_unique for level in levels):
        codes_list = []
        # things are potentially different sizes, so compute the exact codes
        # for each level and pass those to MultiIndex.from_arrays
        for hlevel, level in zip(zipped, levels):
            to_concat = []
            for key, index in zip(hlevel, indexes):
                # Find matching codes, include matching nan values as equal.
                mask = (isna(level) & isna(key)) | (level == key)
                if not mask.any():
                    raise ValueError(f"Key {key} not in level {level}")
                i = np.nonzero(mask)[0][0]
                # Every row of this input gets the same outer-level code.
                to_concat.append(np.repeat(i, len(index)))
            codes_list.append(np.concatenate(to_concat))
        concat_index = _concat_indexes(indexes)
        # these go at the end
        if isinstance(concat_index, MultiIndex):
            levels.extend(concat_index.levels)
            codes_list.extend(concat_index.codes)
        else:
            codes, categories = factorize_from_iterable(concat_index)
            levels.append(categories)
            codes_list.append(codes)
        if len(names) == len(levels):
            names = list(names)
        else:
            # make sure that all of the passed indices have the same nlevels
            if not len({idx.nlevels for idx in indexes}) == 1:
                raise AssertionError(
                    "Cannot concat indices that do not have the same number of levels"
                )
            # also copies
            names = list(names) + list(get_unanimous_names(*indexes))
        return MultiIndex(
            levels=levels, codes=codes_list, names=names, verify_integrity=False
        )
    # Fast path: all input indexes are identical, so codes can be built by
    # repeating/tiling instead of per-row matching.
    new_index = indexes[0]
    n = len(new_index)
    kpieces = len(indexes)
    # also copies
    new_names = list(names)
    new_levels = list(levels)
    # construct codes
    new_codes = []
    # do something a bit more speedy
    for hlevel, level in zip(zipped, levels):
        hlevel = ensure_index(hlevel)
        mapped = level.get_indexer(hlevel)
        mask = mapped == -1
        if mask.any():
            raise ValueError(f"Values not found in passed level: {hlevel[mask]!s}")
        new_codes.append(np.repeat(mapped, n))
    if isinstance(new_index, MultiIndex):
        new_levels.extend(new_index.levels)
        new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes])
    else:
        new_levels.append(new_index.unique())
        single_codes = new_index.unique().get_indexer(new_index)
        new_codes.append(np.tile(single_codes, kpieces))
    if len(new_names) < len(new_levels):
        new_names.extend(new_index.names)
    return MultiIndex(
        levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
    )
| 30.870513 | 88 | 0.551061 |
c30a10b7d74670d8f6b406b6a60afc1a6a993d5e | 3,088 | py | Python | metric/c_f1.py | lizhaoliu-Lec/Revisiting_Deep_Metric_Learning_PyTorch | 27f2a914d818fef56f33767e5287a3dc3f2091a4 | [
"MIT"
] | null | null | null | metric/c_f1.py | lizhaoliu-Lec/Revisiting_Deep_Metric_Learning_PyTorch | 27f2a914d818fef56f33767e5287a3dc3f2091a4 | [
"MIT"
] | null | null | null | metric/c_f1.py | lizhaoliu-Lec/Revisiting_Deep_Metric_Learning_PyTorch | 27f2a914d818fef56f33767e5287a3dc3f2091a4 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from scipy.special import comb
class Metric:
    """Pairwise clustering F1 score ('c_f1').

    Re-labels each predicted cluster by its medoid-like representative
    (the sample closest to the cluster centroid), then scores the pairing
    of samples against the ground-truth labels with an F-measure over
    same-cluster pairs.
    """

    def __init__(self, **kwargs):
        self.requires = ['kmeans_cosine', 'kmeans_nearest_cosine', 'features_cosine', 'target_labels']
        self.name = 'c_f1'

    def __call__(self, target_labels, computed_cluster_labels_cosine, features_cosine, centroids_cosine):
        if isinstance(features_cosine, torch.Tensor):
            features_cosine = features_cosine.detach().cpu().numpy()

        # Distance of every sample to its assigned centroid.
        dists = np.linalg.norm(
            features_cosine - centroids_cosine[computed_cluster_labels_cosine], axis=1)

        # Re-label each cluster by the index of the member closest to the
        # centroid, so labels_pred values are representative sample indices.
        labels_pred = np.zeros(len(features_cosine))
        for cluster_id in np.unique(computed_cluster_labels_cosine):
            members = np.where(computed_cluster_labels_cosine == cluster_id)[0]
            representative = members[np.argmin(dists[members])]
            labels_pred[members] = representative

        # Sizes of the ground-truth classes.
        unique_targets = np.unique(target_labels)
        class_sizes = np.array(
            [len(np.where(target_labels == t)[0]) for t in unique_targets])

        # Map each predicted item id to a dense position, then count sizes.
        item_index = {key: pos for pos, key in enumerate(np.unique(labels_pred))}
        item_sizes = np.zeros(len(item_index))
        for lab in labels_pred:
            item_sizes[item_index[lab]] += 1

        # True Positives + False Positives: pairs sharing a true class.
        tp_fp = comb(class_sizes, 2).sum()

        # True Positives: same-class pairs that also share a predicted item.
        tp = 0
        for t in unique_targets:
            member_ids = labels_pred[np.where(target_labels == t)[0]]
            counts = np.zeros(len(item_index))
            for mid in member_ids:
                counts[item_index[mid]] += 1
            tp += comb(counts, 2).sum()

        fp = tp_fp - tp
        # False Negatives: same-item pairs minus the true positives.
        fn = comb(item_sizes, 2).sum() - tp

        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        # Harmonic mean (beta = 1 F-measure).
        return 2 * precision * recall / (precision + recall)
| 35.494253 | 113 | 0.562176 |
a3c3cff744ab2c0dfc676a6454ce7229b3cd0187 | 6,072 | py | Python | vispy/color/color_space.py | izaid/vispy | 402cf95bfef88d70c9c45bb27c532ed72944e14a | [
"BSD-3-Clause"
] | null | null | null | vispy/color/color_space.py | izaid/vispy | 402cf95bfef88d70c9c45bb27c532ed72944e14a | [
"BSD-3-Clause"
] | null | null | null | vispy/color/color_space.py | izaid/vispy | 402cf95bfef88d70c9c45bb27c532ed72944e14a | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import numpy as np
from ..ext.six import string_types
###############################################################################
# Utility functions
def _check_color_dim(val):
    """Ensure val is Nx(n_col), usually Nx3"""
    val = np.atleast_2d(val)
    n_cols = val.shape[1]
    if n_cols not in (3, 4):
        raise RuntimeError('Value must have second dimension of size 3 or 4')
    return val, n_cols
###############################################################################
# RGB<->HEX conversion
def _hex_to_rgba(hexs):
    """Convert hex to rgba, permitting alpha values in hex"""
    hexs = np.atleast_1d(np.array(hexs, '|U9'))
    out = np.ones((len(hexs), 4), np.float32)
    for row, h in enumerate(hexs):
        assert isinstance(h, string_types)
        # Optional leading '#'; 6 hex digits = RGB, 8 = RGBA.
        off = 1 if h[0] == '#' else 0
        assert len(h) in (6 + off, 8 + off)
        n_channels = (len(h) - off) // 2
        channels = [int(h[pos:pos + 2], 16) / 255.
                    for pos in range(off, len(h), 2)]
        out[row, :n_channels] = channels
    return out
def _rgb_to_hex(rgbs):
    """Convert rgb to hex triplet"""
    rgbs, n_dim = _check_color_dim(rgbs)
    hex_strings = []
    for rgb in rgbs:
        r, g, b = (255 * rgb[:3]).astype(np.uint8)
        hex_strings.append('#%02x%02x%02x' % (r, g, b))
    return np.array(hex_strings, '|U7')
###############################################################################
# RGB<->HSV conversion
def _rgb_to_hsv(rgbs):
    """Convert Nx3 or Nx4 rgb to hsv

    An Nx4 input is treated as RGBA; the alpha channel is passed through
    unchanged as the fourth output column.
    """
    rgbs, n_dim = _check_color_dim(rgbs)
    hsvs = list()
    for rgb in rgbs:
        rgb = rgb[:3]  # don't use alpha here
        idx = np.argmax(rgb)
        val = rgb[idx]
        c = val - np.min(rgb)  # chroma
        if c == 0:
            # Achromatic (grey): hue/saturation are defined as 0.
            hue = 0
            sat = 0
        else:
            if idx == 0:  # R == max
                hue = ((rgb[1] - rgb[2]) / c) % 6
            elif idx == 1:  # G == max
                hue = (rgb[2] - rgb[0]) / c + 2
            else:  # B == max
                hue = (rgb[0] - rgb[1]) / c + 4
            hue *= 60  # sectors -> degrees
            sat = c / val
        hsvs.append([hue, sat, val])
    hsvs = np.array(hsvs, dtype=np.float32)
    if n_dim == 4:
        # BUGFIX: rgbs[:, 3] is 1-D and cannot be concatenated to a 2-D
        # array along axis 1; slice with 3: to keep the alpha column 2-D.
        hsvs = np.concatenate((hsvs, rgbs[:, 3:]), axis=1)
    return hsvs
def _hsv_to_rgb(hsvs):
    """Convert Nx3 or Nx4 hsv to rgb

    An Nx4 input is treated as HSV + alpha; the alpha channel is passed
    through unchanged as the fourth output column.
    """
    hsvs, n_dim = _check_color_dim(hsvs)
    # In principle, we *might* be able to vectorize this, but might as well
    # wait until a compelling use case appears
    rgbs = list()
    for hsv in hsvs:
        c = hsv[1] * hsv[2]  # chroma = S * V
        m = hsv[2] - c  # offset added to each channel
        hp = hsv[0] / 60  # hue sector in [0, 6)
        x = c * (1 - abs(hp % 2 - 1))
        if 0 <= hp < 1:
            r, g, b = c, x, 0
        elif hp < 2:
            r, g, b = x, c, 0
        elif hp < 3:
            r, g, b = 0, c, x
        elif hp < 4:
            r, g, b = 0, x, c
        elif hp < 5:
            r, g, b = x, 0, c
        else:
            r, g, b = c, 0, x
        rgbs.append([r + m, g + m, b + m])
    rgbs = np.array(rgbs, dtype=np.float32)
    if n_dim == 4:
        # BUGFIX: hsvs[:, 3] is 1-D and cannot be concatenated to a 2-D
        # array along axis 1; slice with 3: to keep the alpha column 2-D.
        rgbs = np.concatenate((rgbs, hsvs[:, 3:]), axis=1)
    return rgbs
###############################################################################
# RGB<->CIELab conversion

# These numbers are adapted from MIT-licensed MATLAB code for
# Lab<->RGB conversion. They provide an XYZ<->RGB conversion matrices,
# w/D65 white point normalization built in.

#_rgb2xyz = np.array([[0.412453, 0.357580, 0.180423],
#                     [0.212671, 0.715160, 0.072169],
#                     [0.019334, 0.119193, 0.950227]])
#_white_norm = np.array([0.950456, 1.0, 1.088754])
#_rgb2xyz /= _white_norm[:, np.newaxis]
#_rgb2xyz_norm = _rgb2xyz.T
_rgb2xyz_norm = np.array([[0.43395276, 0.212671, 0.01775791],
                          [0.37621941, 0.71516, 0.10947652],
                          [0.18982783, 0.072169, 0.87276557]])

#_xyz2rgb = np.array([[3.240479, -1.537150, -0.498535],
#                     [-0.969256, 1.875992, 0.041556],
#                     [0.055648, -0.204043, 1.057311]])
#_white_norm = np.array([0.950456, 1., 1.088754])
#_xyz2rgb *= _white_norm[np.newaxis, :]
_xyz2rgb_norm = np.array([[3.07993271, -1.53715, -0.54278198],
                          [-0.92123518, 1.875992, 0.04524426],
                          [0.05289098, -0.204043, 1.15115158]])


def _rgb_to_lab(rgbs):
    """Convert Nx3 or Nx4 rgb to CIELab (alpha, if present, passes through)."""
    rgbs, n_dim = _check_color_dim(rgbs)
    # convert RGB->XYZ: sRGB inverse gamma, then the linear transform above
    xyz = rgbs[:, :3].copy()  # a misnomer for now but will end up being XYZ
    over = xyz > 0.04045
    xyz[over] = ((xyz[over] + 0.055) / 1.055) ** 2.4
    xyz[~over] /= 12.92
    xyz = np.dot(xyz, _rgb2xyz_norm)
    over = xyz > 0.008856
    xyz[over] = xyz[over] ** (1. / 3.)
    xyz[~over] = 7.787 * xyz[~over] + 0.13793103448275862
    # Convert XYZ->LAB
    L = (116. * xyz[:, 1]) - 16
    a = 500 * (xyz[:, 0] - xyz[:, 1])
    b = 200 * (xyz[:, 1] - xyz[:, 2])
    labs = [L, a, b]
    # Append alpha if necessary
    if n_dim == 4:
        # BUGFIX: np.atleast1d does not exist (AttributeError at runtime for
        # RGBA input); the correct NumPy function is np.atleast_1d.
        labs.append(np.atleast_1d(rgbs[:, 3]))
    labs = np.array(labs, order='F').T  # Becomes 'C' order b/c of .T
    return labs


def _lab_to_rgb(labs):
    """Convert Nx3 or Nx4 lab to rgb"""
    # adapted from BSD-licensed work in MATLAB by Mark Ruzon
    # Based on ITU-R Recommendation BT.709 using the D65
    labs, n_dim = _check_color_dim(labs)
    # Convert Lab->XYZ (silly indexing used to preserve dimensionality)
    y = (labs[:, 0] + 16.) / 116.
    x = (labs[:, 1] / 500.) + y
    z = y - (labs[:, 2] / 200.)
    xyz = np.concatenate(([x], [y], [z]))  # 3xN
    over = xyz > 0.2068966
    xyz[over] = xyz[over] ** 3.
    xyz[~over] = (xyz[~over] - 0.13793103448275862) / 7.787
    # Convert XYZ->RGB (sRGB gamma applied below)
    rgbs = np.dot(_xyz2rgb_norm, xyz).T
    over = rgbs > 0.0031308
    rgbs[over] = 1.055 * (rgbs[over] ** (1. / 2.4)) - 0.055
    rgbs[~over] *= 12.92
    if n_dim == 4:
        # BUGFIX: labs[:, 3] is 1-D and cannot be concatenated to a 2-D
        # array along axis 1; slice with 3: to keep the alpha column 2-D.
        rgbs = np.concatenate((rgbs, labs[:, 3:]), axis=1)
    rgbs = np.clip(rgbs, 0., 1.)
    return rgbs
| 33 | 79 | 0.501318 |
6e8b7cb2a73fe3a0cb897e9b993057bfad15c3d0 | 6,251 | py | Python | diskimage_builder/graph/digraph.py | stackhpc/diskimage-builder | 196b44f38a0188b47ee1846f8f009cc41d68ec6e | [
"Apache-2.0"
] | null | null | null | diskimage_builder/graph/digraph.py | stackhpc/diskimage-builder | 196b44f38a0188b47ee1846f8f009cc41d68ec6e | [
"Apache-2.0"
] | null | null | null | diskimage_builder/graph/digraph.py | stackhpc/diskimage-builder | 196b44f38a0188b47ee1846f8f009cc41d68ec6e | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Andreas Florath (andreas@florath.net)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
class Digraph(object):
    """Implements a directed graph.

    Each node of the digraph must have a unique name.
    """

    class Node(object):
        """Directed graph node.

        This holds the incoming and outgoing edges as well as the
        nodes' name.
        """

        def __init__(self, name):
            """Initializes a node.

            Incoming and outgoing are sets of nodes. Typically one
            direction is provided and the other can be automatically
            computed.
            """
            self.__name = name
            self.__incoming = set()
            self.__outgoing = set()

        def get_name(self):
            """Returns the name of the node."""
            return self.__name

        def add_incoming(self, node):
            """Add node to the incoming set."""
            self.__incoming.add(node)

        def add_outgoing(self, node):
            """Add node to the outgoing set."""
            # Fixed docstring: this updates *outgoing*, not incoming.
            self.__outgoing.add(node)

        def get_iter_outgoing(self):
            """Return an iterator over the outgoing nodes."""
            return iter(self.__outgoing)

        @staticmethod
        def __as_named_list(inlist):
            """Return the given nodes as a list of their names.

            BUGFIX: returns a real list instead of a one-shot ``map``
            iterator, so the result can be iterated more than once.
            """
            return [node.get_name() for node in inlist]

        def get_outgoing_as_named_list(self):
            """Return the names of all outgoing nodes as a list."""
            return self.__as_named_list(self.__outgoing)

    def __init__(self):
        """Create a empty digraph."""
        # Maps node name -> node object; names are unique.
        self._named_nodes = {}

    def create_from_dict(self, init_dgraph, node_gen_func=Node):
        """Creates a new digraph based on the given information.

        ``init_dgraph`` maps node names to lists of successor names;
        ``node_gen_func`` builds each node object from its name.
        """
        # First run: create all nodes
        for node_name in init_dgraph:
            self.add_node(node_gen_func(node_name))
        # Second run: run through all nodes and create the edges.
        for node_name, outs in init_dgraph.items():
            node_from = self.find(node_name)
            for onode in outs:
                node_to = self.find(onode)
                if node_to is None:
                    raise RuntimeError("Node '%s' is referenced "
                                       "but not specified" % onode)
                self.create_edge(node_from, node_to)

    def add_node(self, anode):
        """Adds a new node to the graph.

        Raises if a node with the same name already exists.
        """
        assert issubclass(anode.__class__, Digraph.Node)
        # O(1) membership test on the name dict instead of the previous
        # linear scan over every node (the dict is keyed by name already).
        if anode.get_name() in self._named_nodes:
            raise RuntimeError("Node with name [%s] already "
                               "exists" % anode.get_name())
        self._named_nodes[anode.get_name()] = anode

    def create_edge(self, anode, bnode):
        """Creates an edge from a to b - both must be nodes."""
        assert issubclass(anode.__class__, Digraph.Node)
        assert issubclass(bnode.__class__, Digraph.Node)
        assert anode.get_name() in self._named_nodes.keys()
        assert anode == self._named_nodes[anode.get_name()]
        assert bnode.get_name() in self._named_nodes.keys()
        assert bnode == self._named_nodes[bnode.get_name()]
        anode.add_outgoing(bnode)
        bnode.add_incoming(anode)

    def get_iter_nodes_values(self):
        """Returns an iterator over the node objects.

        Note: it is not possible to change things with the help of the
        result of this function.
        """
        return iter(self._named_nodes.values())

    def find(self, name):
        """Get the node with the given name.

        Return None if not available.
        """
        return self._named_nodes.get(name)

    def as_dict(self):
        """Outputs this digraph and create a dictionary.

        BUGFIX: values are plain lists of successor names; previously they
        were one-shot ``map`` iterators that were empty after a single
        traversal (and not JSON-serializable).
        """
        return {node.get_name(): node.get_outgoing_as_named_list()
                for node in self._named_nodes.values()}
def topological_sort(dg):
    """Digraph topological search.

    Depth-first search that prepends each node to the result once all of
    its successors have been handled, yielding a topological ordering.
    """
    ordered = []  # topologically sorted result
    # Nodes already visited, held locally so the nodes are not mutated.
    seen = []

    def _dfs(node):
        """Recursive depth-first visit."""
        if node in seen:
            return
        seen.append(node)
        for successor in node.get_iter_outgoing():
            _dfs(successor)
        ordered.insert(0, node)

    for start in dg.get_iter_nodes_values():
        _dfs(start)
    return ordered
# Utility functions
def digraph_create_from_dict(init_dgraph, node_gen_func=Digraph.Node):
    """Creates a new digraph based on the given information."""
    graph = Digraph()
    graph.create_from_dict(init_dgraph, node_gen_func)
    return graph
def node_list_to_node_name_list(node_list):
    """Converts a node list into a list of the corresponding node names."""
    return [node.get_name() for node in node_list]
| 32.05641 | 75 | 0.610142 |
0f5b4cb689a13d6cc12426ab64bc576cfd9716a2 | 70,498 | py | Python | testing/test_terminal.py | DahlitzFlorian/pytest | 4a1557fa0e04132eb847d4a5d01c77f0ab777c5a | [
"MIT"
] | 1 | 2020-10-03T21:28:12.000Z | 2020-10-03T21:28:12.000Z | testing/test_terminal.py | DahlitzFlorian/pytest | 4a1557fa0e04132eb847d4a5d01c77f0ab777c5a | [
"MIT"
] | null | null | null | testing/test_terminal.py | DahlitzFlorian/pytest | 4a1557fa0e04132eb847d4a5d01c77f0ab777c5a | [
"MIT"
] | 1 | 2020-06-24T12:44:33.000Z | 2020-06-24T12:44:33.000Z | """
terminal reporting of the full testing process.
"""
import collections
import os
import sys
import textwrap
from io import StringIO
from typing import Dict
from typing import List
from typing import Tuple
import pluggy
import py
import _pytest.config
import pytest
from _pytest.config import ExitCode
from _pytest.pytester import Testdir
from _pytest.reports import BaseReport
from _pytest.terminal import _folded_skips
from _pytest.terminal import _get_line_with_reprcrash_message
from _pytest.terminal import _plugin_nameversions
from _pytest.terminal import getreportopt
from _pytest.terminal import TerminalReporter
DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"])
TRANS_FNMATCH = str.maketrans({"[": "[[]", "]": "[]]"})
class Option:
    """Bundle of command-line flags controlling output verbosity."""

    def __init__(self, verbosity=0):
        self.verbosity = verbosity

    @property
    def args(self):
        """Return the pytest CLI arguments equivalent to this option set."""
        return ["--verbosity=%d" % self.verbosity]
@pytest.fixture(
    params=[Option(verbosity=0), Option(verbosity=1), Option(verbosity=-1)],
    ids=["default", "verbose", "quiet"],
)
def option(request):
    # Parametrized fixture: consuming tests run once per verbosity level
    # (default / verbose / quiet), receiving the matching Option instance.
    return request.param
@pytest.mark.parametrize(
    "input,expected",
    [
        ([DistInfo(project_name="test", version=1)], ["test-1"]),
        ([DistInfo(project_name="pytest-test", version=1)], ["test-1"]),
        (
            [
                DistInfo(project_name="test", version=1),
                DistInfo(project_name="test", version=1),
            ],
            ["test-1"],
        ),
    ],
    ids=["normal", "prefix-strip", "deduplicate"],
)
def test_plugin_nameversion(input, expected):
    # _plugin_nameversions formats "name-version" strings, stripping a
    # leading "pytest-" prefix and deduplicating identical entries.
    # The first element of each pair (the plugin object) is unused here,
    # so None stands in for it.
    pluginlist = [(None, x) for x in input]
    result = _plugin_nameversions(pluginlist)
    assert result == expected
class TestTerminal:
def test_pass_skip_fail(self, testdir, option):
testdir.makepyfile(
"""
import pytest
def test_ok():
pass
def test_skip():
pytest.skip("xx")
def test_func():
assert 0
"""
)
result = testdir.runpytest(*option.args)
if option.verbosity > 0:
result.stdout.fnmatch_lines(
[
"*test_pass_skip_fail.py::test_ok PASS*",
"*test_pass_skip_fail.py::test_skip SKIP*",
"*test_pass_skip_fail.py::test_func FAIL*",
]
)
elif option.verbosity == 0:
result.stdout.fnmatch_lines(["*test_pass_skip_fail.py .sF*"])
else:
result.stdout.fnmatch_lines([".sF*"])
result.stdout.fnmatch_lines(
[" def test_func():", "> assert 0", "E assert 0"]
)
def test_internalerror(self, testdir, linecomp):
modcol = testdir.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
with pytest.raises(ValueError) as excinfo:
raise ValueError("hello")
rep.pytest_internalerror(excinfo.getrepr())
linecomp.assert_contains_lines(["INTERNALERROR> *ValueError*hello*"])
def test_writeline(self, testdir, linecomp):
modcol = testdir.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
rep.write_fspath_result(modcol.nodeid, ".")
rep.write_line("hello world")
lines = linecomp.stringio.getvalue().split("\n")
assert not lines[0]
assert lines[1].endswith(modcol.name + " .")
assert lines[2] == "hello world"
def test_show_runtest_logstart(self, testdir, linecomp):
item = testdir.getitem("def test_func(): pass")
tr = TerminalReporter(item.config, file=linecomp.stringio)
item.config.pluginmanager.register(tr)
location = item.reportinfo()
tr.config.hook.pytest_runtest_logstart(
nodeid=item.nodeid, location=location, fspath=str(item.fspath)
)
linecomp.assert_contains_lines(["*test_show_runtest_logstart.py*"])
def test_runtest_location_shown_before_test_starts(self, testdir):
testdir.makepyfile(
"""
def test_1():
import time
time.sleep(20)
"""
)
child = testdir.spawn_pytest("")
child.expect(".*test_runtest_location.*py")
child.sendeof()
child.kill(15)
def test_report_collect_after_half_a_second(self, testdir):
"""Test for "collecting" being updated after 0.5s"""
testdir.makepyfile(
**{
"test1.py": """
import _pytest.terminal
_pytest.terminal.REPORT_COLLECTING_RESOLUTION = 0
def test_1():
pass
""",
"test2.py": "def test_2(): pass",
}
)
# Explicitly test colored output.
testdir.monkeypatch.setenv("PY_COLORS", "1")
child = testdir.spawn_pytest("-v test1.py test2.py")
child.expect(r"collecting \.\.\.")
child.expect(r"collecting 1 item")
child.expect(r"collecting 2 items")
child.expect(r"collected 2 items")
rest = child.read().decode("utf8")
assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
def test_itemreport_subclasses_show_subclassed_file(self, testdir):
testdir.makepyfile(
**{
"tests/test_p1": """
class BaseTests(object):
fail = False
def test_p1(self):
if self.fail: assert 0
""",
"tests/test_p2": """
from test_p1 import BaseTests
class TestMore(BaseTests): pass
""",
"tests/test_p3.py": """
from test_p1 import BaseTests
BaseTests.fail = True
class TestMore(BaseTests): pass
""",
}
)
result = testdir.runpytest("tests/test_p2.py", "--rootdir=tests")
result.stdout.fnmatch_lines(["tests/test_p2.py .*", "=* 1 passed in *"])
result = testdir.runpytest("-vv", "-rA", "tests/test_p2.py", "--rootdir=tests")
result.stdout.fnmatch_lines(
[
"tests/test_p2.py::TestMore::test_p1 <- test_p1.py PASSED *",
"*= short test summary info =*",
"PASSED tests/test_p2.py::TestMore::test_p1",
]
)
result = testdir.runpytest("-vv", "-rA", "tests/test_p3.py", "--rootdir=tests")
result.stdout.fnmatch_lines(
[
"tests/test_p3.py::TestMore::test_p1 <- test_p1.py FAILED *",
"*_ TestMore.test_p1 _*",
" def test_p1(self):",
"> if self.fail: assert 0",
"E assert 0",
"",
"tests/test_p1.py:5: AssertionError",
"*= short test summary info =*",
"FAILED tests/test_p3.py::TestMore::test_p1 - assert 0",
"*= 1 failed in *",
]
)
def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):
a = testdir.mkpydir("a123")
a.join("test_hello123.py").write(
textwrap.dedent(
"""\
class TestClass(object):
def test_method(self):
pass
"""
)
)
result = testdir.runpytest("-vv")
assert result.ret == 0
result.stdout.fnmatch_lines(["*a123/test_hello123.py*PASS*"])
result.stdout.no_fnmatch_line("* <- *")
@pytest.mark.parametrize("fulltrace", ("", "--fulltrace"))
def test_keyboard_interrupt(self, testdir, fulltrace):
testdir.makepyfile(
"""
def test_foobar():
assert 0
def test_spamegg():
import py; pytest.skip('skip me please!')
def test_interrupt_me():
raise KeyboardInterrupt # simulating the user
"""
)
result = testdir.runpytest(fulltrace, no_reraise_ctrlc=True)
result.stdout.fnmatch_lines(
[
" def test_foobar():",
"> assert 0",
"E assert 0",
"*_keyboard_interrupt.py:6: KeyboardInterrupt*",
]
)
if fulltrace:
result.stdout.fnmatch_lines(
["*raise KeyboardInterrupt # simulating the user*"]
)
else:
result.stdout.fnmatch_lines(
["(to show a full traceback on KeyboardInterrupt use --full-trace)"]
)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
def test_keyboard_in_sessionstart(self, testdir):
testdir.makeconftest(
"""
def pytest_sessionstart():
raise KeyboardInterrupt
"""
)
testdir.makepyfile(
"""
def test_foobar():
pass
"""
)
result = testdir.runpytest(no_reraise_ctrlc=True)
assert result.ret == 2
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
def test_collect_single_item(self, testdir):
"""Use singular 'item' when reporting a single test item"""
testdir.makepyfile(
"""
def test_foobar():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 1 item"])
def test_rewrite(self, testdir, monkeypatch):
config = testdir.parseconfig()
f = StringIO()
monkeypatch.setattr(f, "isatty", lambda *args: True)
tr = TerminalReporter(config, f)
tr._tw.fullwidth = 10
tr.write("hello")
tr.rewrite("hey", erase=True)
assert f.getvalue() == "hello" + "\r" + "hey" + (6 * " ")
def test_report_teststatus_explicit_markup(
self, testdir: Testdir, color_mapping
) -> None:
"""Test that TerminalReporter handles markup explicitly provided by
a pytest_report_teststatus hook."""
testdir.monkeypatch.setenv("PY_COLORS", "1")
testdir.makeconftest(
"""
def pytest_report_teststatus(report):
return 'foo', 'F', ('FOO', {'red': True})
"""
)
testdir.makepyfile(
"""
def test_foobar():
pass
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(["*{red}FOO{reset}*"])
)
class TestCollectonly:
def test_collectonly_basic(self, testdir):
testdir.makepyfile(
"""
def test_func():
pass
"""
)
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(
["<Module test_collectonly_basic.py>", " <Function test_func>"]
)
def test_collectonly_skipped_module(self, testdir):
testdir.makepyfile(
"""
import pytest
pytest.skip("hello")
"""
)
result = testdir.runpytest("--collect-only", "-rs")
result.stdout.fnmatch_lines(["*ERROR collecting*"])
def test_collectonly_displays_test_description(
self, testdir: Testdir, dummy_yaml_custom_test
) -> None:
"""Used dummy_yaml_custom_test for an Item without ``obj``."""
testdir.makepyfile(
"""
def test_with_description():
''' This test has a description.
more1.
more2.'''
"""
)
result = testdir.runpytest("--collect-only", "--verbose")
result.stdout.fnmatch_lines(
[
"<YamlFile test1.yaml>",
" <YamlItem test1.yaml>",
"<Module test_collectonly_displays_test_description.py>",
" <Function test_with_description>",
" This test has a description.",
" ",
" more1.",
" more2.",
],
consecutive=True,
)
def test_collectonly_failed_module(self, testdir):
testdir.makepyfile("""raise ValueError(0)""")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*raise ValueError*", "*1 error*"])
def test_collectonly_fatal(self, testdir):
testdir.makeconftest(
"""
def pytest_collectstart(collector):
assert 0, "urgs"
"""
)
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*INTERNAL*args*"])
assert result.ret == 3
def test_collectonly_simple(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
pass
class TestClass(object):
def test_method(self):
pass
"""
)
result = testdir.runpytest("--collect-only", p)
# assert stderr.startswith("inserting into sys.path")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"*<Module *.py>",
"* <Function test_func1>",
"* <Class TestClass>",
"* <Function test_method>",
]
)
def test_collectonly_error(self, testdir):
p = testdir.makepyfile("import Errlkjqweqwe")
result = testdir.runpytest("--collect-only", p)
assert result.ret == 2
result.stdout.fnmatch_lines(
textwrap.dedent(
"""\
*ERROR*
*ImportError*
*No module named *Errlk*
*1 error*
"""
).strip()
)
def test_collectonly_missing_path(self, testdir):
"""this checks issue 115,
failure in parseargs will cause session
not to have the items attribute
"""
result = testdir.runpytest("--collect-only", "uhm_missing_path")
assert result.ret == 4
result.stderr.fnmatch_lines(["*ERROR: file not found*"])
def test_collectonly_quiet(self, testdir):
testdir.makepyfile("def test_foo(): pass")
result = testdir.runpytest("--collect-only", "-q")
result.stdout.fnmatch_lines(["*test_foo*"])
def test_collectonly_more_quiet(self, testdir):
testdir.makepyfile(test_fun="def test_foo(): pass")
result = testdir.runpytest("--collect-only", "-qq")
result.stdout.fnmatch_lines(["*test_fun.py: 1*"])
class TestFixtureReporting:
    """Terminal output around setup/teardown (xunit-style fixture) failures."""
    def test_setup_fixture_error(self, testdir):
        """A failing setup_function is reported as an ERROR for the test."""
        testdir.makepyfile(
            """
            def setup_function(function):
                print("setup func")
                assert 0
            def test_nada():
                pass
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*ERROR at setup of test_nada*",
                "*setup_function(function):*",
                "*setup func*",
                "*assert 0*",
                "*1 error*",
            ]
        )
        assert result.ret != 0
    def test_teardown_fixture_error(self, testdir):
        """A failing teardown_function is reported with the captured stdout."""
        testdir.makepyfile(
            """
            def test_nada():
                pass
            def teardown_function(function):
                print("teardown func")
                assert 0
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*ERROR at teardown*",
                "*teardown_function(function):*",
                "*assert 0*",
                "*Captured stdout*",
                "*teardown func*",
                "*1 passed*1 error*",
            ]
        )
    def test_teardown_fixture_error_and_test_failure(self, testdir):
        """Both the test failure and the teardown error are reported."""
        testdir.makepyfile(
            """
            def test_fail():
                assert 0, "failingfunc"
            def teardown_function(function):
                print("teardown func")
                assert False
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*ERROR at teardown of test_fail*",
                "*teardown_function(function):*",
                "*assert False*",
                "*Captured stdout*",
                "*teardown func*",
                "*test_fail*",
                "*def test_fail():",
                "*failingfunc*",
                "*1 failed*1 error*",
            ]
        )
    def test_setup_teardown_output_and_test_failure(self, testdir):
        """ Test for issue #442 """
        # Captured setup and teardown output both appear in the failure report.
        testdir.makepyfile(
            """
            def setup_function(function):
                print("setup func")
            def test_fail():
                assert 0, "failingfunc"
            def teardown_function(function):
                print("teardown func")
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*test_fail*",
                "*def test_fail():",
                "*failingfunc*",
                "*Captured stdout setup*",
                "*setup func*",
                "*Captured stdout teardown*",
                "*teardown func*",
                "*1 failed*",
            ]
        )
class TestTerminalFunctional:
    """End-to-end checks of the default terminal output: headers, verbosity
    levels, deselection summaries and short-summary aliases."""

    def test_deselected(self, testdir):
        """-k deselection is summarized in the collection line."""
        testpath = testdir.makepyfile(
            """
            def test_one():
                pass
            def test_two():
                pass
            def test_three():
                pass
            """
        )
        result = testdir.runpytest("-k", "test_two:", testpath)
        result.stdout.fnmatch_lines(
            ["collected 3 items / 1 deselected / 2 selected", "*test_deselected.py ..*"]
        )
        assert result.ret == 0

    def test_deselected_with_hookwrapper(self, testdir):
        """Deselection done from a hookwrapper implementation is also counted."""
        testpath = testdir.makeconftest(
            """
            import pytest

            @pytest.hookimpl(hookwrapper=True)
            def pytest_collection_modifyitems(config, items):
                yield
                deselected = items.pop()
                config.hook.pytest_deselected(items=[deselected])
            """
        )
        testpath = testdir.makepyfile(
            """
            def test_one():
                pass
            def test_two():
                pass
            def test_three():
                pass
            """
        )
        result = testdir.runpytest(testpath)
        result.stdout.fnmatch_lines(
            [
                "collected 3 items / 1 deselected / 2 selected",
                "*= 2 passed, 1 deselected in*",
            ]
        )
        assert result.ret == 0

    def test_show_deselected_items_using_markexpr_before_test_execution(self, testdir):
        """-m deselection is shown in the collection line, not as a separate section."""
        testdir.makepyfile(
            test_show_deselected="""
            import pytest

            @pytest.mark.foo
            def test_foobar():
                pass

            @pytest.mark.bar
            def test_bar():
                pass

            def test_pass():
                pass
            """
        )
        result = testdir.runpytest("-m", "not foo")
        result.stdout.fnmatch_lines(
            [
                "collected 3 items / 1 deselected / 2 selected",
                "*test_show_deselected.py ..*",
                "*= 2 passed, 1 deselected in * =*",
            ]
        )
        result.stdout.no_fnmatch_line("*= 1 deselected =*")
        assert result.ret == 0

    def test_no_skip_summary_if_failure(self, testdir):
        """The skip summary is suppressed by default when there are failures."""
        testdir.makepyfile(
            """
            import pytest
            def test_ok():
                pass
            def test_fail():
                assert 0
            def test_skip():
                pytest.skip("dontshow")
            """
        )
        result = testdir.runpytest()
        assert result.stdout.str().find("skip test summary") == -1
        assert result.ret == 1

    def test_passes(self, testdir):
        """Basic pass reporting when running from inside the test directory."""
        p1 = testdir.makepyfile(
            """
            def test_passes():
                pass

            class TestClass(object):
                def test_method(self):
                    pass
            """
        )
        # chdir into the test dir so the report shows a relative path;
        # restore the old cwd even if runpytest raises.
        old = p1.dirpath().chdir()
        try:
            result = testdir.runpytest()
        finally:
            old.chdir()
        result.stdout.fnmatch_lines(["test_passes.py ..*", "* 2 pass*"])
        assert result.ret == 0

    def test_header_trailer_info(self, testdir, request):
        """Session header shows platform, interpreter/plugin versions, and the
        trailer shows the pass count with elapsed time."""
        testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
        testdir.makepyfile(
            """
            def test_passes():
                pass
            """
        )
        result = testdir.runpytest()
        verinfo = ".".join(map(str, sys.version_info[:3]))
        result.stdout.fnmatch_lines(
            [
                "*===== test session starts ====*",
                "platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s"
                % (
                    sys.platform,
                    verinfo,
                    pytest.__version__,
                    py.__version__,
                    pluggy.__version__,
                ),
                "*test_header_trailer_info.py .*",
                "=* 1 passed*in *.[0-9][0-9]s *=",
            ]
        )
        if request.config.pluginmanager.list_plugin_distinfo():
            result.stdout.fnmatch_lines(["plugins: *"])

    def test_header(self, testdir):
        """The rootdir/inifile/testpaths header line for various configurations."""
        testdir.tmpdir.join("tests").ensure_dir()
        testdir.tmpdir.join("gui").ensure_dir()

        # no ini file
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["rootdir: *test_header0"])

        # with inifile
        testdir.makeini("""[pytest]""")
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["rootdir: *test_header0, inifile: tox.ini"])

        # with testpaths option, and not passing anything in the command-line
        testdir.makeini(
            """
            [pytest]
            testpaths = tests gui
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            ["rootdir: *test_header0, inifile: tox.ini, testpaths: tests, gui"]
        )

        # with testpaths option, passing directory in command-line: do not show testpaths then
        result = testdir.runpytest("tests")
        result.stdout.fnmatch_lines(["rootdir: *test_header0, inifile: tox.ini"])

    def test_showlocals(self, testdir):
        """-l shows local variables (truncating very long values) in tracebacks."""
        p1 = testdir.makepyfile(
            """
            def test_showlocals():
                x = 3
                y = "x" * 5000
                assert 0
            """
        )
        result = testdir.runpytest(p1, "-l")
        result.stdout.fnmatch_lines(
            [
                # "_ _ * Locals *",
                "x* = 3",
                "y* = 'xxxxxx*",
            ]
        )

    def test_showlocals_short(self, testdir):
        """-l also works together with --tb=short."""
        p1 = testdir.makepyfile(
            """
            def test_showlocals_short():
                x = 3
                y = "xxxx"
                assert 0
            """
        )
        result = testdir.runpytest(p1, "-l", "--tb=short")
        result.stdout.fnmatch_lines(
            [
                "test_showlocals_short.py:*",
                " assert 0",
                "E assert 0",
                " x = 3",
                " y = 'xxxx'",
            ]
        )

    @pytest.fixture
    def verbose_testfile(self, testdir):
        # One test per outcome kind: fail, pass, skip, and a yield-style
        # "generator test" (reported as xfail on modern pytest).
        return testdir.makepyfile(
            """
            import pytest
            def test_fail():
                raise ValueError()
            def test_pass():
                pass
            class TestClass(object):
                def test_skip(self):
                    pytest.skip("hello")
            def test_gen():
                def check(x):
                    assert x == 1
                yield check, 0
            """
        )

    def test_verbose_reporting(self, verbose_testfile, testdir):
        """-v prints one node id + outcome word per test."""
        result = testdir.runpytest(
            verbose_testfile, "-v", "-Walways::pytest.PytestWarning"
        )
        result.stdout.fnmatch_lines(
            [
                "*test_verbose_reporting.py::test_fail *FAIL*",
                "*test_verbose_reporting.py::test_pass *PASS*",
                "*test_verbose_reporting.py::TestClass::test_skip *SKIP*",
                "*test_verbose_reporting.py::test_gen *XFAIL*",
            ]
        )
        assert result.ret == 1

    def test_verbose_reporting_xdist(self, verbose_testfile, testdir, pytestconfig):
        """-v output under xdist includes the node id of the failing test."""
        if not pytestconfig.pluginmanager.get_plugin("xdist"):
            pytest.skip("xdist plugin not installed")

        testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
        result = testdir.runpytest(
            verbose_testfile, "-v", "-n 1", "-Walways::pytest.PytestWarning"
        )
        result.stdout.fnmatch_lines(
            ["*FAIL*test_verbose_reporting_xdist.py::test_fail*"]
        )
        assert result.ret == 1

    def test_quiet_reporting(self, testdir):
        """-q drops the header, filename and section separators."""
        p1 = testdir.makepyfile("def test_pass(): pass")
        result = testdir.runpytest(p1, "-q")
        s = result.stdout.str()
        assert "test session starts" not in s
        assert p1.basename not in s
        assert "===" not in s
        assert "passed" in s

    def test_more_quiet_reporting(self, testdir):
        """-qq additionally drops the outcome word."""
        p1 = testdir.makepyfile("def test_pass(): pass")
        result = testdir.runpytest(p1, "-qq")
        s = result.stdout.str()
        assert "test session starts" not in s
        assert p1.basename not in s
        assert "===" not in s
        assert "passed" not in s

    @pytest.mark.parametrize(
        "params", [(), ("--collect-only",)], ids=["no-params", "collect-only"]
    )
    def test_report_collectionfinish_hook(self, testdir, params):
        """Lines returned by pytest_report_collectionfinish are printed after
        collection, both for normal runs and --collect-only."""
        testdir.makeconftest(
            """
            def pytest_report_collectionfinish(config, startdir, items):
                return ['hello from hook: {0} items'.format(len(items))]
            """
        )
        testdir.makepyfile(
            """
            import pytest
            @pytest.mark.parametrize('i', range(3))
            def test(i):
                pass
            """
        )
        result = testdir.runpytest(*params)
        result.stdout.fnmatch_lines(["collected 3 items", "hello from hook: 3 items"])

    def test_summary_f_alias(self, testdir):
        """Test that 'f' and 'F' report chars are aliases and don't show up twice in the summary (#6334)"""
        testdir.makepyfile(
            """
            def test():
                assert False
            """
        )
        result = testdir.runpytest("-rfF")
        expected = "FAILED test_summary_f_alias.py::test - assert False"
        result.stdout.fnmatch_lines([expected])
        assert result.stdout.lines.count(expected) == 1

    def test_summary_s_alias(self, testdir):
        """Test that 's' and 'S' report chars are aliases and don't show up twice in the summary"""
        testdir.makepyfile(
            """
            import pytest

            @pytest.mark.skip
            def test():
                pass
            """
        )
        result = testdir.runpytest("-rsS")
        expected = "SKIPPED [1] test_summary_s_alias.py:3: unconditional skip"
        result.stdout.fnmatch_lines([expected])
        assert result.stdout.lines.count(expected) == 1
def test_fail_extra_reporting(testdir, monkeypatch):
    """-rN suppresses the short summary; the default run shows a FAILED line
    truncated to the terminal width (COLUMNS pinned to 80 for determinism)."""
    monkeypatch.setenv("COLUMNS", "80")
    testdir.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
    result = testdir.runpytest("-rN")
    result.stdout.no_fnmatch_line("*short test summary*")
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(
        [
            "*test summary*",
            "FAILED test_fail_extra_reporting.py::test_this - AssertionError: this_failedt...",
        ]
    )
def test_fail_reporting_on_pass(testdir):
    """-rf prints no summary section when nothing failed."""
    testdir.makepyfile("def test_this(): assert 1")
    result = testdir.runpytest("-rf")
    result.stdout.no_fnmatch_line("*short test summary*")
def test_pass_extra_reporting(testdir):
    """-rp adds PASSED entries to the short summary (absent by default)."""
    testdir.makepyfile("def test_this(): assert 1")
    result = testdir.runpytest()
    result.stdout.no_fnmatch_line("*short test summary*")
    result = testdir.runpytest("-rp")
    result.stdout.fnmatch_lines(["*test summary*", "PASS*test_pass_extra_reporting*"])
def test_pass_reporting_on_fail(testdir):
    """-rp prints no summary section when nothing passed."""
    testdir.makepyfile("def test_this(): assert 0")
    result = testdir.runpytest("-rp")
    result.stdout.no_fnmatch_line("*short test summary*")
def test_pass_output_reporting(testdir):
    """-rPp shows captured output of passing tests in a PASSES section and
    lists them in the short summary; the default run shows neither."""
    testdir.makepyfile(
        """
        def setup_module():
            print("setup_module")

        def teardown_module():
            print("teardown_module")

        def test_pass_has_output():
            print("Four score and seven years ago...")

        def test_pass_no_output():
            pass
        """
    )
    result = testdir.runpytest()
    s = result.stdout.str()
    assert "test_pass_has_output" not in s
    assert "Four score and seven years ago..." not in s
    assert "test_pass_no_output" not in s
    result = testdir.runpytest("-rPp")
    result.stdout.fnmatch_lines(
        [
            "*= PASSES =*",
            "*_ test_pass_has_output _*",
            "*- Captured stdout setup -*",
            "setup_module",
            "*- Captured stdout call -*",
            "Four score and seven years ago...",
            "*- Captured stdout teardown -*",
            "teardown_module",
            "*= short test summary info =*",
            "PASSED test_pass_output_reporting.py::test_pass_has_output",
            "PASSED test_pass_output_reporting.py::test_pass_no_output",
            "*= 2 passed in *",
        ]
    )
def test_color_yes(testdir, color_mapping):
    """--color=yes produces the expected ANSI markup for long and short
    tracebacks (color_mapping expands the {red}/{bold}/... placeholders)."""
    p1 = testdir.makepyfile(
        """
        def fail():
            assert 0

        def test_this():
            fail()
        """
    )
    result = testdir.runpytest("--color=yes", str(p1))
    # Full ordered-markup comparison only works on terminals that keep
    # escape-sequence ordering stable.
    color_mapping.requires_ordered_markup(result)
    result.stdout.fnmatch_lines(
        color_mapping.format_for_fnmatch(
            [
                "{bold}=*= test session starts =*={reset}",
                "collected 1 item",
                "",
                "test_color_yes.py {red}F{reset}{red} * [100%]{reset}",
                "",
                "=*= FAILURES =*=",
                "{red}{bold}_*_ test_this _*_{reset}",
                "",
                " {kw}def{hl-reset} {function}test_this{hl-reset}():",
                "> fail()",
                "",
                "{bold}{red}test_color_yes.py{reset}:5: ",
                "_ _ * _ _*",
                "",
                " {kw}def{hl-reset} {function}fail{hl-reset}():",
                "> {kw}assert{hl-reset} {number}0{hl-reset}",
                "{bold}{red}E assert 0{reset}",
                "",
                "{bold}{red}test_color_yes.py{reset}:2: AssertionError",
                "{red}=*= {red}{bold}1 failed{reset}{red} in *s{reset}{red} =*={reset}",
            ]
        )
    )
    result = testdir.runpytest("--color=yes", "--tb=short", str(p1))
    result.stdout.fnmatch_lines(
        color_mapping.format_for_fnmatch(
            [
                "{bold}=*= test session starts =*={reset}",
                "collected 1 item",
                "",
                "test_color_yes.py {red}F{reset}{red} * [100%]{reset}",
                "",
                "=*= FAILURES =*=",
                "{red}{bold}_*_ test_this _*_{reset}",
                "{bold}{red}test_color_yes.py{reset}:5: in test_this",
                " fail()",
                "{bold}{red}test_color_yes.py{reset}:2: in fail",
                " {kw}assert{hl-reset} {number}0{hl-reset}",
                "{bold}{red}E assert 0{reset}",
                "{red}=*= {red}{bold}1 failed{reset}{red} in *s{reset}{red} =*={reset}",
            ]
        )
    )
def test_color_no(testdir):
    """--color=no emits no ANSI escape sequences."""
    testdir.makepyfile("def test_this(): assert 1")
    result = testdir.runpytest("--color=no")
    assert "test session starts" in result.stdout.str()
    # \x1b[1m is the "bold" escape -- it must never appear.
    result.stdout.no_fnmatch_line("*\x1b[1m*")
@pytest.mark.parametrize("verbose", [True, False])
def test_color_yes_collection_on_non_atty(testdir, verbose):
    """skip collect progress report when working on non-terminals.

    #1397
    """
    testdir.makepyfile(
        """
        import pytest
        @pytest.mark.parametrize('i', range(10))
        def test_this(i):
            assert 1
        """
    )
    args = ["--color=yes"]
    if verbose:
        args.append("-vv")
    result = testdir.runpytest(*args)
    assert "test session starts" in result.stdout.str()
    assert "\x1b[1m" in result.stdout.str()
    # The live "collecting N items" updates are terminal-only.
    result.stdout.no_fnmatch_line("*collecting 10 items*")
    if verbose:
        assert "collecting ..." in result.stdout.str()
    assert "collected 10 items" in result.stdout.str()
def test_getreportopt():
    """getreportopt() merges -r chars with defaults: 'a' expands to all
    non-pass chars, 'N' clears, and 'w' is dropped by --disable-warnings."""
    from _pytest.terminal import _REPORTCHARS_DEFAULT

    # Minimal stand-in for a pytest Config with just the attributes
    # getreportopt() reads.
    class Config:
        class Option:
            reportchars = _REPORTCHARS_DEFAULT
            disable_warnings = False

        option = Option()

    config = Config()

    assert _REPORTCHARS_DEFAULT == "fE"

    # Default.
    assert getreportopt(config) == "wfE"

    config.option.reportchars = "sf"
    assert getreportopt(config) == "wsf"

    config.option.reportchars = "sfxw"
    assert getreportopt(config) == "sfxw"

    config.option.reportchars = "a"
    assert getreportopt(config) == "wsxXEf"

    config.option.reportchars = "N"
    assert getreportopt(config) == "w"

    config.option.reportchars = "NwfE"
    assert getreportopt(config) == "wfE"

    config.option.reportchars = "NfENx"
    assert getreportopt(config) == "wx"

    # Now with --disable-warnings.
    config.option.disable_warnings = True
    config.option.reportchars = "a"
    assert getreportopt(config) == "sxXEf"

    config.option.reportchars = "sfx"
    assert getreportopt(config) == "sfx"

    config.option.reportchars = "sfxw"
    assert getreportopt(config) == "sfx"

    config.option.reportchars = "a"
    assert getreportopt(config) == "sxXEf"

    config.option.reportchars = "A"
    assert getreportopt(config) == "PpsxXEf"

    config.option.reportchars = "AN"
    assert getreportopt(config) == ""

    config.option.reportchars = "NwfE"
    assert getreportopt(config) == "fE"
def test_terminalreporter_reportopt_addopts(testdir):
    """Report chars coming from addopts in the ini file reach
    TerminalReporter.hasopt()."""
    testdir.makeini("[pytest]\naddopts=-rs")
    testdir.makepyfile(
        """
        import pytest

        @pytest.fixture
        def tr(request):
            tr = request.config.pluginmanager.getplugin("terminalreporter")
            return tr

        def test_opt(tr):
            assert tr.hasopt('skipped')
            assert not tr.hasopt('qwe')
        """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_tbstyle_short(testdir):
    """--tb=short omits local variables and source context that the default
    long traceback includes."""
    p = testdir.makepyfile(
        """
        import pytest

        @pytest.fixture
        def arg(request):
            return 42

        def test_opt(arg):
            x = 0
            assert x
        """
    )
    result = testdir.runpytest("--tb=short")
    s = result.stdout.str()
    assert "arg = 42" not in s
    assert "x = 0" not in s
    result.stdout.fnmatch_lines(["*%s:8*" % p.basename, " assert x", "E assert*"])
    result = testdir.runpytest()
    s = result.stdout.str()
    assert "x = 0" in s
    assert "assert x" in s
def test_traceconfig(testdir):
    """--traceconfig prints the active-plugins listing."""
    result = testdir.runpytest("--traceconfig")
    result.stdout.fnmatch_lines(["*active plugins*"])
    # No test files exist in the temp dir, hence NO_TESTS_COLLECTED.
    assert result.ret == ExitCode.NO_TESTS_COLLECTED
class TestGenericReporting:
    """ this test class can be subclassed with a different option
    provider to run e.g. distributed tests.
    """

    def test_collect_fail(self, testdir, option):
        """A module-level import error is reported as a collection error."""
        testdir.makepyfile("import xyz\n")
        result = testdir.runpytest(*option.args)
        result.stdout.fnmatch_lines(
            ["ImportError while importing*", "*No module named *xyz*", "*1 error*"]
        )

    def test_maxfailures(self, testdir, option):
        """--maxfail=2 stops the session after the second failure."""
        testdir.makepyfile(
            """
            def test_1():
                assert 0
            def test_2():
                assert 0
            def test_3():
                assert 0
            """
        )
        result = testdir.runpytest("--maxfail=2", *option.args)
        result.stdout.fnmatch_lines(
            [
                "*def test_1():*",
                "*def test_2():*",
                "*! stopping after 2 failures !*",
                "*2 failed*",
            ]
        )

    def test_maxfailures_with_interrupted(self, testdir):
        """When a test interrupts the session via session.shouldstop, both the
        maxfail message and the interruption reason are shown."""
        testdir.makepyfile(
            """
            def test(request):
                request.session.shouldstop = "session_interrupted"
                assert 0
            """
        )
        result = testdir.runpytest("--maxfail=1", "-ra")
        result.stdout.fnmatch_lines(
            [
                "*= short test summary info =*",
                "FAILED *",
                "*! stopping after 1 failures !*",
                "*! session_interrupted !*",
                "*= 1 failed in*",
            ]
        )

    def test_tb_option(self, testdir, option):
        """--tb=long/short/no control how much traceback context is shown."""
        testdir.makepyfile(
            """
            import pytest
            def g():
                raise IndexError
            def test_func():
                print(6*7)
                g()  # --calling--
            """
        )
        for tbopt in ["long", "short", "no"]:
            print("testing --tb=%s..." % tbopt)
            result = testdir.runpytest("-rN", "--tb=%s" % tbopt)
            s = result.stdout.str()
            if tbopt == "long":
                # long: source lines of *callers* are included
                assert "print(6*7)" in s
            else:
                assert "print(6*7)" not in s
            if tbopt != "no":
                assert "--calling--" in s
                assert "IndexError" in s
            else:
                assert "FAILURES" not in s
                assert "--calling--" not in s
                assert "IndexError" not in s

    def test_tb_crashline(self, testdir, option):
        """--tb=line compresses each failure to a single file:line message."""
        p = testdir.makepyfile(
            """
            import pytest
            def g():
                raise IndexError
            def test_func1():
                print(6*7)
                g()  # --calling--
            def test_func2():
                assert 0, "hello"
            """
        )
        result = testdir.runpytest("--tb=line")
        bn = p.basename
        result.stdout.fnmatch_lines(
            ["*%s:3: IndexError*" % bn, "*%s:8: AssertionError: hello*" % bn]
        )
        s = result.stdout.str()
        assert "def test_func2" not in s

    def test_pytest_report_header(self, testdir, option):
        """pytest_report_header results from rootdir and sub-dir conftests are
        both printed; a hook may return a string or a list of lines."""
        testdir.makeconftest(
            """
            def pytest_sessionstart(session):
                session.config._somevalue = 42
            def pytest_report_header(config):
                return "hello: %s" % config._somevalue
            """
        )
        testdir.mkdir("a").join("conftest.py").write(
            """
def pytest_report_header(config, startdir):
    return ["line1", str(startdir)]
"""
        )
        result = testdir.runpytest("a")
        result.stdout.fnmatch_lines(["*hello: 42*", "line1", str(testdir.tmpdir)])

    def test_show_capture(self, testdir):
        """--show-capture selects which captured streams (stdout/stderr/log)
        appear in failure reports."""
        testdir.makepyfile(
            """
            import sys
            import logging
            def test_one():
                sys.stdout.write('!This is stdout!')
                sys.stderr.write('!This is stderr!')
                logging.warning('!This is a warning log msg!')
                assert False, 'Something failed'
            """
        )

        result = testdir.runpytest("--tb=short")
        result.stdout.fnmatch_lines(
            [
                "!This is stdout!",
                "!This is stderr!",
                "*WARNING*!This is a warning log msg!",
            ]
        )

        result = testdir.runpytest("--show-capture=all", "--tb=short")
        result.stdout.fnmatch_lines(
            [
                "!This is stdout!",
                "!This is stderr!",
                "*WARNING*!This is a warning log msg!",
            ]
        )

        stdout = testdir.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
        assert "!This is stderr!" not in stdout
        assert "!This is stdout!" in stdout
        assert "!This is a warning log msg!" not in stdout

        stdout = testdir.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" in stdout
        assert "!This is a warning log msg!" not in stdout

        stdout = testdir.runpytest("--show-capture=log", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" not in stdout
        assert "!This is a warning log msg!" in stdout

        stdout = testdir.runpytest("--show-capture=no", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" not in stdout
        assert "!This is a warning log msg!" not in stdout

    def test_show_capture_with_teardown_logs(self, testdir):
        """Ensure that the capturing of teardown logs honor --show-capture setting"""
        testdir.makepyfile(
            """
            import logging
            import sys
            import pytest

            @pytest.fixture(scope="function", autouse="True")
            def hook_each_test(request):
                yield
                sys.stdout.write("!stdout!")
                sys.stderr.write("!stderr!")
                logging.warning("!log!")

            def test_func():
                assert False
            """
        )

        result = testdir.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
        assert "!stdout!" in result
        assert "!stderr!" not in result
        assert "!log!" not in result

        result = testdir.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
        assert "!stdout!" not in result
        assert "!stderr!" in result
        assert "!log!" not in result

        result = testdir.runpytest("--show-capture=log", "--tb=short").stdout.str()
        assert "!stdout!" not in result
        assert "!stderr!" not in result
        assert "!log!" in result

        result = testdir.runpytest("--show-capture=no", "--tb=short").stdout.str()
        assert "!stdout!" not in result
        assert "!stderr!" not in result
        assert "!log!" not in result
@pytest.mark.xfail("not hasattr(os, 'dup')")
def test_fdopen_kept_alive_issue124(testdir):
    """A file object wrapping fd 1 can be kept alive across tests and closed
    later without breaking capture (#124). Requires os.dup."""
    testdir.makepyfile(
        """
        import os, sys
        k = []
        def test_open_file_and_keep_alive(capfd):
            stdout = os.fdopen(1, 'w', 1)
            k.append(stdout)

        def test_close_kept_alive_file():
            stdout = k.pop()
            stdout.close()
        """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(["*2 passed*"])
def test_tbstyle_native_setup_error(testdir):
    """--tb=native formats fixture setup errors as plain Python tracebacks."""
    testdir.makepyfile(
        """
        import pytest
        @pytest.fixture
        def setup_error_fixture():
            raise Exception("error in exception")

        def test_error_fixture(setup_error_fixture):
            pass
        """
    )
    result = testdir.runpytest("--tb=native")
    result.stdout.fnmatch_lines(
        ['*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*']
    )
def test_terminal_summary(testdir):
    """pytest_terminal_summary can write sections and receives the exit status
    (5 == NO_TESTS_COLLECTED here, since no tests exist)."""
    testdir.makeconftest(
        """
        def pytest_terminal_summary(terminalreporter, exitstatus):
            w = terminalreporter
            w.section("hello")
            w.line("world")
            w.line("exitstatus: {0}".format(exitstatus))
        """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(
        """
        *==== hello ====*
        world
        exitstatus: 5
    """
    )
@pytest.mark.filterwarnings("default")
def test_terminal_summary_warnings_are_displayed(testdir):
    """Test that warnings emitted during pytest_terminal_summary are displayed.
    (#1305).
    """
    testdir.makeconftest(
        """
        import warnings
        def pytest_terminal_summary(terminalreporter):
            warnings.warn(UserWarning('internal warning'))
        """
    )
    testdir.makepyfile(
        """
        def test_failure():
            import warnings
            warnings.warn("warning_from_" + "test")
            assert 0
        """
    )
    result = testdir.runpytest("-ra")
    result.stdout.fnmatch_lines(
        [
            "*= warnings summary =*",
            "*warning_from_test*",
            "*= short test summary info =*",
            "*= warnings summary (final) =*",
            "*conftest.py:3:*internal warning",
            "*== 1 failed, 2 warnings in *",
        ]
    )
    result.stdout.no_fnmatch_line("*None*")
    stdout = result.stdout.str()
    # Each warning and each summary header must appear exactly once
    # (the second header is the "(final)" one).
    assert stdout.count("warning_from_test") == 1
    assert stdout.count("=== warnings summary ") == 2
@pytest.mark.filterwarnings("default")
def test_terminal_summary_warnings_header_once(testdir):
    """Without terminal-summary warnings, the warnings header appears once."""
    testdir.makepyfile(
        """
        def test_failure():
            import warnings
            warnings.warn("warning_from_" + "test")
            assert 0
        """
    )
    result = testdir.runpytest("-ra")
    result.stdout.fnmatch_lines(
        [
            "*= warnings summary =*",
            "*warning_from_test*",
            "*= short test summary info =*",
            "*== 1 failed, 1 warning in *",
        ]
    )
    result.stdout.no_fnmatch_line("*None*")
    stdout = result.stdout.str()
    assert stdout.count("warning_from_test") == 1
    assert stdout.count("=== warnings summary ") == 1
@pytest.fixture(scope="session")
def tr() -> TerminalReporter:
    """Session-scoped TerminalReporter built from a fresh default config."""
    config = _pytest.config._prepareconfig()
    return TerminalReporter(config)
@pytest.mark.parametrize(
    "exp_color, exp_line, stats_arg",
    [
        # The method under test only cares about the length of each
        # dict value, not the actual contents, so tuples of anything
        # suffice
        # Important statuses -- the highest priority of these always wins
        ("red", [("1 failed", {"bold": True, "red": True})], {"failed": (1,)}),
        (
            "red",
            [
                ("1 failed", {"bold": True, "red": True}),
                ("1 passed", {"bold": False, "green": True}),
            ],
            {"failed": (1,), "passed": (1,)},
        ),
        ("red", [("1 error", {"bold": True, "red": True})], {"error": (1,)}),
        ("red", [("2 errors", {"bold": True, "red": True})], {"error": (1, 2)}),
        (
            "red",
            [
                ("1 passed", {"bold": False, "green": True}),
                ("1 error", {"bold": True, "red": True}),
            ],
            {"error": (1,), "passed": (1,)},
        ),
        # (a status that's not known to the code)
        ("yellow", [("1 weird", {"bold": True, "yellow": True})], {"weird": (1,)}),
        (
            "yellow",
            [
                ("1 passed", {"bold": False, "green": True}),
                ("1 weird", {"bold": True, "yellow": True}),
            ],
            {"weird": (1,), "passed": (1,)},
        ),
        ("yellow", [("1 warning", {"bold": True, "yellow": True})], {"warnings": (1,)}),
        (
            "yellow",
            [
                ("1 passed", {"bold": False, "green": True}),
                ("1 warning", {"bold": True, "yellow": True}),
            ],
            {"warnings": (1,), "passed": (1,)},
        ),
        (
            "green",
            [("5 passed", {"bold": True, "green": True})],
            {"passed": (1, 2, 3, 4, 5)},
        ),
        # "Boring" statuses.  These have no effect on the color of the summary
        # line.  Thus, if *every* test has a boring status, the summary line stays
        # at its default color, i.e. yellow, to warn the user that the test run
        # produced no useful information
        ("yellow", [("1 skipped", {"bold": True, "yellow": True})], {"skipped": (1,)}),
        (
            "green",
            [
                ("1 passed", {"bold": True, "green": True}),
                ("1 skipped", {"bold": False, "yellow": True}),
            ],
            {"skipped": (1,), "passed": (1,)},
        ),
        (
            "yellow",
            [("1 deselected", {"bold": True, "yellow": True})],
            {"deselected": (1,)},
        ),
        (
            "green",
            [
                ("1 passed", {"bold": True, "green": True}),
                ("1 deselected", {"bold": False, "yellow": True}),
            ],
            {"deselected": (1,), "passed": (1,)},
        ),
        ("yellow", [("1 xfailed", {"bold": True, "yellow": True})], {"xfailed": (1,)}),
        (
            "green",
            [
                ("1 passed", {"bold": True, "green": True}),
                ("1 xfailed", {"bold": False, "yellow": True}),
            ],
            {"xfailed": (1,), "passed": (1,)},
        ),
        ("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": (1,)}),
        (
            "yellow",
            [
                ("1 passed", {"bold": False, "green": True}),
                ("1 xpassed", {"bold": True, "yellow": True}),
            ],
            {"xpassed": (1,), "passed": (1,)},
        ),
        # Likewise if no tests were found at all
        ("yellow", [("no tests ran", {"yellow": True})], {}),
        # Test the empty-key special case
        ("yellow", [("no tests ran", {"yellow": True})], {"": (1,)}),
        (
            "green",
            [("1 passed", {"bold": True, "green": True})],
            {"": (1,), "passed": (1,)},
        ),
        # A couple more complex combinations
        (
            "red",
            [
                ("1 failed", {"bold": True, "red": True}),
                ("2 passed", {"bold": False, "green": True}),
                ("3 xfailed", {"bold": False, "yellow": True}),
            ],
            {"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)},
        ),
        (
            "green",
            [
                ("1 passed", {"bold": True, "green": True}),
                ("2 skipped", {"bold": False, "yellow": True}),
                ("3 deselected", {"bold": False, "yellow": True}),
                ("2 xfailed", {"bold": False, "yellow": True}),
            ],
            {
                "passed": (1,),
                "skipped": (1, 2),
                "deselected": (1, 2, 3),
                "xfailed": (1, 2),
            },
        ),
    ],
)
def test_summary_stats(
    tr: TerminalReporter,
    exp_line: List[Tuple[str, Dict[str, bool]]],
    exp_color: str,
    stats_arg: Dict[str, List],
) -> None:
    """build_summary_stats_line() picks color/markup from the stats mapping."""
    tr.stats = stats_arg

    # Fake "_is_last_item" to be True.
    class fake_session:
        testscollected = 0

    tr._session = fake_session  # type: ignore[assignment] # noqa: F821
    assert tr._is_last_item

    # Reset cache.
    tr._main_color = None

    print("Based on stats: %s" % stats_arg)
    print('Expect summary: "{}"; with color "{}"'.format(exp_line, exp_color))
    (line, color) = tr.build_summary_stats_line()
    print('Actually got: "{}"; with color "{}"'.format(line, color))
    assert line == exp_line
    assert color == exp_color
def test_skip_counting_towards_summary(tr):
    """Reports with count_towards_summary == False are excluded from the
    summary-line counts."""
    class DummyReport(BaseReport):
        count_towards_summary = True

    r1 = DummyReport()
    r2 = DummyReport()
    tr.stats = {"failed": (r1, r2)}
    tr._main_color = None
    res = tr.build_summary_stats_line()
    assert res == ([("2 failed", {"bold": True, "red": True})], "red")

    r1.count_towards_summary = False
    tr.stats = {"failed": (r1, r2)}
    # Invalidate the cached color so the line is recomputed.
    tr._main_color = None
    res = tr.build_summary_stats_line()
    assert res == ([("1 failed", {"bold": True, "red": True})], "red")
class TestClassicOutputStyle:
    """Ensure classic output style works as expected (#3883)"""

    @pytest.fixture
    def test_files(self, testdir):
        # Two top-level files plus one in a sub-package: 3 passes, 2 failures.
        testdir.makepyfile(
            **{
                "test_one.py": "def test_one(): pass",
                "test_two.py": "def test_two(): assert 0",
                "sub/test_three.py": """
                    def test_three_1(): pass
                    def test_three_2(): assert 0
                    def test_three_3(): pass
                """,
            }
        )

    def test_normal_verbosity(self, testdir, test_files):
        """Classic style: one line per file with dot/F characters."""
        result = testdir.runpytest("-o", "console_output_style=classic")
        result.stdout.fnmatch_lines(
            [
                "test_one.py .",
                "test_two.py F",
                "sub{}test_three.py .F.".format(os.sep),
                "*2 failed, 3 passed in*",
            ]
        )

    def test_verbose(self, testdir, test_files):
        """Classic style with -v: node id plus outcome word, no percentages."""
        result = testdir.runpytest("-o", "console_output_style=classic", "-v")
        result.stdout.fnmatch_lines(
            [
                "test_one.py::test_one PASSED",
                "test_two.py::test_two FAILED",
                "sub{}test_three.py::test_three_1 PASSED".format(os.sep),
                "sub{}test_three.py::test_three_2 FAILED".format(os.sep),
                "sub{}test_three.py::test_three_3 PASSED".format(os.sep),
                "*2 failed, 3 passed in*",
            ]
        )

    def test_quiet(self, testdir, test_files):
        """Classic style with -q: bare progress characters only."""
        result = testdir.runpytest("-o", "console_output_style=classic", "-q")
        result.stdout.fnmatch_lines([".F.F.", "*2 failed, 3 passed in*"])
class TestProgressOutputStyle:
    """The percentage/count progress indicators at the right of each line."""

    @pytest.fixture
    def many_tests_files(self, testdir):
        # 10 + 5 + 5 = 20 parametrized tests across three files.
        testdir.makepyfile(
            test_bar="""
                import pytest
                @pytest.mark.parametrize('i', range(10))
                def test_bar(i): pass
            """,
            test_foo="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_foo(i): pass
            """,
            test_foobar="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_foobar(i): pass
            """,
        )

    def test_zero_tests_collected(self, testdir):
        """Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being
        actually collected (#2971)."""
        testdir.makeconftest(
            """
            def pytest_collection_modifyitems(items, config):
                from _pytest.runner import CollectReport
                for node_id in ('nodeid1', 'nodeid2'):
                    rep = CollectReport(node_id, 'passed', None, None)
                    rep.when = 'passed'
                    rep.duration = 0.1
                    config.hook.pytest_runtest_logreport(report=rep)
            """
        )
        output = testdir.runpytest()
        # Progress computation must not divide by the (zero) collected count.
        output.stdout.no_fnmatch_line("*ZeroDivisionError*")
        output.stdout.fnmatch_lines(["=* 2 passed in *="])

    def test_normal(self, many_tests_files, testdir):
        """Default style shows a cumulative percentage after each file."""
        output = testdir.runpytest()
        output.stdout.re_match_lines(
            [
                r"test_bar.py \.{10} \s+ \[ 50%\]",
                r"test_foo.py \.{5} \s+ \[ 75%\]",
                r"test_foobar.py \.{5} \s+ \[100%\]",
            ]
        )

    def test_colored_progress(self, testdir, monkeypatch, color_mapping):
        """The progress percentage is colored by the worst outcome so far:
        green (all pass), yellow (xfail/warning), red (failure)."""
        monkeypatch.setenv("PY_COLORS", "1")
        testdir.makepyfile(
            test_axfail="""
                import pytest
                @pytest.mark.xfail
                def test_axfail(): assert 0
            """,
            test_bar="""
                import pytest
                @pytest.mark.parametrize('i', range(10))
                def test_bar(i): pass
            """,
            test_foo="""
                import pytest
                import warnings
                @pytest.mark.parametrize('i', range(5))
                def test_foo(i):
                    warnings.warn(DeprecationWarning("collection"))
                    pass
            """,
            test_foobar="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_foobar(i): raise ValueError()
            """,
        )
        result = testdir.runpytest()
        result.stdout.re_match_lines(
            color_mapping.format_for_rematch(
                [
                    r"test_axfail.py {yellow}x{reset}{green} \s+ \[ 4%\]{reset}",
                    r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 52%\]{reset}",
                    r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 76%\]{reset}",
                    r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}",
                ]
            )
        )

        # Only xfail should have yellow progress indicator.
        result = testdir.runpytest("test_axfail.py")
        result.stdout.re_match_lines(
            color_mapping.format_for_rematch(
                [
                    r"test_axfail.py {yellow}x{reset}{yellow} \s+ \[100%\]{reset}",
                    r"^{yellow}=+ ({yellow}{bold}|{bold}{yellow})1 xfailed{reset}{yellow} in ",
                ]
            )
        )

    def test_count(self, many_tests_files, testdir):
        """console_output_style=count shows [done/total] instead of percent."""
        testdir.makeini(
            """
            [pytest]
            console_output_style = count
            """
        )
        output = testdir.runpytest()
        output.stdout.re_match_lines(
            [
                r"test_bar.py \.{10} \s+ \[10/20\]",
                r"test_foo.py \.{5} \s+ \[15/20\]",
                r"test_foobar.py \.{5} \s+ \[20/20\]",
            ]
        )

    def test_verbose(self, many_tests_files, testdir):
        """-v shows the per-test percentage after each node id."""
        output = testdir.runpytest("-v")
        output.stdout.re_match_lines(
            [
                r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 5%\]",
                r"test_foo.py::test_foo\[4\] PASSED \s+ \[ 75%\]",
                r"test_foobar.py::test_foobar\[4\] PASSED \s+ \[100%\]",
            ]
        )

    def test_verbose_count(self, many_tests_files, testdir):
        """-v combined with count style shows [done/total] per test."""
        testdir.makeini(
            """
            [pytest]
            console_output_style = count
            """
        )
        output = testdir.runpytest("-v")
        output.stdout.re_match_lines(
            [
                r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 1/20\]",
                r"test_foo.py::test_foo\[4\] PASSED \s+ \[15/20\]",
                r"test_foobar.py::test_foobar\[4\] PASSED \s+ \[20/20\]",
            ]
        )

    def test_xdist_normal(self, many_tests_files, testdir, monkeypatch):
        """Under xdist, percentages collapse to a single combined line."""
        pytest.importorskip("xdist")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
        output = testdir.runpytest("-n2")
        output.stdout.re_match_lines([r"\.{20} \s+ \[100%\]"])

    def test_xdist_normal_count(self, many_tests_files, testdir, monkeypatch):
        """Count style also works under xdist."""
        pytest.importorskip("xdist")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
        testdir.makeini(
            """
            [pytest]
            console_output_style = count
            """
        )
        output = testdir.runpytest("-n2")
        output.stdout.re_match_lines([r"\.{20} \s+ \[20/20\]"])

    def test_xdist_verbose(self, many_tests_files, testdir, monkeypatch):
        """-v under xdist prefixes each line with the worker id; ordering
        across workers is nondeterministic, hence the *_random matchers."""
        pytest.importorskip("xdist")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
        output = testdir.runpytest("-n2", "-v")
        output.stdout.re_match_lines_random(
            [
                r"\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]",
                r"\[gw\d\] \[\s*\d+%\] PASSED test_foo.py::test_foo\[1\]",
                r"\[gw\d\] \[\s*\d+%\] PASSED test_foobar.py::test_foobar\[1\]",
            ]
        )
        output.stdout.fnmatch_lines_random(
            [
                line.translate(TRANS_FNMATCH)
                for line in [
                    "test_bar.py::test_bar[0] ",
                    "test_foo.py::test_foo[0] ",
                    "test_foobar.py::test_foobar[0] ",
                    "[gw?] [ 5%] PASSED test_*[?] ",
                    "[gw?] [ 10%] PASSED test_*[?] ",
                    "[gw?] [ 55%] PASSED test_*[?] ",
                    "[gw?] [ 60%] PASSED test_*[?] ",
                    "[gw?] [ 95%] PASSED test_*[?] ",
                    "[gw?] [100%] PASSED test_*[?] ",
                ]
            ]
        )

    def test_capture_no(self, many_tests_files, testdir):
        """-s / --capture=no disables the progress indicator entirely."""
        output = testdir.runpytest("-s")
        output.stdout.re_match_lines(
            [r"test_bar.py \.{10}", r"test_foo.py \.{5}", r"test_foobar.py \.{5}"]
        )

        output = testdir.runpytest("--capture=no")
        output.stdout.no_fnmatch_line("*%]*")
class TestProgressWithTeardown:
    """Ensure we show the correct percentages for tests that fail during teardown (#3088)"""
    @pytest.fixture
    def contest_with_teardown_fixture(self, testdir):
        # Writes a conftest.py providing a fixture whose teardown always fails,
        # so every test using it reports '.' (pass) followed by 'E' (error).
        testdir.makeconftest(
            """
            import pytest
            @pytest.fixture
            def fail_teardown():
                yield
                assert False
            """
        )
    @pytest.fixture
    def many_files(self, testdir, contest_with_teardown_fixture):
        # Two test modules (5 + 15 parametrized tests), all using the
        # failing-teardown fixture above.
        testdir.makepyfile(
            test_bar="""
            import pytest
            @pytest.mark.parametrize('i', range(5))
            def test_bar(fail_teardown, i):
                pass
            """,
            test_foo="""
            import pytest
            @pytest.mark.parametrize('i', range(15))
            def test_foo(fail_teardown, i):
                pass
            """,
        )
    def test_teardown_simple(self, testdir, contest_with_teardown_fixture):
        """A single pass-then-teardown-error test reports '.E' at 100%."""
        testdir.makepyfile(
            """
            def test_foo(fail_teardown):
                pass
            """
        )
        output = testdir.runpytest()
        output.stdout.re_match_lines([r"test_teardown_simple.py \.E\s+\[100%\]"])
    def test_teardown_with_test_also_failing(
        self, testdir, contest_with_teardown_fixture
    ):
        """When both the test and its teardown fail, both FAILED and ERROR are summarized."""
        testdir.makepyfile(
            """
            def test_foo(fail_teardown):
                assert 0
            """
        )
        output = testdir.runpytest("-rfE")
        output.stdout.re_match_lines(
            [
                r"test_teardown_with_test_also_failing.py FE\s+\[100%\]",
                "FAILED test_teardown_with_test_also_failing.py::test_foo - assert 0",
                "ERROR test_teardown_with_test_also_failing.py::test_foo - assert False",
            ]
        )
    def test_teardown_many(self, testdir, many_files):
        """Progress percentages count tests (not reports) even with teardown errors."""
        output = testdir.runpytest()
        output.stdout.re_match_lines(
            [r"test_bar.py (\.E){5}\s+\[ 25%\]", r"test_foo.py (\.E){15}\s+\[100%\]"]
        )
    def test_teardown_many_verbose(
        self, testdir: Testdir, many_files, color_mapping
    ) -> None:
        """Verbose mode shows PASSED and ERROR lines sharing the same percentage."""
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines(
            color_mapping.format_for_fnmatch(
                [
                    "test_bar.py::test_bar[0] PASSED * [ 5%]",
                    "test_bar.py::test_bar[0] ERROR * [ 5%]",
                    "test_bar.py::test_bar[4] PASSED * [ 25%]",
                    "test_foo.py::test_foo[14] PASSED * [100%]",
                    "test_foo.py::test_foo[14] ERROR * [100%]",
                    "=* 20 passed, 20 errors in *",
                ]
            )
        )
    def test_xdist_normal(self, many_files, testdir, monkeypatch):
        """Under xdist, '.'/'E' characters interleave but still end at [100%]."""
        pytest.importorskip("xdist")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
        output = testdir.runpytest("-n2")
        output.stdout.re_match_lines([r"[\.E]{40} \s+ \[100%\]"])
def test_skip_reasons_folding():
    """Skip reports sharing (path, lineno, message) fold into one summary entry.

    Three skip events (two from test execution, one from collection) carry the
    same longrepr, so ``_folded_skips`` must fold them into a single entry with
    a count of 3 and the shared location/reason.
    """
    path = "xyz"
    lineno = 3
    message = "justso"
    longrepr = (path, lineno, message)
    class X:
        pass
    ev1 = X()
    ev1.when = "execute"
    ev1.skipped = True
    ev1.longrepr = longrepr
    ev2 = X()
    ev2.when = "execute"
    ev2.longrepr = longrepr
    ev2.skipped = True
    # ev3 might be a collection report
    ev3 = X()
    ev3.when = "collect"
    ev3.longrepr = longrepr
    ev3.skipped = True
    values = _folded_skips(py.path.local(), [ev1, ev2, ev3])
    assert len(values) == 1
    # Fix: the original unpacked into a variable also named ``lineno``, which
    # shadowed the expected value and made ``assert lineno == lineno`` a
    # tautology — the folded line number was never actually verified.
    num, fspath, reported_lineno, reason = values[0]
    assert num == 3
    assert fspath == path
    assert reported_lineno == lineno
    assert reason == message
def test_line_with_reprcrash(monkeypatch):
    """Short-summary lines are truncated to the terminal width, unicode-safely.

    ``_get_line_with_reprcrash_message`` appends ``- <message>`` to the
    ``FAILED <nodeid>`` line only when it fits within ``width`` display
    columns (wide characters such as emoji count as 2 columns).
    """
    import _pytest.terminal
    from wcwidth import wcswidth
    mocked_verbose_word = "FAILED"
    mocked_pos = "some::nodeid"
    def mock_get_pos(*args):
        # Pin the nodeid so expected strings below are deterministic.
        return mocked_pos
    monkeypatch.setattr(_pytest.terminal, "_get_pos", mock_get_pos)
    class config:
        # Bare namespace standing in for a pytest Config object.
        pass
    class rep:
        # Minimal stand-in for a TestReport with a crash message.
        def _get_verbose_word(self, *args):
            return mocked_verbose_word
        class longrepr:
            class reprcrash:
                pass
    def check(msg, width, expected):
        __tracebackhide__ = True
        if msg:
            rep.longrepr.reprcrash.message = msg
        actual = _get_line_with_reprcrash_message(config, rep(), width)
        assert actual == expected
        # Unless the message was dropped entirely, the result must fit the
        # width both in characters and in display columns.
        if actual != "{} {}".format(mocked_verbose_word, mocked_pos):
            assert len(actual) <= width
            assert wcswidth(actual) <= width
    # AttributeError with message
    check(None, 80, "FAILED some::nodeid")
    check("msg", 80, "FAILED some::nodeid - msg")
    check("msg", 3, "FAILED some::nodeid")
    check("msg", 24, "FAILED some::nodeid")
    check("msg", 25, "FAILED some::nodeid - msg")
    check("some longer msg", 24, "FAILED some::nodeid")
    check("some longer msg", 25, "FAILED some::nodeid - ...")
    check("some longer msg", 26, "FAILED some::nodeid - s...")
    check("some\nmessage", 25, "FAILED some::nodeid - ...")
    check("some\nmessage", 26, "FAILED some::nodeid - some")
    check("some\nmessage", 80, "FAILED some::nodeid - some")
    # Test unicode safety.
    check("😄😄😄😄😄\n2nd line", 25, "FAILED some::nodeid - ...")
    check("😄😄😄😄😄\n2nd line", 26, "FAILED some::nodeid - ...")
    check("😄😄😄😄😄\n2nd line", 27, "FAILED some::nodeid - 😄...")
    check("😄😄😄😄😄\n2nd line", 28, "FAILED some::nodeid - 😄...")
    check("😄😄😄😄😄\n2nd line", 29, "FAILED some::nodeid - 😄😄...")
    # NOTE: constructed, not sure if this is supported.
    mocked_pos = "nodeid::😄::withunicode"
    check("😄😄😄😄😄\n2nd line", 29, "FAILED nodeid::😄::withunicode")
    check("😄😄😄😄😄\n2nd line", 40, "FAILED nodeid::😄::withunicode - 😄😄...")
    check("😄😄😄😄😄\n2nd line", 41, "FAILED nodeid::😄::withunicode - 😄😄...")
    check("😄😄😄😄😄\n2nd line", 42, "FAILED nodeid::😄::withunicode - 😄😄😄...")
    check("😄😄😄😄😄\n2nd line", 80, "FAILED nodeid::😄::withunicode - 😄😄😄😄😄")
@pytest.mark.parametrize(
    "seconds, expected",
    [
        (10.0, "10.00s"),
        (10.34, "10.34s"),
        (59.99, "59.99s"),
        (60.55, "60.55s (0:01:00)"),
        (123.55, "123.55s (0:02:03)"),
        (60 * 60 + 0.5, "3600.50s (1:00:00)"),
    ],
)
def test_format_session_duration(seconds, expected):
    """Durations of one minute or more also get an ``(H:MM:SS)`` suffix."""
    from _pytest.terminal import format_session_duration
    actual = format_session_duration(seconds)
    assert actual == expected
def test_collecterror(testdir):
    """A collection error interrupts the session and is summarized with ``-ra``."""
    bad_module = testdir.makepyfile("raise SyntaxError()")
    result = testdir.runpytest("-ra", str(bad_module))
    expected = [
        "collected 0 items / 1 error",
        "*= ERRORS =*",
        "*_ ERROR collecting test_collecterror.py _*",
        "E SyntaxError: *",
        "*= short test summary info =*",
        "ERROR test_collecterror.py",
        "*! Interrupted: 1 error during collection !*",
        "*= 1 error in *",
    ]
    result.stdout.fnmatch_lines(expected)
def test_via_exec(testdir: Testdir) -> None:
    """Tests defined via ``exec`` are collected and shown with a ``<- <string>`` origin."""
    module = testdir.makepyfile("exec('def test_via_exec(): pass')")
    result = testdir.runpytest(str(module), "-vv")
    expected = [
        "test_via_exec.py::test_via_exec <- <string> PASSED*",
        "*= 1 passed in *",
    ]
    result.stdout.fnmatch_lines(expected)
class TestCodeHighlight:
    """Traceback source lines are syntax-highlighted when ``--color=yes``."""
    def test_code_highlight_simple(self, testdir: Testdir, color_mapping) -> None:
        """Keywords, function names and numbers get distinct highlight markup."""
        testdir.makepyfile(
            """
            def test_foo():
                assert 1 == 10
            """
        )
        result = testdir.runpytest("--color=yes")
        # Skip on terminals whose markup ordering can't be asserted reliably.
        color_mapping.requires_ordered_markup(result)
        result.stdout.fnmatch_lines(
            color_mapping.format_for_fnmatch(
                [
                    " {kw}def{hl-reset} {function}test_foo{hl-reset}():",
                    "> {kw}assert{hl-reset} {number}1{hl-reset} == {number}10{hl-reset}",
                    "{bold}{red}E assert 1 == 10{reset}",
                ]
            )
        )
    def test_code_highlight_continuation(self, testdir: Testdir, color_mapping) -> None:
        """Highlighting stays correct across a multi-line string continuation."""
        testdir.makepyfile(
            """
            def test_foo():
                print('''
                '''); assert 0
            """
        )
        result = testdir.runpytest("--color=yes")
        color_mapping.requires_ordered_markup(result)
        result.stdout.fnmatch_lines(
            color_mapping.format_for_fnmatch(
                [
                    " {kw}def{hl-reset} {function}test_foo{hl-reset}():",
                    " {print}print{hl-reset}({str}'''{hl-reset}{str}{hl-reset}",
                    "> {str} {hl-reset}{str}'''{hl-reset}); {kw}assert{hl-reset} {number}0{hl-reset}",
                    "{bold}{red}E assert 0{reset}",
                ]
            )
        )
| 32.338532 | 107 | 0.518015 |
a30f36a4a7b7b2d48a7159ce11f3977b8920d7c9 | 6,864 | py | Python | nlpaug/augmenter/word/reserved.py | lucidworks/nlpaug | 8e47fa39200db17f4dc1d61567af1419bc389071 | [
"MIT"
] | 1 | 2021-06-09T20:07:30.000Z | 2021-06-09T20:07:30.000Z | nlpaug/augmenter/word/reserved.py | lucidworks/nlpaug | 8e47fa39200db17f4dc1d61567af1419bc389071 | [
"MIT"
] | null | null | null | nlpaug/augmenter/word/reserved.py | lucidworks/nlpaug | 8e47fa39200db17f4dc1d61567af1419bc389071 | [
"MIT"
] | null | null | null | """
Augmenter that apply target word replacement operation to textual input.
"""
import re
from collections import defaultdict
from nlpaug.augmenter.word import WordAugmenter
from nlpaug.util import Action, Doc
class ReservedAug(WordAugmenter):
    # Joiner used to collapse a multi-word reserved phrase into a single
    # token during preprocessing, so the phrase survives tokenization.
    CONNECT_TOKEN = "nnnnn"
    """
    Augmenter that apply target word replacement for augmentation.
    :param float aug_p: Percentage of word will be augmented.
    :param int aug_min: Minimum number of word will be augmented.
    :param int aug_max: Maximum number of word will be augmented. If None is passed, number of augmentation is
        calculated via aup_p. If calculated result from aug_p is smaller than aug_max, will use calculated result from
        aug_p. Otherwise, using aug_max.
    :param list reserved_tokens: A list of swappable tokens (a list of list). For example, "FWD", "Fwd" and "FW"
        are referring to "foward" in email communcation while "Sincerely" and "Best Regards" treated as same
        meaning. The input should be [["FWD", "Fwd", "FW"], ["Sincerely", "Best Regards"]].
    :param bool case_sensitive: Default is True. If True, it will only replace alternative token if all cases are same.
    :param func tokenizer: Customize tokenization process
    :param func reverse_tokenizer: Customize reverse of tokenization process
    :param str name: Name of this augmenter
    >>> import nlpaug.augmenter.word as naw
    >>> aug = naw.ReservedAug()
    """
    def __init__(
        self,
        reserved_tokens,
        action=Action.SUBSTITUTE,
        case_sensitive=True,
        name="Reserved_Aug",
        aug_min=1,
        aug_max=10,
        aug_p=0.3,
        tokenizer=None,
        reverse_tokenizer=None,
        verbose=0,
    ):
        super().__init__(
            action=action,
            name=name,
            aug_p=aug_p,
            aug_min=aug_min,
            aug_max=aug_max,
            tokenizer=tokenizer,
            reverse_tokenizer=reverse_tokenizer,
            device="cpu",
            verbose=verbose,
            include_detail=False,
        )
        self.reserved_tokens = reserved_tokens
        self.reserved_lower_tokens = []
        self.case_sensitive = case_sensitive
        # Maps a single-word token -> index of its swap group in reserved_tokens.
        self.reserved_token_dict = {}
        # Maps a concatenated phrase token -> index of its swap group.
        self.reserved_phrase_dict = {}
        self.reserved_phrase_concats = []
        self.reserved_phrase_regexs = []
        if not case_sensitive:
            self.reserved_lower_tokens = [
                t.lower() for tokens in reserved_tokens for t in tokens
            ]
        # Bucket multi-word phrases by token length so longer phrases are
        # registered (and later substituted in preprocess) before shorter ones.
        reserved_phrase_dict_by_len = defaultdict(list)
        for i, tokens in enumerate(reserved_tokens):
            for t in tokens:
                if not case_sensitive:
                    t = t.lower()
                phrase_tokens = self.tokenizer(t)
                if len(phrase_tokens) == 1:
                    # For single word
                    # If duplicates word occurs, pick the last one.
                    self.reserved_token_dict[t] = i
                else:
                    # For phrase
                    reserved_phrase_dict_by_len[len(phrase_tokens)].append(
                        (t, phrase_tokens, i)
                    )
        for i in sorted(reserved_phrase_dict_by_len.keys(), reverse=True):
            for phrase, phrase_tokens, pos in reserved_phrase_dict_by_len[i]:
                phrase_concat_token = self.CONNECT_TOKEN.join(phrase_tokens)
                # NOTE(review): the phrase is interpolated into the pattern
                # without re.escape, so phrases containing regex
                # metacharacters (e.g. '.', '(') may mis-match — TODO confirm
                # whether literal-only phrases are an intended precondition.
                phrase_token_regex = re.compile("(" + phrase + ")", re.IGNORECASE)
                self.reserved_phrase_dict[phrase_concat_token] = pos
                self.reserved_phrase_concats.append(phrase_concat_token)
                self.reserved_phrase_regexs.append(phrase_token_regex)
    def skip_aug(self, token_idxes, tokens):
        # Keep only indexes whose token is a known reserved word or a
        # preprocessed (concatenated) reserved phrase.
        # https://arxiv.org/pdf/2007.02033.pdf
        results = []
        for idx in token_idxes:
            t = tokens[idx]
            if not self.case_sensitive:
                t = t.lower()
            if t in self.reserved_token_dict:
                # For single word
                results.append(idx)
            elif t in self.reserved_phrase_dict:
                # For phrase
                results.append(idx)
        return results
    def preprocess(self, data):
        # Replace every reserved multi-word phrase in the raw text with its
        # single concatenated token (longest phrases were registered first).
        for reserved_concat_phrase, reserved_phrase_regex in zip(
            self.reserved_phrase_concats, self.reserved_phrase_regexs
        ):
            data = reserved_phrase_regex.sub(reserved_concat_phrase, data)
        return data
    def substitute(self, data):
        """Replace selected reserved words/phrases with a random alternative
        from the same swap group; returns the augmented text (plus change
        logs when include_detail is set)."""
        if not data or not data.strip():
            return data
        change_seq = 0
        data = self.preprocess(data)
        doc = Doc(data, self.tokenizer(data))
        aug_idxes = self._get_aug_idxes(doc.get_original_tokens())
        # Process from the end so earlier indexes stay valid after edits.
        aug_idxes.sort(reverse=True)
        tokens = doc.get_original_tokens()
        if aug_idxes is None or len(aug_idxes) == 0:
            if self.include_detail:
                return data, []
            return data
        for aug_idx in aug_idxes:
            original_token = doc.get_token(aug_idx).orig_token.token
            if not self.case_sensitive:
                original_token = original_token.lower()
            # skip_aug guarantees the token is in one of the two dicts, so
            # candidate_tokens is always assigned before use below.
            if original_token in self.reserved_token_dict:
                candidate_tokens = []
                for t in self.reserved_tokens[self.reserved_token_dict[original_token]]:
                    compare_token = t.lower() if not self.case_sensitive else t
                    if compare_token != original_token:
                        candidate_tokens.append(t)
            elif original_token in self.reserved_phrase_concats:
                candidate_tokens = []
                for t in self.reserved_tokens[
                    self.reserved_phrase_dict[original_token]
                ]:
                    # Compare in concatenated form, since the document token
                    # was produced by preprocess().
                    compare_token = t.replace(" ", self.CONNECT_TOKEN)
                    compare_token = (
                        compare_token.lower()
                        if not self.case_sensitive
                        else compare_token
                    )
                    if compare_token != original_token:
                        candidate_tokens.append(t)
            new_token = self.sample(candidate_tokens, 1)[0]
            # Preserve sentence-initial capitalization.
            if aug_idx == 0:
                new_token = self.align_capitalization(original_token, new_token)
            change_seq += 1
            doc.add_change_log(
                aug_idx,
                new_token=new_token,
                action=Action.SUBSTITUTE,
                change_seq=self.parent_change_seq + change_seq,
            )
        if self.include_detail:
            return (
                self.reverse_tokenizer(doc.get_augmented_tokens()),
                doc.get_change_logs(),
            )
        else:
            return self.reverse_tokenizer(doc.get_augmented_tokens())
| 37.102703 | 119 | 0.589015 |
9798aa3d1eb6d35600828e0328e9cb80868ccd65 | 77,171 | bzl | Python | repos.bzl | windayski/release | 83a4dbf666313562a891d31bda68c91980581c7c | [
"Apache-2.0"
] | null | null | null | repos.bzl | windayski/release | 83a4dbf666313562a891d31bda68c91980581c7c | [
"Apache-2.0"
] | null | null | null | repos.bzl | windayski/release | 83a4dbf666313562a891d31bda68c91980581c7c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_gazelle//:deps.bzl", "go_repository")
def go_repositories():
"""update with bazel run @io_k8s_repo_infra//hack:update-deps."""
go_repository(
name = "com_github_blang_semver",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/blang/semver",
sum = "h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=",
version = "v3.5.1+incompatible",
)
go_repository(
name = "com_github_davecgh_go_spew",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/davecgh/go-spew",
sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
version = "v1.1.1",
)
go_repository(
name = "com_github_golang_protobuf",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/protobuf",
sum = "h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=",
version = "v1.3.2",
)
go_repository(
name = "com_github_google_go_querystring",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-querystring",
sum = "h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_pkg_errors",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pkg/errors",
sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=",
version = "v0.9.1",
)
go_repository(
name = "com_github_pmezard_go_difflib",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pmezard/go-difflib",
sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_stretchr_testify",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/stretchr/testify",
sum = "h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=",
version = "v1.5.1",
)
go_repository(
name = "org_golang_google_appengine",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/appengine",
sum = "h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=",
version = "v1.6.2",
)
go_repository(
name = "org_golang_x_net",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/net",
sum = "h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=",
version = "v0.0.0-20200301022130-244492dfa37a",
)
go_repository(
name = "org_golang_x_oauth2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/oauth2",
sum = "h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=",
version = "v0.0.0-20200107190931-bf48bf16ab8d",
)
go_repository(
name = "co_honnef_go_tools",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "honnef.co/go/tools",
sum = "h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=",
version = "v0.0.1-2019.2.3",
)
go_repository(
name = "com_github_alcortesm_tgz",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/alcortesm/tgz",
sum = "h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=",
version = "v0.0.0-20161220082320-9c5fe88206d7",
)
go_repository(
name = "com_github_anmitsu_go_shlex",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/anmitsu/go-shlex",
sum = "h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=",
version = "v0.0.0-20161002113705-648efa622239",
)
go_repository(
name = "com_github_armon_consul_api",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/armon/consul-api",
sum = "h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=",
version = "v0.0.0-20180202201655-eb2c6b5be1b6",
)
go_repository(
name = "com_github_armon_go_socks5",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/armon/go-socks5",
sum = "h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=",
version = "v0.0.0-20160902184237-e75332964ef5",
)
go_repository(
name = "com_github_burntsushi_toml",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_client9_misspell",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/client9/misspell",
sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
version = "v0.3.4",
)
go_repository(
name = "com_github_coreos_etcd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/etcd",
sum = "h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=",
version = "v3.3.10+incompatible",
)
go_repository(
name = "com_github_coreos_go_etcd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/go-etcd",
sum = "h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo=",
version = "v2.0.0+incompatible",
)
go_repository(
name = "com_github_coreos_go_semver",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/go-semver",
sum = "h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=",
version = "v0.2.0",
)
go_repository(
name = "com_github_cpuguy83_go_md2man",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cpuguy83/go-md2man",
sum = "h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=",
version = "v1.0.10",
)
go_repository(
name = "com_github_creack_pty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/creack/pty",
sum = "h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=",
version = "v1.1.9",
)
go_repository(
name = "com_github_emirpasic_gods",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/emirpasic/gods",
sum = "h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=",
version = "v1.12.0",
)
go_repository(
name = "com_github_flynn_go_shlex",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/flynn/go-shlex",
sum = "h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=",
version = "v0.0.0-20150515145356-3f9db97f8568",
)
go_repository(
name = "com_github_fsnotify_fsnotify",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/fsnotify/fsnotify",
sum = "h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=",
version = "v1.4.7",
)
go_repository(
name = "com_github_gliderlabs_ssh",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gliderlabs/ssh",
sum = "h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=",
version = "v0.2.2",
)
go_repository(
name = "com_github_gogo_protobuf",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gogo/protobuf",
sum = "h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=",
version = "v1.2.1",
)
go_repository(
name = "com_github_golang_glog",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_groupcache",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/groupcache",
sum = "h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=",
version = "v0.0.0-20190702054246-869f871628b6",
)
go_repository(
name = "com_github_golang_mock",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golang/mock",
sum = "h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=",
version = "v1.3.1",
)
go_repository(
name = "com_github_google_btree",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/btree",
sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_google_go_cmp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-cmp",
sum = "h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=",
version = "v0.3.1",
)
go_repository(
name = "com_github_google_martian",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/martian",
sum = "h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=",
version = "v2.1.0+incompatible",
)
go_repository(
name = "com_github_google_pprof",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/pprof",
sum = "h1:Jnx61latede7zDD3DiiP4gmNz33uK0U5HDUaF0a/HVQ=",
version = "v0.0.0-20190515194954-54271f7e092f",
)
go_repository(
name = "com_github_google_uuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/uuid",
sum = "h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=",
version = "v1.1.1",
)
go_repository(
name = "com_github_googleapis_gax_go_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/googleapis/gax-go/v2",
sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
version = "v2.0.5",
)
go_repository(
name = "com_github_hashicorp_golang_lru",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/golang-lru",
sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=",
version = "v0.5.1",
)
go_repository(
name = "com_github_hashicorp_hcl",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/hcl",
sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hpcloud_tail",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hpcloud/tail",
sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_inconshreveable_mousetrap",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/inconshreveable/mousetrap",
sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_jbenet_go_context",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jbenet/go-context",
sum = "h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=",
version = "v0.0.0-20150711004518-d14ea06fba99",
)
go_repository(
name = "com_github_jessevdk_go_flags",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jessevdk/go-flags",
sum = "h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=",
version = "v1.4.0",
)
go_repository(
name = "com_github_jstemmer_go_junit_report",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jstemmer/go-junit-report",
sum = "h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=",
version = "v0.0.0-20190106144839-af01ea7f8024",
)
go_repository(
name = "com_github_kevinburke_ssh_config",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kevinburke/ssh_config",
sum = "h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=",
version = "v0.0.0-20190725054713-01f96b0aa0cd",
)
go_repository(
name = "com_github_kisielk_errcheck",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kisielk/errcheck",
sum = "h1:ZqfnKyx9KGpRcW04j5nnPDgRgoXUeLh2YFBeFzphcA0=",
version = "v1.1.0",
)
go_repository(
name = "com_github_kisielk_gotool",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kisielk/gotool",
sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_konsorten_go_windows_terminal_sequences",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/konsorten/go-windows-terminal-sequences",
sum = "h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=",
version = "v1.0.3",
)
go_repository(
name = "com_github_kr_pretty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kr/pretty",
sum = "h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=",
version = "v0.1.0",
)
go_repository(
name = "com_github_kr_pty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kr/pty",
sum = "h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=",
version = "v1.1.8",
)
go_repository(
name = "com_github_kr_text",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kr/text",
sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=",
version = "v0.2.0",
)
go_repository(
name = "com_github_magiconair_properties",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/magiconair/properties",
sum = "h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=",
version = "v1.8.1",
)
go_repository(
name = "com_github_mattn_go_runewidth",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-runewidth",
sum = "h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=",
version = "v0.0.9",
)
go_repository(
name = "com_github_mitchellh_go_homedir",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/go-homedir",
sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=",
version = "v1.1.0",
)
go_repository(
name = "com_github_mitchellh_mapstructure",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/mapstructure",
sum = "h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=",
version = "v1.1.2",
)
go_repository(
name = "com_github_olekukonko_tablewriter",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/olekukonko/tablewriter",
sum = "h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8=",
version = "v0.0.4",
)
go_repository(
name = "com_github_onsi_ginkgo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/onsi/ginkgo",
sum = "h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=",
version = "v1.12.0",
)
go_repository(
name = "com_github_onsi_gomega",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/onsi/gomega",
sum = "h1:Gwkk+PTu/nfOwNMtUB/mRUv0X7ewW5dO4AERT1ThVKo=",
version = "v1.10.0",
)
go_repository(
name = "com_github_pelletier_go_toml",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pelletier/go-toml",
sum = "h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=",
version = "v1.2.0",
)
go_repository(
name = "com_github_psampaz_go_mod_outdated",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/psampaz/go-mod-outdated",
sum = "h1:DXS6rdsz4rpezbPsckQflqrYSEBvsF5GAmUWP+UvnQo=",
version = "v0.6.0",
)
go_repository(
name = "com_github_russross_blackfriday",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/russross/blackfriday",
sum = "h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=",
version = "v1.5.2",
)
go_repository(
name = "com_github_sergi_go_diff",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sergi/go-diff",
sum = "h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=",
version = "v1.1.0",
)
go_repository(
name = "com_github_sirupsen_logrus",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sirupsen/logrus",
sum = "h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=",
version = "v1.6.0",
)
go_repository(
name = "com_github_spf13_afero",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/afero",
sum = "h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=",
version = "v1.2.2",
)
go_repository(
name = "com_github_spf13_cast",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/cast",
sum = "h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=",
version = "v1.3.0",
)
go_repository(
name = "com_github_spf13_cobra",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/cobra",
sum = "h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_spf13_jwalterweatherman",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/jwalterweatherman",
sum = "h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_spf13_pflag",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/pflag",
sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=",
version = "v1.0.5",
)
go_repository(
name = "com_github_spf13_viper",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spf13/viper",
sum = "h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk=",
version = "v1.6.1",
)
go_repository(
name = "com_github_stretchr_objx",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/stretchr/objx",
sum = "h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=",
version = "v0.2.0",
)
go_repository(
name = "com_github_ugorji_go_codec",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ugorji/go/codec",
sum = "h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648=",
version = "v0.0.0-20181204163529-d75b2dcb6bc8",
)
go_repository(
name = "com_github_xanzy_ssh_agent",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xanzy/ssh-agent",
sum = "h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=",
version = "v0.2.1",
)
go_repository(
name = "com_github_xlab_handysort",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xlab/handysort",
sum = "h1:j2hhcujLRHAg872RWAV5yaUrEjHEObwDv3aImCaNLek=",
version = "v0.0.0-20150421192137-fb3537ed64a1",
)
go_repository(
name = "com_github_xordataexchange_crypt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xordataexchange/crypt",
sum = "h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=",
version = "v0.0.3-0.20170626215501-b2862e3d0a77",
)
go_repository(
name = "com_google_cloud_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "cloud.google.com/go",
sum = "h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ=",
version = "v0.44.3",
)
go_repository(
name = "in_gopkg_check_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/check.v1",
sum = "h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=",
version = "v1.0.0-20200227125254-8fa46927fb4f",
)
go_repository(
name = "in_gopkg_fsnotify_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/fsnotify.v1",
sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=",
version = "v1.4.7",
)
go_repository(
name = "in_gopkg_tomb_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/tomb.v1",
sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=",
version = "v1.0.0-20141024135613-dd632973f1e7",
)
go_repository(
name = "in_gopkg_warnings_v0",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/warnings.v0",
sum = "h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=",
version = "v0.1.2",
)
go_repository(
name = "in_gopkg_yaml_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/yaml.v2",
sum = "h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=",
version = "v2.2.8",
)
go_repository(
name = "io_k8s_klog",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/klog",
sum = "h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE=",
version = "v0.3.0",
)
go_repository(
name = "io_k8s_sigs_yaml",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "sigs.k8s.io/yaml",
sum = "h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=",
version = "v1.2.0",
)
go_repository(
name = "io_opencensus_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.opencensus.io",
sum = "h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50=",
version = "v0.22.1",
)
go_repository(
name = "ml_vbom_util",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "vbom.ml/util",
sum = "h1:O69FD9pJA4WUZlEwYatBEEkRWKQ5cKodWpdKTrCS/iQ=",
version = "v0.0.0-20180919145318-efcd4e0f9787",
)
go_repository(
name = "org_golang_google_api",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/api",
sum = "h1:zS+Q/CJJnVlXpXQVIz+lH0ZT2lBuT2ac7XD8Y/3w6hY=",
version = "v0.21.0",
)
go_repository(
name = "org_golang_google_genproto",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/genproto",
sum = "h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=",
version = "v0.0.0-20190819201941-24fa4b261c55",
)
go_repository(
name = "org_golang_google_grpc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "google.golang.org/grpc",
sum = "h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=",
version = "v1.27.0",
)
go_repository(
name = "org_golang_x_crypto",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/crypto",
sum = "h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM=",
version = "v0.0.0-20200302210943-78000ba7a073",
)
go_repository(
name = "org_golang_x_exp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/exp",
sum = "h1:OeRHuibLsmZkFj773W4LcfAGsSxJgfPONhr8cmO+eLA=",
version = "v0.0.0-20190510132918-efd6b22b2522",
)
go_repository(
name = "org_golang_x_lint",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/lint",
sum = "h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI=",
version = "v0.0.0-20190409202823-959b441ac422",
)
go_repository(
name = "org_golang_x_sync",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/sync",
sum = "h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=",
version = "v0.0.0-20190423024810-112230192c58",
)
go_repository(
name = "org_golang_x_sys",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/sys",
sum = "h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=",
version = "v0.0.0-20200302150141-5c8b2ff67527",
)
go_repository(
name = "org_golang_x_text",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/text",
sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
version = "v0.3.2",
)
go_repository(
name = "org_golang_x_time",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/time",
sum = "h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=",
version = "v0.0.0-20190308202827-9d24e82272b4",
)
go_repository(
name = "org_golang_x_tools",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/tools",
sum = "h1:FD7jysxM+EJqg5UYYy3XYDsAiUickFsn4UiaanJkf8c=",
version = "v0.0.0-20200301222351-066e0c02454c",
)
go_repository(
name = "org_golang_x_mod",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/mod",
sum = "h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=",
version = "v0.1.1-0.20191105210325-c90efee705ee",
)
go_repository(
name = "org_golang_x_xerrors",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/xerrors",
sum = "h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=",
version = "v0.0.0-20191011141410-1b5146add898",
)
go_repository(
name = "com_github_joefitzgerald_rainbow_reporter",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/joefitzgerald/rainbow-reporter",
sum = "h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo=",
version = "v0.1.0",
)
go_repository(
name = "com_github_maxbrunsfeld_counterfeiter_v6",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/maxbrunsfeld/counterfeiter/v6",
sum = "h1:z1lXirM9f9WTcdmzSZahKh/t+LCqPiiwK2/DB1kLlI4=",
version = "v6.2.3",
)
go_repository(
name = "com_github_sclevine_spec",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sclevine/spec",
sum = "h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=",
version = "v1.4.0",
)
go_repository(
name = "com_github_nozzle_throttler",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nozzle/throttler",
sum = "h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE=",
version = "v0.0.0-20180817012639-2ea982251481",
)
go_repository(
name = "com_github_burntsushi_xgb",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/BurntSushi/xgb",
sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=",
version = "v0.0.0-20160522181843-27f122750802",
)
go_repository(
name = "com_github_googlecloudplatform_testgrid",
# NOTE(review): the only entry in this generated list that deviates from the
# uniform `"on"`/`"disable"` pair used everywhere else — presumably testgrid
# ships its own usable BUILD files and needs per-package proto rules. Confirm
# intent before "normalizing" it to match the surrounding entries.
build_file_generation = "off",
build_file_proto_mode = "package",
importpath = "github.com/GoogleCloudPlatform/testgrid",
sum = "h1:UBYR7CmCLwOoWJMHXVlDMurCTzZLKww/WslxsbAvFX4=",
version = "v0.0.10",
)
go_repository(
name = "com_google_cloud_go_datastore",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "cloud.google.com/go/datastore",
sum = "h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=",
version = "v1.0.0",
)
go_repository(
name = "io_rsc_binaryregexp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "rsc.io/binaryregexp",
sum = "h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=",
version = "v0.2.0",
)
go_repository(
name = "org_golang_x_image",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/image",
sum = "h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0=",
version = "v0.0.0-20190227222117-0694c2d4d067",
)
go_repository(
name = "org_golang_x_mobile",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "golang.org/x/mobile",
sum = "h1:Tus/Y4w3V77xDsGwKUC8a/QrV7jScpU557J77lFffNs=",
version = "v0.0.0-20190312151609-d3739f865fa6",
)
go_repository(
name = "com_github_shurcool_sanitized_anchor_name",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/shurcooL/sanitized_anchor_name",
sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
version = "v1.0.0",
)
go_repository(
name = "cc_mvdan_interfacer",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "mvdan.cc/interfacer",
sum = "h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=",
version = "v0.0.0-20180901003855-c20040233aed",
)
go_repository(
name = "cc_mvdan_lint",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "mvdan.cc/lint",
sum = "h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=",
version = "v0.0.0-20170908181259-adc824a0674b",
)
go_repository(
name = "cc_mvdan_unparam",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "mvdan.cc/unparam",
sum = "h1:Cq7MalBHYACRd6EesksG1Q8EoIAKOsiZviGKbOLIej4=",
version = "v0.0.0-20190720180237-d51796306d8f",
)
go_repository(
name = "com_github_alecthomas_template",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/alecthomas/template",
sum = "h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=",
version = "v0.0.0-20160405071501-a0175ee3bccc",
)
go_repository(
name = "com_github_alecthomas_units",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/alecthomas/units",
sum = "h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=",
version = "v0.0.0-20151022065526-2efee857e7cf",
)
go_repository(
name = "com_github_beorn7_perks",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/beorn7/perks",
sum = "h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_bombsimon_wsl_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bombsimon/wsl/v2",
sum = "h1:+Vjcn+/T5lSrO8Bjzhk4v14Un/2UyCA1E3V5j9nwTkQ=",
version = "v2.0.0",
)
go_repository(
name = "com_github_cespare_xxhash",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cespare/xxhash",
sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=",
version = "v1.1.0",
)
go_repository(
name = "com_github_coreos_bbolt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/bbolt",
sum = "h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=",
version = "v1.3.2",
)
go_repository(
name = "com_github_coreos_go_systemd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/go-systemd",
sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=",
version = "v0.0.0-20190321100706-95778dfbb74e",
)
go_repository(
name = "com_github_coreos_pkg",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/coreos/pkg",
sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=",
version = "v0.0.0-20180928190104-399ea9e2e55f",
)
go_repository(
name = "com_github_dgrijalva_jwt_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/dgrijalva/jwt-go",
sum = "h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=",
version = "v3.2.0+incompatible",
)
go_repository(
name = "com_github_dgryski_go_sip13",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/dgryski/go-sip13",
sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=",
version = "v0.0.0-20181026042036-e10d5fee7954",
)
go_repository(
name = "com_github_fatih_color",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/fatih/color",
sum = "h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=",
version = "v1.9.0",
)
go_repository(
name = "com_github_ghodss_yaml",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ghodss/yaml",
sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_critic_go_critic",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-critic/go-critic",
sum = "h1:4DTQfT1wWwLg/hzxwD9bkdhDQrdJtxe6DUTadPlrIeE=",
version = "v0.4.1",
)
go_repository(
name = "com_github_go_kit_kit",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-kit/kit",
sum = "h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=",
version = "v0.8.0",
)
go_repository(
name = "com_github_go_lintpack_lintpack",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-lintpack/lintpack",
sum = "h1:DI5mA3+eKdWeJ40nU4d6Wc26qmdG8RCi/btYq0TuRN0=",
version = "v0.5.2",
)
go_repository(
name = "com_github_go_logfmt_logfmt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-logfmt/logfmt",
sum = "h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=",
version = "v0.4.0",
)
go_repository(
name = "com_github_go_ole_go_ole",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-ole/go-ole",
sum = "h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=",
version = "v1.2.1",
)
go_repository(
name = "com_github_go_sql_driver_mysql",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-sql-driver/mysql",
sum = "h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=",
version = "v1.4.0",
)
go_repository(
name = "com_github_go_stack_stack",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-stack/stack",
sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=",
version = "v1.8.0",
)
go_repository(
name = "com_github_go_toolsmith_astcast",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-toolsmith/astcast",
sum = "h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_astcopy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-toolsmith/astcopy",
sum = "h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_astequal",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-toolsmith/astequal",
sum = "h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_astfmt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-toolsmith/astfmt",
sum = "h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_astinfo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-toolsmith/astinfo",
sum = "h1:wP6mXeB2V/d1P1K7bZ5vDUO3YqEzcvOREOxZPEu3gVI=",
version = "v0.0.0-20180906194353-9809ff7efb21",
)
go_repository(
name = "com_github_go_toolsmith_astp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-toolsmith/astp",
sum = "h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_pkgload",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-toolsmith/pkgload",
sum = "h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_strparse",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-toolsmith/strparse",
sum = "h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_typep",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-toolsmith/typep",
sum = "h1:zKymWyA1TRYvqYrYDrfEMZULyrhcnGY3x7LDKU2XQaA=",
version = "v1.0.0",
)
go_repository(
name = "com_github_gobwas_glob",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gobwas/glob",
sum = "h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=",
version = "v0.2.3",
)
go_repository(
name = "com_github_gofrs_flock",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gofrs/flock",
sum = "h1:ekuhfTjngPhisSjOJ0QWKpPQE8/rbknHaes6WVJj5Hw=",
version = "v0.0.0-20190320160742-5135e617513b",
)
go_repository(
name = "com_github_golangci_check",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/check",
sum = "h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=",
version = "v0.0.0-20180506172741-cfe4005ccda2",
)
go_repository(
name = "com_github_golangci_dupl",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/dupl",
sum = "h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=",
version = "v0.0.0-20180902072040-3e9179ac440a",
)
go_repository(
name = "com_github_golangci_errcheck",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/errcheck",
sum = "h1:YYWNAGTKWhKpcLLt7aSj/odlKrSrelQwlovBpDuf19w=",
version = "v0.0.0-20181223084120-ef45e06d44b6",
)
go_repository(
name = "com_github_golangci_go_misc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/go-misc",
sum = "h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw=",
version = "v0.0.0-20180628070357-927a3d87b613",
)
go_repository(
name = "com_github_golangci_goconst",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/goconst",
sum = "h1:pe9JHs3cHHDQgOFXJJdYkK6fLz2PWyYtP4hthoCMvs8=",
version = "v0.0.0-20180610141641-041c5f2b40f3",
)
go_repository(
name = "com_github_golangci_gocyclo",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/gocyclo",
sum = "h1:J2XAy40+7yz70uaOiMbNnluTg7gyQhtGqLQncQh+4J8=",
version = "v0.0.0-20180528134321-2becd97e67ee",
)
go_repository(
name = "com_github_golangci_gofmt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/gofmt",
sum = "h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=",
version = "v0.0.0-20190930125516-244bba706f1a",
)
go_repository(
name = "com_github_golangci_golangci_lint",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/golangci-lint",
# NOTE(review): only entry here with a `replace` directive, and it points at
# the same module path — effectively a pin rather than a redirect; presumably
# mirrors a `replace` line in the project's go.mod. Verify before removing.
replace = "github.com/golangci/golangci-lint",
sum = "h1:wkACDEoy+b0CVqnSK8BbWrVkN2tsVLUA1+SIkGSm4o0=",
version = "v1.23.3",
)
go_repository(
name = "com_github_golangci_ineffassign",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/ineffassign",
sum = "h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI=",
version = "v0.0.0-20190609212857-42439a7714cc",
)
go_repository(
name = "com_github_golangci_lint_1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/lint-1",
sum = "h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=",
version = "v0.0.0-20191013205115-297bf364a8e0",
)
go_repository(
name = "com_github_golangci_maligned",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/maligned",
sum = "h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=",
version = "v0.0.0-20180506175553-b1d89398deca",
)
go_repository(
name = "com_github_golangci_misspell",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/misspell",
sum = "h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk=",
version = "v0.0.0-20180809174111-950f5d19e770",
)
go_repository(
name = "com_github_golangci_prealloc",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/prealloc",
sum = "h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us=",
version = "v0.0.0-20180630174525-215b22d4de21",
)
go_repository(
name = "com_github_golangci_revgrep",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/revgrep",
sum = "h1:HVfrLniijszjS1aiNg8JbBMO2+E1WIQ+j/gL4SQqGPg=",
version = "v0.0.0-20180526074752-d9c87f5ffaf0",
)
go_repository(
name = "com_github_golangci_unconvert",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/golangci/unconvert",
sum = "h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=",
version = "v0.0.0-20180507085042-28b1c447d1f4",
)
go_repository(
name = "com_github_google_renameio",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/renameio",
sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
version = "v0.1.0",
)
go_repository(
name = "com_github_gopherjs_gopherjs",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gopherjs/gopherjs",
sum = "h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=",
version = "v0.0.0-20181017120253-0766667cb4d1",
)
go_repository(
name = "com_github_gorilla_websocket",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gorilla/websocket",
sum = "h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=",
version = "v1.4.0",
)
go_repository(
name = "com_github_gostaticanalysis_analysisutil",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/gostaticanalysis/analysisutil",
sum = "h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw=",
version = "v0.0.0-20190318220348-4088753ea4d3",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_middleware",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/grpc-ecosystem/go-grpc-middleware",
sum = "h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=",
version = "v1.0.0",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_prometheus",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=",
version = "v1.2.0",
)
go_repository(
name = "com_github_grpc_ecosystem_grpc_gateway",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/grpc-ecosystem/grpc-gateway",
sum = "h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=",
version = "v1.9.0",
)
go_repository(
name = "com_github_jingyugao_rowserrcheck",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jingyugao/rowserrcheck",
sum = "h1:GmsqmapfzSJkm28dhRoHz2tLRbJmqhU86IPgBtN3mmk=",
version = "v0.0.0-20191204022205-72ab7603b68a",
)
go_repository(
name = "com_github_jirfag_go_printf_func_name",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jirfag/go-printf-func-name",
sum = "h1:jNYPNLe3d8smommaoQlK7LOA5ESyUJJ+Wf79ZtA7Vp4=",
version = "v0.0.0-20191110105641-45db9963cdd3",
)
go_repository(
name = "com_github_jmoiron_sqlx",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jmoiron/sqlx",
sum = "h1:lrdPtrORjGv1HbbEvKWDUAy97mPpFm4B8hp77tcCUJY=",
version = "v1.2.1-0.20190826204134-d7d95172beb5",
)
go_repository(
name = "com_github_jonboulle_clockwork",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jonboulle/clockwork",
sum = "h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=",
version = "v0.1.0",
)
go_repository(
name = "com_github_jtolds_gls",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/jtolds/gls",
sum = "h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=",
version = "v4.20.0+incompatible",
)
go_repository(
name = "com_github_julienschmidt_httprouter",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/julienschmidt/httprouter",
sum = "h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=",
version = "v1.2.0",
)
go_repository(
name = "com_github_klauspost_compress",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/klauspost/compress",
sum = "h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E=",
version = "v1.4.1",
)
go_repository(
name = "com_github_klauspost_cpuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/klauspost/cpuid",
sum = "h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=",
version = "v1.2.0",
)
go_repository(
name = "com_github_kr_logfmt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/kr/logfmt",
sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=",
version = "v0.0.0-20140226030751-b84e30acd515",
)
go_repository(
name = "com_github_lib_pq",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/lib/pq",
sum = "h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=",
version = "v1.2.0",
)
go_repository(
name = "com_github_logrusorgru_aurora",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/logrusorgru/aurora",
sum = "h1:9MlwzLdW7QSDrhDjFlsEYmxpFyIoXmYRon3dt0io31k=",
version = "v0.0.0-20181002194514-a7b3b318ed4e",
)
go_repository(
name = "com_github_matoous_godox",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/matoous/godox",
sum = "h1:RHba4YImhrUVQDHUCe2BNSOz4tVy2yGyXhvYDvxGgeE=",
version = "v0.0.0-20190911065817-5d6d842e92eb",
)
go_repository(
name = "com_github_mattn_go_colorable",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-colorable",
sum = "h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=",
version = "v0.1.4",
)
go_repository(
name = "com_github_mattn_go_isatty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-isatty",
sum = "h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=",
version = "v0.0.11",
)
go_repository(
name = "com_github_mattn_go_sqlite3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/go-sqlite3",
sum = "h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=",
version = "v1.9.0",
)
go_repository(
name = "com_github_mattn_goveralls",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mattn/goveralls",
sum = "h1:7eJB6EqsPhRVxvwEXGnqdO2sJI0PTsrWoTMXEk9/OQc=",
version = "v0.0.2",
)
go_repository(
name = "com_github_matttproud_golang_protobuf_extensions",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/matttproud/golang_protobuf_extensions",
sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=",
version = "v1.0.1",
)
go_repository(
name = "com_github_mitchellh_go_ps",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mitchellh/go-ps",
sum = "h1:9+ke9YJ9KGWw5ANXK6ozjoK47uI3uNbXv4YVINBnGm8=",
version = "v0.0.0-20190716172923-621e5597135b",
)
go_repository(
name = "com_github_mozilla_tls_observatory",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mozilla/tls-observatory",
sum = "h1:Av0AX0PnAlPZ3AY2rQUobGFaZfE4KHVRdKWIEPvsCWY=",
version = "v0.0.0-20190404164649-a3c1b6cfecfd",
)
go_repository(
name = "com_github_mwitkow_go_conntrack",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/mwitkow/go-conntrack",
sum = "h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=",
version = "v0.0.0-20161129095857-cc309e4a2223",
)
go_repository(
name = "com_github_nbutton23_zxcvbn_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nbutton23/zxcvbn-go",
sum = "h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=",
version = "v0.0.0-20180912185939-ae427f1e4c1d",
)
go_repository(
name = "com_github_oklog_ulid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/oklog/ulid",
sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=",
version = "v1.3.1",
)
go_repository(
name = "com_github_oneofone_xxhash",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/OneOfOne/xxhash",
sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=",
version = "v1.2.2",
)
go_repository(
name = "com_github_openpeedeep_depguard",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/OpenPeeDeeP/depguard",
sum = "h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us=",
version = "v1.0.1",
)
go_repository(
name = "com_github_pborman_uuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/pborman/uuid",
sum = "h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=",
version = "v1.2.0",
)
go_repository(
name = "com_github_prometheus_client_golang",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/client_golang",
sum = "h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=",
version = "v0.9.3",
)
go_repository(
name = "com_github_prometheus_client_model",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/client_model",
sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=",
version = "v0.0.0-20190812154241-14fe0d1b01d4",
)
go_repository(
name = "com_github_prometheus_common",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/common",
sum = "h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=",
version = "v0.4.0",
)
go_repository(
name = "com_github_prometheus_procfs",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/procfs",
sum = "h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=",
version = "v0.0.0-20190507164030-5867b95ac084",
)
go_repository(
name = "com_github_prometheus_tsdb",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/prometheus/tsdb",
sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=",
version = "v0.7.1",
)
go_repository(
name = "com_github_quasilyte_go_consistent",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/quasilyte/go-consistent",
sum = "h1:JoUA0uz9U0FVFq5p4LjEq4C0VgQ0El320s3Ms0V4eww=",
version = "v0.0.0-20190521200055-c6f3937de18c",
)
go_repository(
name = "com_github_rogpeppe_fastuuid",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/rogpeppe/fastuuid",
sum = "h1:gu+uRPtBe88sKxUCEXRoeCvVG90TJmwhiqRpvdhQFng=",
version = "v0.0.0-20150106093220-6724a57986af",
)
go_repository(
name = "com_github_rogpeppe_go_internal",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/rogpeppe/go-internal",
sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=",
version = "v1.3.0",
)
go_repository(
name = "com_github_securego_gosec",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/securego/gosec",
sum = "h1:AtnWoOvTioyDXFvu96MWEeE8qj4COSQnJogzLy/u41A=",
version = "v0.0.0-20200103095621-79fbf3af8d83",
)
go_repository(
name = "com_github_shirou_gopsutil",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/shirou/gopsutil",
sum = "h1:WokF3GuxBeL+n4Lk4Fa8v9mbdjlrl7bHuneF4N1bk2I=",
version = "v0.0.0-20190901111213-e4ec7b275ada",
)
go_repository(
name = "com_github_shirou_w32",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/shirou/w32",
sum = "h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U=",
version = "v0.0.0-20160930032740-bb4de0191aa4",
)
go_repository(
name = "com_github_shurcool_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/shurcooL/go",
sum = "h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM=",
version = "v0.0.0-20180423040247-9e1955d9fb6e",
)
go_repository(
name = "com_github_shurcool_go_goon",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/shurcooL/go-goon",
sum = "h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc=",
version = "v0.0.0-20170922171312-37c2f522c041",
)
go_repository(
name = "com_github_smartystreets_assertions",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/smartystreets/assertions",
sum = "h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=",
version = "v0.0.0-20180927180507-b2de0cb4f26d",
)
go_repository(
name = "com_github_smartystreets_goconvey",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/smartystreets/goconvey",
sum = "h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=",
version = "v1.6.4",
)
go_repository(
name = "com_github_soheilhy_cmux",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/soheilhy/cmux",
sum = "h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=",
version = "v0.1.4",
)
go_repository(
name = "com_github_sourcegraph_go_diff",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sourcegraph/go-diff",
sum = "h1:gO6i5zugwzo1RVTvgvfwCOSVegNuvnNi6bAD1QCmkHs=",
version = "v0.5.1",
)
go_repository(
name = "com_github_spaolacci_murmur3",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/spaolacci/murmur3",
sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=",
version = "v0.0.0-20180118202830-f09979ecbc72",
)
go_repository(
name = "com_github_stackexchange_wmi",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/StackExchange/wmi",
sum = "h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=",
version = "v0.0.0-20180116203802-5d049714c4a6",
)
go_repository(
name = "com_github_subosito_gotenv",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/subosito/gotenv",
sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=",
version = "v1.2.0",
)
go_repository(
name = "com_github_timakin_bodyclose",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/timakin/bodyclose",
sum = "h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q=",
version = "v0.0.0-20190930140734-f7f2e9bca95e",
)
go_repository(
name = "com_github_tmc_grpc_websocket_proxy",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/tmc/grpc-websocket-proxy",
sum = "h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=",
version = "v0.0.0-20190109142713-0ad062ec5ee5",
)
go_repository(
name = "com_github_tommy_muehle_go_mnd",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/tommy-muehle/go-mnd",
sum = "h1:4D0wuPKjOTiK2garzuPGGvm4zZ/wLYDOH8TJSABC7KU=",
version = "v1.1.1",
)
go_repository(
name = "com_github_ugorji_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ugorji/go",
sum = "h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=",
version = "v1.1.4",
)
go_repository(
name = "com_github_ultraware_funlen",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ultraware/funlen",
sum = "h1:Av96YVBwwNSe4MLR7iI/BIa3VyI7/djnto/pK3Uxbdo=",
version = "v0.0.2",
)
go_repository(
name = "com_github_ultraware_whitespace",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/ultraware/whitespace",
sum = "h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg=",
version = "v0.0.4",
)
go_repository(
name = "com_github_uudashr_gocognit",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/uudashr/gocognit",
sum = "h1:MoG2fZ0b/Eo7NXoIwCVFLG5JED3qgQz5/NEE+rOsjPs=",
version = "v1.0.1",
)
go_repository(
name = "com_github_valyala_bytebufferpool",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/valyala/bytebufferpool",
sum = "h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=",
version = "v1.0.0",
)
go_repository(
name = "com_github_valyala_fasthttp",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/valyala/fasthttp",
sum = "h1:dzZJf2IuMiclVjdw0kkT+f9u4YdrapbNyGAN47E/qnk=",
version = "v1.2.0",
)
go_repository(
name = "com_github_valyala_quicktemplate",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/valyala/quicktemplate",
sum = "h1:BaO1nHTkspYzmAjPXj0QiDJxai96tlcZyKcI9dyEGvM=",
version = "v1.2.0",
)
go_repository(
name = "com_github_valyala_tcplisten",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/valyala/tcplisten",
sum = "h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=",
version = "v0.0.0-20161114210144-ceec8f93295a",
)
go_repository(
name = "com_github_xiang90_probing",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/xiang90/probing",
sum = "h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=",
version = "v0.0.0-20190116061207-43a291ad63a2",
)
go_repository(
name = "com_sourcegraph_sqs_pbtypes",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "sourcegraph.com/sqs/pbtypes",
sum = "h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c=",
version = "v0.0.0-20180604144634-d3ebe8f20ae4",
)
go_repository(
name = "in_gopkg_alecthomas_kingpin_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/alecthomas/kingpin.v2",
sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=",
version = "v2.2.6",
)
go_repository(
name = "in_gopkg_errgo_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/errgo.v2",
sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
version = "v2.1.0",
)
go_repository(
name = "in_gopkg_ini_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/ini.v1",
sum = "h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=",
version = "v1.51.0",
)
go_repository(
name = "in_gopkg_resty_v1",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "gopkg.in/resty.v1",
sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=",
version = "v1.12.0",
)
go_repository(
name = "io_etcd_go_bbolt",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.etcd.io/bbolt",
sum = "h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=",
version = "v1.3.2",
)
go_repository(
name = "org_uber_go_atomic",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.uber.org/atomic",
sum = "h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=",
version = "v1.4.0",
)
go_repository(
name = "org_uber_go_multierr",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.uber.org/multierr",
sum = "h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=",
version = "v1.1.0",
)
go_repository(
name = "org_uber_go_zap",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "go.uber.org/zap",
sum = "h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=",
version = "v1.10.0",
)
go_repository(
name = "com_github_sendgrid_rest",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sendgrid/rest",
sum = "h1:HDib/5xzQREPq34lN3YMhQtMkdXxS/qLp5G3k9a5++4=",
version = "v2.4.1+incompatible",
)
go_repository(
name = "com_github_sendgrid_sendgrid_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/sendgrid/sendgrid-go",
sum = "h1:kosbgHyNVYVaqECDYvFVLVD9nvThweBd6xp7vaCT3GI=",
version = "v3.5.0+incompatible",
)
go_repository(
name = "io_k8s_utils",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "k8s.io/utils",
sum = "h1:I3f2hcBrepGRXI1z4sukzAb8w1R4eqbsHrAsx06LGYM=",
version = "v0.0.0-20200229041039-0a110f9eb7ab",
)
go_repository(
name = "com_github_google_go_github_v29",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/google/go-github/v29",
sum = "h1:IktKCTwU//aFHnpA+2SLIi7Oo9uhAzgsdZNbcAqhgdc=",
version = "v29.0.3",
)
go_repository(
name = "com_github_cpuguy83_go_md2man_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/cpuguy83/go-md2man/v2",
sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
version = "v2.0.0",
)
go_repository(
name = "com_github_russross_blackfriday_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/russross/blackfriday/v2",
sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
version = "v2.0.1",
)
go_repository(
name = "com_github_bazelbuild_rules_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/bazelbuild/rules_go",
sum = "h1:aBphnU/7PK1QqMDQAwHJH/YzOMt4SYc5kAxpmRHwW/s=",
version = "v0.23.1",
)
go_repository(
name = "com_github_yuin_goldmark",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/yuin/goldmark",
sum = "h1:j4d4Lw3zqZelDhBksEo3BnWg9xhXRQGJPPSL6OApZjI=",
version = "v1.1.30",
)
go_repository(
name = "com_github_hashicorp_errwrap",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/errwrap",
sum = "h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_multierror",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/hashicorp/go-multierror",
sum = "h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=",
version = "v1.0.0",
)
go_repository(
name = "com_github_census_instrumentation_opencensus_proto",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/census-instrumentation/opencensus-proto",
sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
version = "v0.2.1",
)
go_repository(
name = "com_github_envoyproxy_go_control_plane",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/envoyproxy/go-control-plane",
sum = "h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=",
version = "v0.9.1-0.20191026205805-5f8ba28d4473",
)
go_repository(
name = "com_github_envoyproxy_protoc_gen_validate",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/envoyproxy/protoc-gen-validate",
sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
version = "v0.1.0",
)
go_repository(
name = "com_github_go_git_gcfg",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-git/gcfg",
sum = "h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=",
version = "v1.5.0",
)
go_repository(
name = "com_github_go_git_go_billy_v5",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-git/go-billy/v5",
sum = "h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM=",
version = "v5.0.0",
)
go_repository(
name = "com_github_go_git_go_git_fixtures_v4",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-git/go-git-fixtures/v4",
sum = "h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc=",
version = "v4.0.1",
)
go_repository(
name = "com_github_go_git_go_git_v5",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/go-git/go-git/v5",
sum = "h1:k5RWPm4iJwYtfWoxIJy4wJX9ON7ihPeZZYC1fLYDnpg=",
version = "v5.0.0",
)
go_repository(
name = "com_github_niemeyer_pretty",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/niemeyer/pretty",
sum = "h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=",
version = "v0.0.0-20200227124842-a10e7caefd8e",
)
go_repository(
name = "com_github_saschagrunert_ccli",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/saschagrunert/ccli",
sum = "h1:yQhmn+jcWbGiiwGg4rWVqvmYcrFsbD0ghWjm9sENynU=",
version = "v1.0.2-0.20200423111659-b68f755cc0f5",
)
go_repository(
name = "com_github_saschagrunert_go_modiff",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/saschagrunert/go-modiff",
sum = "h1:IcLs5Ufd0ByqN2Bv2kQqKxrsUKRFs/3bLAoIBQg07Fk=",
version = "v1.2.0",
)
go_repository(
name = "com_github_urfave_cli_v2",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/urfave/cli/v2",
sum = "h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4=",
version = "v2.2.0",
)
| 37.298695 | 74 | 0.636781 |
ccdd867078ff0e43f0dfc8006adbb1872e698e22 | 4,144 | py | Python | build/x86/python/m5/internal/param_Switch.py | billionshang/gem5 | 18cc4294f32315595f865d07d1f33434e92b06b2 | [
"BSD-3-Clause"
] | null | null | null | build/x86/python/m5/internal/param_Switch.py | billionshang/gem5 | 18cc4294f32315595f865d07d1f33434e92b06b2 | [
"BSD-3-Clause"
] | null | null | null | build/x86/python/m5/internal/param_Switch.py | billionshang/gem5 | 18cc4294f32315595f865d07d1f33434e92b06b2 | [
"BSD-3-Clause"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
    def swig_import_helper():
        # Load the compiled '_param_Switch' extension that lives next to this
        # module; fall back to a plain import when imp cannot locate it.
        from os.path import dirname
        import imp
        mod_file = None
        try:
            mod_file, mod_path, mod_desc = imp.find_module(
                '_param_Switch', [dirname(__file__)])
        except ImportError:
            import _param_Switch
            return _param_Switch
        if mod_file is not None:
            try:
                module = imp.load_module(
                    '_param_Switch', mod_file, mod_path, mod_desc)
            finally:
                mod_file.close()
            return module
    _param_Switch = swig_import_helper()
    del swig_import_helper
else:
    import _param_Switch
del version_info
try:
    # Alias kept so generated code can reference properties uniformly.
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Non-static attribute assignment for SWIG proxies (may add new attrs)."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    """Non-static attribute lookup for SWIG proxies."""
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Compatibility shim for ancient interpreters: `_object` is the base class
# used by generated wrapper classes and `_newclass` records whether
# new-style classes / `property` are available (always 1 on modern Python).
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import m5.internal.param_BasicRouter
import m5.internal.param_ClockedObject
import m5.internal.param_ClockDomain
import m5.internal.param_SimObject
import m5.internal.drain
import m5.internal.serialize
class Switch(m5.internal.param_BasicRouter.BasicRouter):
    """SWIG proxy for the abstract Switch SimObject; not instantiable."""
    thisown = _swig_property(lambda x: x.this.own(),
                             lambda x, v: x.this.own(v),
                             doc='The membership flag')

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr

# Register the proxy class with the compiled extension module.
Switch_swigregister = _param_Switch.Switch_swigregister
Switch_swigregister(Switch)
class SwitchParams(m5.internal.param_BasicRouter.BasicRouterParams):
    """SWIG proxy for the parameter struct used to construct a Switch."""
    thisown = _swig_property(lambda x: x.this.own(),
                             lambda x, v: x.this.own(v),
                             doc='The membership flag')
    __repr__ = _swig_repr

    def create(self):
        # Build the C++ SimObject described by these parameters.
        return _param_Switch.SwitchParams_create(self)
    virt_nets = _swig_property(_param_Switch.SwitchParams_virt_nets_get,
                               _param_Switch.SwitchParams_virt_nets_set)

    def __init__(self):
        this = _param_Switch.new_SwitchParams()
        try:
            self.this.append(this)
        except Exception:
            self.this = this
    __swig_destroy__ = _param_Switch.delete_SwitchParams
    __del__ = lambda self: None

# Register the proxy class with the compiled extension module.
SwitchParams_swigregister = _param_Switch.SwitchParams_swigregister
SwitchParams_swigregister(SwitchParams)
| 30.028986 | 114 | 0.676641 |
f3524d7786f294f680962dddd5beb46c86fd8c5e | 15,143 | py | Python | experiments.py | joshsanz/learned_uncertainty | 2103126105dbe44cfe75fc22291ba669c1a162f3 | [
"MIT"
] | null | null | null | experiments.py | joshsanz/learned_uncertainty | 2103126105dbe44cfe75fc22291ba669c1a162f3 | [
"MIT"
] | null | null | null | experiments.py | joshsanz/learned_uncertainty | 2103126105dbe44cfe75fc22291ba669c1a162f3 | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('tkagg')
from matplotlib import pyplot as plt
plt.rc('figure', figsize=[10, 6])
import time
from data_models import *
from prediction_models import *
from control_models import *
def error(predicted_return, true_return):
    """Signed prediction error: predicted minus realized return."""
    return predicted_return - true_return
def get_gaussian_data(num_samples, true_asset_value, asset_covariance, seed=1):
    """Draw i.i.d. Gaussian asset-value samples.

    Args:
        num_samples: number of time steps to draw.
        true_asset_value: mean vector of the asset values.
        asset_covariance: (n_assets, n_assets) covariance matrix.
        seed: RNG seed for the sampler.

    Returns:
        (num_samples, n_assets) array of sampled asset values.
    """
    n_assets = asset_covariance.shape[0]
    noise = GaussianNoise(seed)
    distribution = (true_asset_value, asset_covariance)
    samples = np.zeros((num_samples, n_assets))
    for step in range(num_samples):
        samples[step] = noise.sample(distribution)
    return samples
def get_wiener_data(num_samples, true_asset_value, asset_covariance, seed=1):
    """Random-walk asset values: cumulative Gaussian increments + start value."""
    n_assets = asset_covariance.shape[0]
    increments = get_gaussian_data(
        num_samples, np.zeros((n_assets,)), asset_covariance, seed)
    return np.cumsum(increments, axis=0) + true_asset_value
def get_real_data():
    """Load the bundled historical market data.

    Returns:
        (labels, dates, samples) from the RealData source.
    """
    source = RealData()
    return source.labels(), source.dates(), source.sample()
def get_returns(data, investment_strategies, asset_predictions):
    """Per-step portfolio returns under each strategy.

    Args:
        data: (T, n_assets) observed asset values.
        investment_strategies: (T, n_assets) portfolio weights per step.
        asset_predictions: (T, n_assets) predicted asset values per step.

    Returns:
        (predicted_return, true_return), each of shape (T,). The first three
        entries stay zero (warm-up period with no strategy).
    """
    n_steps = investment_strategies.shape[0]
    predicted = np.zeros(n_steps)
    realized = np.zeros(n_steps)
    for step in range(3, n_steps):
        weights = investment_strategies[step]
        realized[step] = weights.dot(data[step])
        predicted[step] = weights.dot(asset_predictions[step])
    return predicted, realized
def run_gaussian_norm(data, num_samples, num_assets, pred_params, control_params):
    """Backtest the norm-regularized controller with a Gaussian estimator.

    Args:
        data: (num_samples, num_assets) observed asset values.
        num_samples: number of time steps.
        num_assets: number of assets.
        pred_params: {'window': int or None}; None means use all history.
        control_params: {'gamma': float, 'regularization': 1 or 2}.

    Returns:
        (predicted_asset_values, investment_strategies), both shaped
        (num_samples, num_assets); the first three rows remain zero.
    """
    estimator = UnbiasGaussianEstimator()
    controller = NormModel(
        num_assets=num_assets,
        gamma=control_params['gamma'],
        regularization=control_params['regularization'])
    window = pred_params['window']
    predictions = np.zeros((num_samples, num_assets))
    strategies = np.zeros((num_samples, num_assets))
    # Skip the first three steps: too little history for a stable estimate.
    for t in range(3, num_samples):
        start = 0 if window is None else max(0, t - window)
        mean_est, var_est = estimator.predict(data[start:t])
        predictions[t] = mean_est
        controller.run((mean_est, var_est))
        strategies[t] = controller.variables()
    return predictions, strategies
def run_gaussian_covar(data, num_samples, num_assets, pred_params, control_params):
    """Backtest the covariance-penalized controller with a Gaussian estimator.

    Args:
        data: (num_samples, num_assets) observed asset values.
        num_samples: number of time steps.
        num_assets: number of assets.
        pred_params: {'window': int or None}; None means use all history.
        control_params: {'gamma': float} — risk-aversion weight.

    Returns:
        (predicted_asset_values, investment_strategies), both shaped
        (num_samples, num_assets); the first three rows remain zero.
    """
    estimator = UnbiasGaussianEstimator()
    controller = CovarianceModel(num_assets=num_assets,
                                 gamma=control_params['gamma'])
    window = pred_params['window']
    predictions = np.zeros((num_samples, num_assets))
    strategies = np.zeros((num_samples, num_assets))
    # Skip the first three steps: too little history for a stable estimate.
    for t in range(3, num_samples):
        start = 0 if window is None else max(0, t - window)
        mean_est, var_est = estimator.predict(data[start:t])
        predictions[t] = mean_est
        controller.run((mean_est, var_est))
        strategies[t] = controller.variables()
    return predictions, strategies
def run_simple_gaussian_experiments(params, real_data=False, plot=False, seed=1):
    """Benchmark all controllers on i.i.d. Gaussian (or real) asset prices.

    Args:
        params: dict with 'gamma' and 'window'; for synthetic data also
            'asset_value' (mean prices) and 'asset_covariance'.
        real_data: when True, use bundled historical market data instead of
            sampling synthetic prices.
        plot: when True, show the input data, per-step returns, and a
            summary bar chart.
        seed: RNG seed for the synthetic sampler.

    Returns:
        dict mapping experiment name -> {'predicted_return', 'strategies',
        'predicted_values', 'true_return'}, plus 'true_values' (the data).
    """
    if real_data:
        data_labels, data_dates, data = get_real_data()
        print("date range:", data_dates[0][0], "-", data_dates[0][-1])
        num_samples = data.shape[0]
    else:
        num_samples = 100
        data = get_gaussian_data(
            num_samples, params['asset_value'], params['asset_covariance'], seed)
        # Keep prices strictly positive for the controllers.
        data = np.clip(data, 1e-3, None)
    gamma = params['gamma']
    window = params['window']
    num_assets = data.shape[1]

    if plot:
        if real_data:
            for asset_idx in range(num_assets):
                plt.plot(data.T[asset_idx], label=data_labels[asset_idx])
        else:
            plt.plot(data, label='Asset Values')
        plt.legend()
        plt.title('Input Data')
        plt.show()

    # (name, backtest function, prediction params, control params)
    experiments = [
        ("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
        ("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
        ("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
        ("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
        ("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
        ("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
    ]

    mean_returns = []
    std_returns = []
    results = {'true_values': data}
    for name, backtest, pred_params, control_params in experiments:
        predictions, strategies = backtest(
            data, num_samples, num_assets, pred_params, control_params)
        predicted_return, true_return = get_returns(data, strategies, predictions)
        results[name] = {
            'predicted_return': predicted_return,
            'strategies': strategies,
            'predicted_values': predictions,
            'true_return': true_return,
        }
        print(name, np.sum(true_return))
        mean_returns.append(np.mean(true_return))
        std_returns.append(np.std(true_return))
        if plot:
            # The realized return is what actually matters for comparison.
            plt.plot(np.arange(3, num_samples), true_return[3:],
                     label=name + ' true return', alpha=0.5)

    if plot:
        plt.legend()
        plt.show()
        plt.bar(np.arange(len(experiments)), height=mean_returns, yerr=std_returns)
        plt.show()
    return results
def run_ltv_gaussian_experiments(params, plot=False, seed=1):
    """Benchmark all controllers on linearly drifting Gaussian asset prices.

    The mean price path is ``asset_value + t * asset_delta``; Gaussian noise
    with the given covariance is added on top.

    Fix: the noise dimension was hard-coded as ``np.zeros((3,))``; it is now
    derived from the covariance matrix so any number of assets works.

    Args:
        params: dict with 'asset_value', 'asset_delta' (1 x n_assets row
            vector of per-step drifts), 'asset_covariance', 'gamma', 'window'.
        plot: when True, show input data, per-step returns, and a bar chart.
        seed: RNG seed for the sampler.

    Returns:
        dict mapping experiment name -> {'predicted_return', 'strategies',
        'predicted_values', 'true_return'}, plus 'true_values' (the data).
    """
    num_samples = 100
    true_asset_v0 = params['asset_value']
    true_asset_delta = params['asset_delta']
    asset_covariance = params['asset_covariance']
    gamma = params['gamma']
    window = params['window']
    num_assets = asset_covariance.shape[0]
    # Mean price path: v0 + t * delta for t = 0 .. num_samples-1.
    true_asset_value = true_asset_v0 + (
        true_asset_delta.T @ np.arange(0, num_samples).reshape(-1, 1).T).T
    data = get_gaussian_data(
        num_samples, np.zeros((num_assets,)), asset_covariance, seed) + true_asset_value
    # Keep prices strictly positive for the controllers.
    data = np.clip(data, 1e-3, None)

    if plot:
        plt.plot(data, label='Asset Values')
        plt.legend()
        plt.title('Input Data')
        plt.show()

    # (name, backtest function, prediction params, control params)
    experiments = [
        ("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
        ("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
        ("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
        ("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
        ("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
        ("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
    ]

    mean_returns = []
    std_returns = []
    results = {'true_values': data}
    for name, backtest, pred_params, control_params in experiments:
        predictions, strategies = backtest(
            data, num_samples, num_assets, pred_params, control_params)
        predicted_return, true_return = get_returns(data, strategies, predictions)
        results[name] = {
            'predicted_return': predicted_return,
            'strategies': strategies,
            'predicted_values': predictions,
            'true_return': true_return,
        }
        print(name, np.sum(true_return))
        mean_returns.append(np.mean(true_return))
        std_returns.append(np.std(true_return))
        if plot:
            plt.plot(np.arange(3, num_samples), true_return[3:],
                     label=name + ' true return', alpha=0.33)
            plt.plot(np.arange(3, num_samples), predicted_return[3:],
                     label=name + ' predicted return')

    if plot:
        plt.legend()
        plt.show()
        plt.bar(np.arange(len(experiments)), height=mean_returns, yerr=std_returns)
        plt.show()
    return results
def run_wiener_experiments(params, plot=False, seed=1):
    """Benchmark all controllers on random-walk (Wiener) asset prices.

    Args:
        params: dict with 'asset_value' (starting prices),
            'asset_covariance' (increment covariance), 'gamma', 'window'.
        plot: when True, show input data, per-step returns, and a bar chart.
        seed: RNG seed for the sampler.

    Returns:
        dict mapping experiment name -> {'predicted_return', 'strategies',
        'predicted_values', 'true_return'}, plus 'true_values' (the data).
    """
    num_samples = 100
    asset_covariance = params['asset_covariance']
    gamma = params['gamma']
    window = params['window']
    data = get_wiener_data(num_samples, params['asset_value'],
                           asset_covariance, seed)
    # Keep prices strictly positive for the controllers.
    data = np.clip(data, 1e-3, None)
    num_assets = data.shape[1]

    if plot:
        plt.plot(data, label='Asset Values')
        plt.legend()
        plt.title('Input Data')
        plt.show()

    # (name, backtest function, prediction params, control params)
    experiments = [
        ("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
        ("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
        ("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
        ("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
        ("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
        ("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
    ]

    mean_returns = []
    std_returns = []
    results = {'true_values': data}
    for name, backtest, pred_params, control_params in experiments:
        predictions, strategies = backtest(
            data, num_samples, num_assets, pred_params, control_params)
        predicted_return, true_return = get_returns(data, strategies, predictions)
        results[name] = {
            'predicted_return': predicted_return,
            'strategies': strategies,
            'predicted_values': predictions,
            'true_return': true_return,
        }
        print(name, np.sum(true_return))
        mean_returns.append(np.mean(true_return))
        std_returns.append(np.std(true_return))
        if plot:
            plt.plot(np.arange(3, num_samples), true_return[3:],
                     label=name + ' true return', alpha=0.33)
            plt.plot(np.arange(3, num_samples), predicted_return[3:],
                     label=name + ' predicted return')

    if plot:
        plt.legend()
        plt.show()
        plt.bar(np.arange(len(experiments)), height=mean_returns, yerr=std_returns)
        plt.show()
    return results
if __name__ == "__main__":
    # Smoke-run every experiment family, each with a fresh time-based seed.
    run_simple_gaussian_experiments(
        params={'gamma': 1, 'window': 10},
        real_data=True, plot=True, seed=int(time.time()))
    run_simple_gaussian_experiments(
        params={'asset_value': np.array([0.8, 1.0, 1.1]),
                'asset_covariance': np.diag([0.02, 0.01, 0.03]),
                'gamma': 1, 'window': 10},
        plot=True, seed=int(time.time()))
    run_ltv_gaussian_experiments(
        params={'asset_value': np.array([0.9, 1.2, 1.0]),
                'asset_covariance': np.diag([1.0, 1.0, 0.2]) * 0.02,
                'asset_delta': np.array([[0.002, -0.003, 0.001]]),
                'gamma': 1, 'window': 10},
        plot=True, seed=int(time.time()))
    run_wiener_experiments(
        params={'asset_value': np.array([0.9, 1.2, 1.0]),
                'asset_covariance': np.diag([1.0, 1.0, 0.2]) * 0.02,
                'gamma': 1, 'window': 10},
        plot=True, seed=int(time.time()))
| 44.801775 | 111 | 0.606947 |
253733e240cb538f9540b0c99590e485d399f71e | 3,902 | py | Python | voter_validation/search.py | lakshbhasin/VoterValidation | 2249f387046b5039b650e3b5c63b9d3d3b7aea8f | [
"Apache-2.0"
] | 6 | 2018-11-02T14:10:54.000Z | 2020-07-14T02:38:03.000Z | voter_validation/search.py | lakshbhasin/VoterValidation | 2249f387046b5039b650e3b5c63b9d3d3b7aea8f | [
"Apache-2.0"
] | 2 | 2021-06-10T20:56:46.000Z | 2021-06-10T20:58:54.000Z | voter_validation/search.py | lakshbhasin/VoterValidation | 2249f387046b5039b650e3b5c63b9d3d3b7aea8f | [
"Apache-2.0"
] | null | null | null | """
Search-related functions and variables
"""
import re
from django.contrib.postgres.search import TrigramSimilarity
from django.db.models import F, Case, When, FloatField
from .models import Voter, RegStatus
from .serializers import VoterSerializer
# Matches runs of one or more non-alphanumeric characters.
ALPHANUMERIC_REGEX = re.compile(r'\W+', re.UNICODE)

# Fields used in computing trigram similarity for Voter searches (fuzzy
# search). `full_name` is a combination of all name parts.
FULL_NAME_TRIGRAM_SIM_FIELDS = ['full_name']
RES_ADDR_TRIGRAM_SIM_FIELDS = ['res_addr']

# Ranking weights for the combined search score.
FULL_NAME_WEIGHT = 1.5
RES_ADDR_WEIGHT = 1.25
EXACT_ADDR_WEIGHT = 0.35  # extra weight if voter address contains input address


def normalize_query(query):
    """Lower-case *query* and collapse non-alphanumeric runs to single spaces.

    ``None`` passes through unchanged. Bug fix: ``re.split`` yields empty
    strings when the input starts or ends with a non-alphanumeric character
    (e.g. ``'hello!'`` -> ``['hello', '']``), which previously left stray
    leading/trailing spaces in the normalized query; empty fragments are
    now dropped.
    """
    if query is None:
        return None
    tokens = ALPHANUMERIC_REGEX.split(query.lower())
    return ' '.join(tok for tok in tokens if tok)
def construct_similarity_metric(fields, query):
    """Build a combined TrigramSimilarity expression over *fields*.

    Used for "candidate generation" in search: the per-field similarities
    against *query* are summed into a single annotatable expression.

    :param fields: list of model field names
    :param query: string search query
    :return: combined TrigramSimilarity expression, or ``None`` when
        *fields* is empty
    """
    terms = [TrigramSimilarity(field, query) for field in fields]
    if not terms:
        return None
    combined = terms[0]
    for term in terms[1:]:
        combined = combined + term
    return combined
def voter_search(name, address, res_zip, campaign_id=None,
                 debug=False, normalize=True, limit=60):
    """Fuzzy-search Active voters by name, address, and ZIP.

    Candidate generation uses trigram similarity on the full name and the
    residential address; ranking is a weighted sum of both similarities plus
    a bonus when the voter's address literally contains the query address.

    :param name: string full name of the voter (may be empty/None)
    :param address: string residential address, or part of it
    :param res_zip: string ZIP; when present, filters exactly
    :param campaign_id: if set, used to determine whether a voter has
        already been validated for that campaign
    :param debug: if True, include search debug info in the output
    :param normalize: if True, normalize the three query parameters first
    :param limit: if > 0, keep only the top ``limit`` results
    :return: list of JSON-serialized Voters, best match first
    """
    if normalize:
        name = normalize_query(name)
        address = normalize_query(address)
        res_zip = normalize_query(res_zip)

    # Cheap exact filters first: ZIP (when given), then Active status only.
    queryset = Voter.objects
    if res_zip:
        queryset = queryset.filter(res_addr_zip=res_zip)
    queryset = queryset.filter(reg_status=RegStatus.ACTIVE.value)

    # Trigram similarity expressions for candidate generation.
    addr_sim = construct_similarity_metric(RES_ADDR_TRIGRAM_SIM_FIELDS, address)
    name_sim = construct_similarity_metric(FULL_NAME_TRIGRAM_SIM_FIELDS, name)

    candidates = queryset.annotate(
        name_similarity=name_sim,
        addr_similarity=addr_sim,
        addr_exact_match=Case(
            When(res_addr__icontains=address, then=1.0),
            default=0.0,
            output_field=FloatField()
        ))
    # Drop obvious non-matches for whichever query fields were supplied.
    if name:
        candidates = candidates.filter(name_similarity__gte=0.005)
    if address:
        candidates = candidates.filter(addr_similarity__gte=0.005)

    # Rank by the weighted similarity score, best first.
    ranked = candidates.annotate(
        search_score=FULL_NAME_WEIGHT * F('name_similarity')
                     + RES_ADDR_WEIGHT * F('addr_similarity')
                     + EXACT_ADDR_WEIGHT * F('addr_exact_match')
    ).order_by('-search_score')
    if limit > 0:
        ranked = ranked[:limit]
    ranked = ranked.prefetch_related('validationrecord_set')
    return [VoterSerializer(v).serialize(debug=debug, campaign_id=campaign_id)
            for v in ranked]
| 34.839286 | 80 | 0.696053 |
093c5efebad0895cdd14a30096469ec2c390df82 | 22,063 | py | Python | nmt/model.py | deepmipt/nmt-chit-chat | 397c6c91747fc9a2c404a4852bb7e289ec4de542 | [
"Apache-2.0"
] | null | null | null | nmt/model.py | deepmipt/nmt-chit-chat | 397c6c91747fc9a2c404a4852bb7e289ec4de542 | [
"Apache-2.0"
] | null | null | null | nmt/model.py | deepmipt/nmt-chit-chat | 397c6c91747fc9a2c404a4852bb7e289ec4de542 | [
"Apache-2.0"
] | 5 | 2017-12-29T21:38:53.000Z | 2021-03-10T06:31:29.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic sequence-to-sequence model with dynamic RNN support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
from tensorflow.python.layers import core as layers_core
from . import model_helper
from .utils import iterator_utils
from .utils import misc_utils as utils
utils.check_tensorflow_version()

__all__ = ["BaseModel", "Model"]


class BaseModel(object):
  """Sequence-to-sequence base class.

  Owns the pieces shared by all seq2seq variants: embeddings, the output
  projection, graph construction, the optimizer/train op, summaries and the
  saver. Subclasses plug in the encoder and the decoder cell via the
  abstract hooks `_build_encoder` and `_build_decoder_cell`.
  """

  def __init__(self,
               hparams,
               mode,
               iterator,
               source_vocab_table,
               target_vocab_table,
               reverse_target_vocab_table=None,
               scope=None,
               single_cell_fn=None):
    """Create the model.

    Args:
      hparams: Hyperparameter configurations.
      mode: TRAIN | EVAL | INFER
      iterator: Dataset Iterator that feeds data.
      source_vocab_table: Lookup table mapping source words to ids.
      target_vocab_table: Lookup table mapping target words to ids.
      reverse_target_vocab_table: Lookup table mapping ids to target words. Only
        required in INFER mode. Defaults to None.
      scope: scope of the model.
      single_cell_fn: allow for adding customized cell. When not specified,
        we default to model_helper._single_cell
    """
    assert isinstance(iterator, iterator_utils.BatchedInput)
    self.iterator = iterator
    self.mode = mode
    self.src_vocab_table = source_vocab_table
    self.tgt_vocab_table = target_vocab_table
    self.src_vocab_size = hparams.src_vocab_size
    self.tgt_vocab_size = hparams.tgt_vocab_size
    self.num_layers = hparams.num_layers
    self.num_gpus = hparams.num_gpus
    self.time_major = hparams.time_major

    # Initializer
    initializer = model_helper.get_initializer(
        hparams.init_op, hparams.random_seed, hparams.init_weight)
    tf.get_variable_scope().set_initializer(initializer)

    # Embeddings
    # TODO(ebrevdo): Only do this if the mode is TRAIN?
    self.init_embeddings(hparams, scope)
    self.batch_size = tf.size(self.iterator.source_sequence_length)

    # Projection: maps decoder RNN outputs to vocabulary-sized logits.
    with tf.variable_scope(scope or "build_network"):
      with tf.variable_scope("decoder/output_projection"):
        self.output_layer = layers_core.Dense(
            hparams.tgt_vocab_size, use_bias=False, name="output_projection")

    # To make it flexible for external code to add other cell types
    # If not specified, we will later use model_helper._single_cell
    self.single_cell_fn = single_cell_fn

    ## Train graph
    res = self.build_graph(hparams, scope=scope)

    if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
      self.train_loss = res[1]
      self.word_count = tf.reduce_sum(
          self.iterator.source_sequence_length) + tf.reduce_sum(
              self.iterator.target_sequence_length)
    elif self.mode == tf.contrib.learn.ModeKeys.EVAL:
      self.eval_loss = res[1]
    elif self.mode == tf.contrib.learn.ModeKeys.INFER:
      self.infer_logits, _, self.final_context_state, self.sample_id = res
      self.sample_words = reverse_target_vocab_table.lookup(
          tf.to_int64(self.sample_id))

    if self.mode != tf.contrib.learn.ModeKeys.INFER:
      ## Count the number of predicted words for compute ppl.
      self.predict_count = tf.reduce_sum(
          self.iterator.target_sequence_length)

    ## Learning rate
    utils.print_out(" start_decay_step=%d, learning_rate=%g, decay_steps %d,"
                    "decay_factor %g" % (hparams.start_decay_step, hparams.learning_rate,
                                         hparams.decay_steps, hparams.decay_factor))
    self.global_step = tf.Variable(0, trainable=False)

    params = tf.trainable_variables()

    # Gradients and SGD update operation for training the model.
    # Arrage for the embedding vars to appear at the beginning.
    if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
      if hparams.optimizer == "sgd":
        # SGD keeps a constant LR until start_decay_step, then decays
        # exponentially in staircase steps.
        self.learning_rate = tf.cond(
            self.global_step < hparams.start_decay_step,
            lambda: tf.constant(hparams.learning_rate),
            lambda: tf.train.exponential_decay(
                hparams.learning_rate,
                (self.global_step - hparams.start_decay_step),
                hparams.decay_steps,
                hparams.decay_factor,
                staircase=True),
            name="learning_rate")
        opt = tf.train.GradientDescentOptimizer(self.learning_rate)
        tf.summary.scalar("lr", self.learning_rate)
      elif hparams.optimizer == "adam":
        # Guard against accidentally large Adam learning rates.
        assert float(
            hparams.learning_rate
        ) <= 0.001, "! High Adam learning rate %g" % hparams.learning_rate
        self.learning_rate = tf.constant(hparams.learning_rate)
        opt = tf.train.AdamOptimizer(self.learning_rate)

      gradients = tf.gradients(
          self.train_loss,
          params,
          colocate_gradients_with_ops=hparams.colocate_gradients_with_ops)

      clipped_gradients, gradient_norm_summary = model_helper.gradient_clip(
          gradients, max_gradient_norm=hparams.max_gradient_norm)

      self.update = opt.apply_gradients(
          zip(clipped_gradients, params), global_step=self.global_step)

      # Summary
      self.train_summary = tf.summary.merge([
          tf.summary.scalar("lr", self.learning_rate),
          tf.summary.scalar("train_loss", self.train_loss),
      ] + gradient_norm_summary)

    if self.mode == tf.contrib.learn.ModeKeys.INFER:
      self.infer_summary = self._get_infer_summary(hparams)

    # Saver
    self.saver = tf.train.Saver(tf.global_variables())

    # Print trainable variables
    utils.print_out("# Trainable variables")
    for param in params:
      utils.print_out(" %s, %s, %s" % (param.name, str(param.get_shape()),
                                       param.op.device))

  def init_embeddings(self, hparams, scope):
    """Init embeddings (encoder and decoder, possibly shared)."""
    self.embedding_encoder, self.embedding_decoder = (
        model_helper.create_emb_for_encoder_and_decoder(
            share_vocab=hparams.share_vocab,
            src_vocab_size=self.src_vocab_size,
            tgt_vocab_size=self.tgt_vocab_size,
            src_embed_size=hparams.num_units,
            tgt_embed_size=hparams.num_units,
            num_partitions=hparams.num_embeddings_partitions,
            scope=scope,))

  def train(self, sess):
    """Run one training step.

    Returns a list: [update_op result, train_loss, predict_count,
    train_summary, global_step, word_count, batch_size].
    """
    assert self.mode == tf.contrib.learn.ModeKeys.TRAIN
    return sess.run([self.update,
                     self.train_loss,
                     self.predict_count,
                     self.train_summary,
                     self.global_step,
                     self.word_count,
                     self.batch_size])

  def eval(self, sess):
    """Run one evaluation step; returns [eval_loss, predict_count, batch_size]."""
    assert self.mode == tf.contrib.learn.ModeKeys.EVAL
    return sess.run([self.eval_loss,
                     self.predict_count,
                     self.batch_size])

  def build_graph(self, hparams, scope=None):
    """Subclass must implement this method.

    Creates a sequence-to-sequence model with dynamic RNN decoder API.

    Args:
      hparams: Hyperparameter configurations.
      scope: VariableScope for the created subgraph; default "dynamic_seq2seq".

    Returns:
      A tuple of the form (logits, loss, final_context_state),
      where:
        logits: float32 Tensor [batch_size x num_decoder_symbols].
        loss: the total loss / batch_size.
        final_context_state: The final state of decoder RNN.

    Raises:
      ValueError: if encoder_type differs from mono and bi, or
        attention_option is not (luong | scaled_luong |
        bahdanau | normed_bahdanau).
    """
    utils.print_out("# creating %s graph ..." % self.mode)
    dtype = tf.float32
    num_layers = hparams.num_layers
    num_gpus = hparams.num_gpus

    with tf.variable_scope(scope or "dynamic_seq2seq", dtype=dtype):
      # Encoder
      encoder_outputs, encoder_state = self._build_encoder(hparams)

      ## Decoder
      logits, sample_id, final_context_state = self._build_decoder(
          encoder_outputs, encoder_state, hparams)

      ## Loss (skipped at inference time; placed on the last layer's device)
      if self.mode != tf.contrib.learn.ModeKeys.INFER:
        with tf.device(model_helper.get_device_str(num_layers - 1, num_gpus)):
          loss = self._compute_loss(logits)
      else:
        loss = None

      return logits, loss, final_context_state, sample_id

  @abc.abstractmethod
  def _build_encoder(self, hparams):
    """Subclass must implement this.

    Build and run an RNN encoder.

    Args:
      hparams: Hyperparameters configurations.

    Returns:
      A tuple of encoder_outputs and encoder_state.
    """
    pass

  def _build_encoder_cell(self, hparams, num_layers, num_residual_layers,
                          base_gpu=0):
    """Build a multi-layer RNN cell that can be used by encoder."""
    return model_helper.create_rnn_cell(
        unit_type=hparams.unit_type,
        num_units=hparams.num_units,
        num_layers=num_layers,
        num_residual_layers=num_residual_layers,
        forget_bias=hparams.forget_bias,
        dropout=hparams.dropout,
        num_gpus=hparams.num_gpus,
        mode=self.mode,
        base_gpu=base_gpu,
        single_cell_fn=self.single_cell_fn)

  def _build_decoder(self, encoder_outputs, encoder_state, hparams):
    """Build and run a RNN decoder with a final projection layer.

    Args:
      encoder_outputs: The outputs of encoder for every time step.
      encoder_state: The final state of the encoder.
      hparams: The Hyperparameters configurations.

    Returns:
      A tuple of final logits and final decoder state:
        logits: size [time, batch_size, vocab_size] when time_major=True.
    """
    tgt_sos_id = tf.cast(self.tgt_vocab_table.lookup(tf.constant(hparams.sos)),
                         tf.int32)
    tgt_eos_id = tf.cast(self.tgt_vocab_table.lookup(tf.constant(hparams.eos)),
                         tf.int32)
    num_layers = hparams.num_layers
    num_gpus = hparams.num_gpus

    iterator = self.iterator

    # maximum_iteration: The maximum decoding steps.
    if hparams.tgt_max_len_infer:
      maximum_iterations = hparams.tgt_max_len_infer
      utils.print_out(" decoding maximum_iterations %d" % maximum_iterations)
    else:
      # TODO(thangluong): add decoding_length_factor flag
      # Decode for at most 2x the longest source sequence in the batch.
      decoding_length_factor = 2.0
      max_encoder_length = tf.reduce_max(iterator.source_sequence_length)
      maximum_iterations = tf.to_int32(tf.round(
          tf.to_float(max_encoder_length) * decoding_length_factor))

    ## Decoder.
    with tf.variable_scope("decoder") as decoder_scope:
      cell, decoder_initial_state = self._build_decoder_cell(
          hparams, encoder_outputs, encoder_state,
          iterator.source_sequence_length)

      ## Train or eval
      if self.mode != tf.contrib.learn.ModeKeys.INFER:
        # decoder_emp_inp: [max_time, batch_size, num_units]
        target_input = iterator.target_input
        if self.time_major:
          target_input = tf.transpose(target_input)
        decoder_emb_inp = tf.nn.embedding_lookup(
            self.embedding_decoder, target_input)

        # Helper (teacher forcing: feeds the ground-truth target sequence)
        helper = tf.contrib.seq2seq.TrainingHelper(
            decoder_emb_inp, iterator.target_sequence_length,
            time_major=self.time_major)

        # Decoder
        my_decoder = tf.contrib.seq2seq.BasicDecoder(
            cell,
            helper,
            decoder_initial_state,)

        # Dynamic decoding
        outputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(
            my_decoder,
            output_time_major=self.time_major,
            swap_memory=True,
            scope=decoder_scope)

        sample_id = outputs.sample_id

        # Note: there's a subtle difference here between train and inference.
        # We could have set output_layer when create my_decoder
        #   and shared more code between train and inference.
        # We chose to apply the output_layer to all timesteps for speed:
        #   10% improvements for small models & 20% for larger ones.
        # If memory is a concern, we should apply output_layer per timestep.
        device_id = num_layers if num_layers < num_gpus else (num_layers - 1)
        with tf.device(model_helper.get_device_str(device_id, num_gpus)):
          logits = self.output_layer(outputs.rnn_output)

      ## Inference
      else:
        beam_width = hparams.beam_width
        length_penalty_weight = hparams.length_penalty_weight
        start_tokens = tf.fill([self.batch_size], tgt_sos_id)
        end_token = tgt_eos_id

        if beam_width > 0:
          my_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
              cell=cell,
              embedding=self.embedding_decoder,
              start_tokens=start_tokens,
              end_token=end_token,
              initial_state=decoder_initial_state,
              beam_width=beam_width,
              output_layer=self.output_layer,
              length_penalty_weight=length_penalty_weight)
        else:
          # Helper (greedy: feeds back the argmax token at each step)
          helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
              self.embedding_decoder, start_tokens, end_token)

          # Decoder
          my_decoder = tf.contrib.seq2seq.BasicDecoder(
              cell,
              helper,
              decoder_initial_state,
              output_layer=self.output_layer  # applied per timestep
          )

        # Dynamic decoding
        outputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(
            my_decoder,
            maximum_iterations=maximum_iterations,
            output_time_major=self.time_major,
            swap_memory=True,
            scope=decoder_scope)

        if beam_width > 0:
          # Beam search exposes predicted_ids only; no per-step logits.
          logits = tf.no_op()
          sample_id = outputs.predicted_ids
        else:
          logits = outputs.rnn_output
          sample_id = outputs.sample_id

    return logits, sample_id, final_context_state

  def get_max_time(self, tensor):
    """Return the size of the time axis — static if known, dynamic otherwise."""
    time_axis = 0 if self.time_major else 1
    return tensor.shape[time_axis].value or tf.shape(tensor)[time_axis]

  @abc.abstractmethod
  def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state,
                          source_sequence_length):
    """Subclass must implement this.

    Args:
      hparams: Hyperparameters configurations.
      encoder_outputs: The outputs of encoder for every time step.
      encoder_state: The final state of the encoder.
      source_sequence_length: sequence length of encoder_outputs.

    Returns:
      A tuple of a multi-layer RNN cell used by decoder
        and the intial state of the decoder RNN.
    """
    pass

  def _compute_loss(self, logits):
    """Compute optimization loss (masked cross-entropy, averaged per batch)."""
    target_output = self.iterator.target_output
    if self.time_major:
      target_output = tf.transpose(target_output)
    max_time = self.get_max_time(target_output)
    crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_output, logits=logits)
    # Mask out positions past each sequence's end so padding is not penalized.
    target_weights = tf.sequence_mask(
        self.iterator.target_sequence_length, max_time, dtype=logits.dtype)
    if self.time_major:
      target_weights = tf.transpose(target_weights)
    loss = tf.reduce_sum(
        crossent * target_weights) / tf.to_float(self.batch_size)
    return loss

  def _get_infer_summary(self, hparams):
    """Inference-time summary op; a no-op here (subclasses may override)."""
    return tf.no_op()

  def infer(self, sess):
    """Run inference; returns [logits, summary, sample_id, sample_words]."""
    assert self.mode == tf.contrib.learn.ModeKeys.INFER
    return sess.run([
        self.infer_logits, self.infer_summary, self.sample_id, self.sample_words
    ])

  def decode(self, sess):
    """Decode a batch.

    Args:
      sess: tensorflow session to use.

    Returns:
      A tuple consiting of outputs, infer_summary.
        outputs: of size [batch_size, time]
    """
    _, infer_summary, _, sample_words = self.infer(sess)

    # make sure outputs is of shape [batch_size, time]
    if self.time_major:
      sample_words = sample_words.transpose()
    return sample_words, infer_summary
class Model(BaseModel):
  """Sequence-to-sequence dynamic model.

  This class implements a multi-layer recurrent neural network as encoder,
  and a multi-layer recurrent neural network decoder.
  """

  def _build_encoder(self, hparams):
    """Build an encoder (uni-directional or bi-directional per hparams)."""
    num_layers = hparams.num_layers
    num_residual_layers = hparams.num_residual_layers

    iterator = self.iterator

    source = iterator.source
    if self.time_major:
      source = tf.transpose(source)

    with tf.variable_scope("encoder") as scope:
      dtype = scope.dtype
      # Look up embedding, emp_inp: [max_time, batch_size, num_units]
      encoder_emb_inp = tf.nn.embedding_lookup(
          self.embedding_encoder, source)

      # Encoder_outpus: [max_time, batch_size, num_units]
      if hparams.encoder_type == "uni":
        utils.print_out(" num_layers = %d, num_residual_layers=%d" %
                        (num_layers, num_residual_layers))
        cell = self._build_encoder_cell(
            hparams, num_layers, num_residual_layers)
        encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
            cell,
            encoder_emb_inp,
            dtype=dtype,
            sequence_length=iterator.source_sequence_length,
            time_major=self.time_major)
      elif hparams.encoder_type == "bi":
        # A bi-directional encoder uses half the layers in each direction.
        num_bi_layers = int(num_layers / 2)
        num_bi_residual_layers = int(num_residual_layers / 2)
        utils.print_out(" num_bi_layers = %d, num_bi_residual_layers=%d" %
                        (num_bi_layers, num_bi_residual_layers))

        encoder_outputs, bi_encoder_state = (
            self._build_bidirectional_rnn(
                inputs=encoder_emb_inp,
                sequence_length=iterator.source_sequence_length,
                dtype=dtype,
                hparams=hparams,
                num_bi_layers=num_bi_layers,
                num_bi_residual_layers=num_bi_residual_layers))

        if num_bi_layers == 1:
          encoder_state = bi_encoder_state
        else:
          # alternatively concat forward and backward states
          encoder_state = []
          for layer_id in range(num_bi_layers):
            encoder_state.append(bi_encoder_state[0][layer_id])  # forward
            encoder_state.append(bi_encoder_state[1][layer_id])  # backward
          encoder_state = tuple(encoder_state)
      else:
        raise ValueError("Unknown encoder_type %s" % hparams.encoder_type)
    return encoder_outputs, encoder_state

  def _build_bidirectional_rnn(self, inputs, sequence_length,
                               dtype, hparams,
                               num_bi_layers,
                               num_bi_residual_layers,
                               base_gpu=0):
    """Create and call biddirectional RNN cells.

    Args:
      num_residual_layers: Number of residual layers from top to bottom. For
        example, if `num_bi_layers=4` and `num_residual_layers=2`, the last 2 RNN
        layers in each RNN cell will be wrapped with `ResidualWrapper`.
      base_gpu: The gpu device id to use for the first forward RNN layer. The
        i-th forward RNN layer will use `(base_gpu + i) % num_gpus` as its
        device id. The `base_gpu` for backward RNN cell is `(base_gpu +
        num_bi_layers)`.

    Returns:
      The concatenated bidirectional output and the bidirectional RNN cell"s
      state.
    """
    # Construct forward and backward cells
    fw_cell = self._build_encoder_cell(hparams,
                                       num_bi_layers,
                                       num_bi_residual_layers,
                                       base_gpu=base_gpu)
    bw_cell = self._build_encoder_cell(hparams,
                                       num_bi_layers,
                                       num_bi_residual_layers,
                                       base_gpu=(base_gpu + num_bi_layers))

    bi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(
        fw_cell,
        bw_cell,
        inputs,
        dtype=dtype,
        sequence_length=sequence_length,
        time_major=self.time_major)

    # Concatenate forward and backward outputs along the feature axis.
    return tf.concat(bi_outputs, -1), bi_state

  def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state,
                          source_sequence_length):
    """Build an RNN cell that can be used by decoder."""
    # We only make use of encoder_outputs in attention-based models
    if hparams.attention:
      raise ValueError("BasicModel doesn't support attention.")

    num_layers = hparams.num_layers
    num_residual_layers = hparams.num_residual_layers

    cell = model_helper.create_rnn_cell(
        unit_type=hparams.unit_type,
        num_units=hparams.num_units,
        num_layers=num_layers,
        num_residual_layers=num_residual_layers,
        forget_bias=hparams.forget_bias,
        dropout=hparams.dropout,
        num_gpus=hparams.num_gpus,
        mode=self.mode,
        single_cell_fn=self.single_cell_fn)

    # For beam search, we need to replicate encoder infos beam_width times
    if self.mode == tf.contrib.learn.ModeKeys.INFER and hparams.beam_width > 0:
      decoder_initial_state = tf.contrib.seq2seq.tile_batch(
          encoder_state, multiplier=hparams.beam_width)
    else:
      decoder_initial_state = encoder_state

    return cell, decoder_initial_state
| 36.833055 | 81 | 0.660472 |
b82fef787478f897970bc3aac73fd67cfc2c18f3 | 2,859 | py | Python | suorganizer/suorganizer/settings/dev.py | mohammadasim/suorganiser | 26f11c944c34cd11b5961ec1b5eeb5cb3a7acdf8 | [
"Apache-2.0"
] | null | null | null | suorganizer/suorganizer/settings/dev.py | mohammadasim/suorganiser | 26f11c944c34cd11b5961ec1b5eeb5cb3a7acdf8 | [
"Apache-2.0"
] | 3 | 2021-06-04T22:51:25.000Z | 2021-09-22T18:51:53.000Z | suorganizer/suorganizer/settings/dev.py | mohammadasim/suorganiser | 26f11c944c34cd11b5961ec1b5eeb5cb3a7acdf8 | [
"Apache-2.0"
] | null | null | null | from .base import *
from ..log_filters import ManagementFilter
import socket
# Development settings: debug on, console email, local-memory (dummy) cache.
DEBUG = True

ALLOWED_HOSTS = ['app', '127.0.0.1']

# PostgreSQL connection, configured entirely from environment variables
# (get_env_variable comes from .base via the star import).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': get_env_variable('PGSQL_DB_NAME'),
        'USER': get_env_variable('PGSQL_DB_USER'),
        'PASSWORD': get_env_variable('PGSQL_DB_PASW'),
        'HOST': get_env_variable('PGSQL_DB_HOST'),
        'PORT': get_env_variable('PGSQL_DB_PORT'),
        # 'OPTIONS': {'sslmode': 'verify-full'},
    }
}

# Dev email settings, enables email output to the console
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SERVER_EMAIL = 'contact@django-unleashed.com'
DEFAULT_FROM_EMAIL = 'no-reply@django-unleashed.com'
EMAIL_SUBJECT_PREFIX = '[Startup Organizer]'
MANAGERS = (
    ('Us', 'ourselves@django-unleashed.com'),
)

SITE_ID = 1

# Auth app settings
# Redirect to blogs_post_list view
LOGIN_REDIRECT_URL = 'blogs_posts_list'
LOGIN_URL = 'dj-auth:login'

# Logger settings
verbose = (
    "[%(asctime)s] %(levelname)s"
    "[%(name)s:%(lineno)s] %(message)s"
)
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Drops migration SQL noise from the console output.
        'remove_migration_sql': {
            '()': ManagementFilter
        },
    },
    'handlers': {
        'console': {
            'filters': ['remove_migration_sql'],
            'class': 'logging.StreamHandler',
        },
    },
    'formatters': {
        'verbose': {
            'format': verbose,
            'datefmt': "%Y-%b-%d %H:%M:%S"
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': 'INFO',
            'formatter': 'verbose'
        }
    }
}

# django-debug-toolbar
INSTALLED_APPS += ('debug_toolbar',)
# NOTE(review): this adds nothing — debug_toolbar normally also needs
# 'debug_toolbar.middleware.DebugToolbarMiddleware' in MIDDLEWARE; confirm
# it is added in base settings or the toolbar will not render.
MIDDLEWARE += ()
# Resolve the container's own IP so the toolbar works inside Docker.
socket_hostname = socket.gethostname()
container_ip = socket.gethostbyname(socket_hostname)
INTERNAL_IPS = [container_ip]


def show_toolbar(request):
    # Always show the toolbar (dev-only; bypasses the INTERNAL_IPS check).
    return True


DEBUG_TOOLBAR_CONFIG = {
    'SHOW_TOOLBAR_CALLBACK': show_toolbar,
}

# Cache settings
"""
We use local memory cache, which simply keeps webpages
in memory. In deployment this setting will be changed.
It is possible to define multiple different caches, each of
which might fulfill a different purpose. Here we simply
define a single cache and called it default.
The BACKEND key tells the cache what kind of cache it is, while
the location gives the cache a unique identifier, used separately
from the name default. We also set how long we want the cache to
remember webpages.
By defining the CACHE_MIDDLEWARE_ALIAS we tell the middleware
which cache to use.
"""
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        'LOCATION': 'unique-snowflake',
        'TIMEOUT': 600,  # seconds == 10 minutes
    }
}
CACHE_MIDDLEWARE_ALIAS = 'default'
| 26.472222 | 65 | 0.651626 |
1dba67ea9ea31b98a41e31e11d19b057efbc8007 | 3,614 | py | Python | python/dset/hpatches_dataset.py | albutko/vlb | 437245c0991948eeb36a277937a7e67d389041e4 | [
"BSD-2-Clause"
] | 1 | 2019-12-13T18:08:50.000Z | 2019-12-13T18:08:50.000Z | python/dset/hpatches_dataset.py | albutko/vlb | 437245c0991948eeb36a277937a7e67d389041e4 | [
"BSD-2-Clause"
] | null | null | null | python/dset/hpatches_dataset.py | albutko/vlb | 437245c0991948eeb36a277937a7e67d389041e4 | [
"BSD-2-Clause"
] | null | null | null | from dset.dataset import SequenceDataset
import urllib
import tarfile
import os
import sys
import scipy
if sys.version_info[0] >= 3:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
class HPatches_Dataset(SequenceDataset):
    """HPatches full-sequence dataset (download, image paths, task setup)."""

    def __init__(self, root_dir='./datasets/', download_flag=False):
        """Create the dataset rooted at root_dir, optionally downloading it."""
        super(HPatches_Dataset, self).__init__(name='hpatches_full', root_dir=root_dir,
                                               download_flag=download_flag, set_task=True)

    def download(self):
        """Download the HPatches tarball, unpack it and rename the extracted
        directory to self.name under self.root_dir."""
        # Explicit existence checks replace the old os.stat/except/mkdir dance.
        if not os.path.isdir(self.root_dir):
            os.mkdir(self.root_dir)
        if not os.path.isdir('{}{}'.format(self.root_dir, self.name)):
            os.mkdir('{}{}'.format(self.root_dir, self.name))
        download_url = "{}".format(self.url)
        download_filename = "{}/{}.tar.gz".format(self.root_dir, self.name)
        try:
            print("Downloading HPatches from {}".format(download_url))
            urlretrieve(download_url, download_filename)
            tar = tarfile.open(download_filename)
            dd = tar.getnames()[0]
            tar.extractall('{}'.format(self.root_dir))
            tar.close()
            os.remove(download_filename)
            # Replace the (empty) placeholder dir with the extracted one.
            os.rmdir("{}{}".format(self.root_dir, self.name))
            os.rename("{}{}".format(self.root_dir, dd), "{}{}".format(self.root_dir, self.name))
        except Exception as e:
            # Best-effort: report the failure but do not crash the caller.
            print(str(e))
            print('Cannot download from {}.'.format(download_url))

    def read_image_data(self):
        """
        Load image data from vggh like dataset
        """
        for sequence_name in self.sequence_name_list:
            sequence = self.sequences[sequence_name]
            for image_id in sequence.image_id_list:
                sequence.image_dict[image_id].image_path = '{}{}/{}'.format(
                    self.root_dir, self.name, sequence.image_dict[image_id].filename)

    def set_task(self):
        """
        Deprecated. Populates each link's task dict with image ids/sizes,
        homography and description metadata.
        """
        # Plain "import scipy" does not reliably expose the ndimage submodule.
        # NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2;
        # this requires an older SciPy — confirm the pinned version.
        import scipy.ndimage
        for sequence_name in self.sequence_name_list:
            sequence = self.sequences[sequence_name]
            for link_id in sequence.link_id_list:
                this_link = sequence.link_dict[link_id]
                image_a = sequence.image_dict[this_link.source]
                image_b = sequence.image_dict[this_link.target]
                this_link.task['ima'] = str(image_a.idx)
                this_link.task['imb'] = str(image_b.idx)
                image_a_data = scipy.ndimage.imread(image_a.image_path)
                image_b_data = scipy.ndimage.imread(image_b.image_path)
                # Grayscale images have 2-D shape; treat them as 1-channel.
                try:
                    imga_ch = image_a_data.shape[2]
                except IndexError:
                    imga_ch = 1
                try:
                    imgb_ch = image_b_data.shape[2]
                except IndexError:
                    imgb_ch = 1
                this_link.task['ima_size'] = [image_a_data.shape[0], image_a_data.shape[1], imga_ch]
                this_link.task['imb_size'] = [image_b_data.shape[0], image_b_data.shape[1], imgb_ch]
                this_link.task['H'] = this_link.transform_matrix
                this_link.task['name'] = str(sequence.name)
                this_link.task['description'] = {}
                this_link.task['description']['impair'] = [str(image_a.idx), str(image_b.idx)]
                try:
                    this_link.task['description']['nuisanceName'] = str(sequence.label)
                    # Fixed NameError: was "imageb.label", silently swallowed
                    # by the bare except below.
                    this_link.task['description']['nuisanceValue'] = str(image_b.label)
                except AttributeError:
                    # Some sequences/images carry no label; skip the fields.
                    pass

    def read_link_data(self):
        """Delegate to the vggh-style link reader from the base class."""
        self.read_link_data_vggh()
| 38.446809 | 141 | 0.581074 |
82fb6f6138c723772395683a20e8a4de2411004f | 1,844 | py | Python | pettingzoo/butterfly/pistonball/manual_control.py | rodrigodelazcano/PettingZoo | 41fe43c7da2fd92fa8c6aa5a5a28083664092aa5 | [
"Apache-2.0"
] | null | null | null | pettingzoo/butterfly/pistonball/manual_control.py | rodrigodelazcano/PettingZoo | 41fe43c7da2fd92fa8c6aa5a5a28083664092aa5 | [
"Apache-2.0"
] | null | null | null | pettingzoo/butterfly/pistonball/manual_control.py | rodrigodelazcano/PettingZoo | 41fe43c7da2fd92fa8c6aa5a5a28083664092aa5 | [
"Apache-2.0"
] | null | null | null | import os
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
import time
import numpy as np
import pygame
def manual_control(**kwargs):
    """Drive the pistonball environment interactively with the keyboard.

    Controls: A/D select the previous/next piston, W moves the selected
    piston up and S moves it down, BACKSPACE resets the episode, ESC quits.
    kwargs are forwarded to the environment constructor.
    """
    from .pistonball import env as _env

    # flatten_obs is True by default
    env = _env(**kwargs)
    env.reset()

    # Use save_observation to save a dictionary of observations
    # save_observation(obs_dict, reverse_colors=False)
    # exit()

    i = 19  # index of the currently selected piston (starts at the last one)
    clock = pygame.time.Clock()
    start = time.time()
    done = False
    quit_game = 0
    pygame.key.set_repeat(20, 0)
    num_agents = len(env.agents)  # 20

    while not done:
        clock.tick(60)  # cap the loop at 60 FPS
        # Default every piston to action 1; overridden below for piston i.
        # NOTE(review): presumably 1 is the no-op action — confirm in env.
        action_list = np.array([1 for _ in range(num_agents)])

        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    quit_game = 1
                    break
                if event.key == pygame.K_BACKSPACE:
                    # Backspace to reset
                    env.reset()
                    i = 19
                # The 0.1 s guard debounces piston selection changes.
                if event.key == pygame.K_a and time.time() - start > 0.1:
                    i = (i - 1) if (i != 0) else i
                    start = time.time()
                if event.key == pygame.K_d and time.time() - start > 0.1:
                    i = (i + 1) if (i != num_agents - 1) else i
                    start = time.time()
                if event.key == pygame.K_s:
                    action_list[i] = 0
                if event.key == pygame.K_w:
                    action_list[i] = 2

        if quit_game:
            break

        # actions should be a dict of numpy arrays
        for a in action_list:
            env.step(a)

        pygame.event.pump()
        env.render()
        done = any(env.dones.values())
        # Uncomment next line to print FPS at which the game runs

    env.close()
| 30.229508 | 73 | 0.517896 |
2e2ec89ae4c3956278c89d02c8add83a65f79484 | 2,762 | py | Python | discord.py | christopher-roelofs/GameEventHub | 4ed43f27b5e0593e511613906ec2d9369483fae0 | [
"MIT"
] | null | null | null | discord.py | christopher-roelofs/GameEventHub | 4ed43f27b5e0593e511613906ec2d9369483fae0 | [
"MIT"
] | null | null | null | discord.py | christopher-roelofs/GameEventHub | 4ed43f27b5e0593e511613906ec2d9369483fae0 | [
"MIT"
] | null | null | null |
from pypresence import Presence
import time
import config
import event_manager
import logger
from string_util import replace_text
from time import sleep
# Module-level connection state shared by initialize()/update_activity().
SETTINGS = config.get_config()
# Must match the application id configured on the Discord developer portal.
client_id = SETTINGS["discord"]['application_id']
connected = False   # True once the RPC handshake succeeded
retries = 0         # connection attempts made so far
max_retries = 3     # give up after this many attempts
RPC = None          # pypresence client, set by initialize()
def initialize():
    """Connect to the local Discord client via pypresence.

    Retries recursively (with a 1 s pause) up to max_retries attempts,
    mutating the module-level RPC/connected/retries globals.
    """
    global RPC
    global retries
    global connected
    if retries < max_retries:
        try:
            logger.info("Attempting to connect to Discord ...")
            RPC = Presence(client_id, pipe=0)  # Initialize the client class
            RPC.connect()  # Start the handshake loop
            connected = True
            logger.info("Connected to Discord")
        except Exception as e:
            logger.error("Failed to connect to Discord: {}".format(repr(e)))
            retries += 1
            sleep(1)
            # Recurse to try again; bounded by the retries check above.
            initialize()
def update_activity(details, state, large_image=None, large_text=None, small_image=None, small_text=None, buttons=None):
    """Push a Rich Presence update to Discord.

    Args:
        details / state: primary and secondary status lines.
        large_image / large_text / small_image / small_text: asset keys and
            hover texts; empty strings are normalized to None because
            pypresence expects None for omitted fields.
        buttons: optional list of {"label", "url"} dicts.

    On failure, logs the error and — if the config enables "reconnect" —
    resets the retry counter and re-runs initialize().
    """
    if state == "":
        state = None
    if large_image == "":
        large_image = None
    if large_text == "":
        large_text = None
    if small_image == "":
        small_image = None
    if small_text == "":
        small_text = None
    try:
        RPC.update(details=details, state=state, start=time.time(), large_image=large_image, large_text=large_text,
                   small_image=small_image, small_text=small_text, buttons=buttons)  # Set the presence
    except Exception as e:
        # Fixed typo in the logged message: "Faild" -> "Failed".
        logger.error(f"Failed to update Discord status: {e}")
        if "reconnect" in SETTINGS["discord"]:
            if SETTINGS["discord"]["reconnect"]:
                global retries
                retries = 0
                initialize()
def handle_event(event, action):
    """Render a game event as a Discord presence update.

    action supplies templates (details/state/image/text fields) whose
    placeholders are filled from event.tokens via replace_text; image keys
    are lowercased to match Discord asset naming. No-op until connected.
    """
    if connected:
        buttons = None
        if "buttons" in action:
            buttons = action["buttons"]
        update_activity(replace_text(action["details_text"], event.tokens), replace_text(action["state_text"], event.tokens), replace_text(action["large_image"], event.tokens).lower(
        ), replace_text(action["large_text"], event.tokens), replace_text(action["small_image"], event.tokens).lower(), replace_text(action["small_text"], event.tokens), buttons)
# Register this module with the event hub so it receives game events.
event_manager.subscribers["Discord"] = {}
event_manager.subscribers["Discord"]["initialize"] = lambda: initialize()
event_manager.subscribers["Discord"]["handle_event"] = {
    'function': handle_event, 'arg': "args"}

# Standalone demo: loop a fixed presence with two test buttons.
if __name__ == "__main__":
    while True:
        buttons = [{"label": "Button 1", "url": "https://www.google.com"},
                   {"label": "Button 2", "url": "https://www.google.com"}]
        update_activity("Console", "Game", "segacd", None, None, None, buttons)
        time.sleep(100000)
| 35.410256 | 182 | 0.636857 |
639b568b78a2938ecdb6a1772237a69eecdfc52c | 5,678 | py | Python | Controller.py | atelieryou/robot_car | 05d1f10dbcc0199794372f26b239c17d69716429 | [
"MIT"
] | null | null | null | Controller.py | atelieryou/robot_car | 05d1f10dbcc0199794372f26b239c17d69716429 | [
"MIT"
] | null | null | null | Controller.py | atelieryou/robot_car | 05d1f10dbcc0199794372f26b239c17d69716429 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pygame
import bleak
import asyncio
import queue
import threading
import pygame.locals as pl
import struct
import sys
# ble_simple_peripheral.pyにあるものと一致するようにする
UUID = "6e400002-b5a3-f393-e0a9-e50e24dcca9e"
# スレッド間で共有するためのグローバル変数
power = queue.Queue()
# Communication coroutine
async def central(loop):
    """Scan for BLE peripherals, let the user pick one, then stream motor
    power pairs from the shared `power` queue to the device over GATT."""
    try:
        while True:
            # Scan for devices and let the user select one
            while True:
                print("Scanning...")
                devices = await bleak.discover()
                for i, v in enumerate(devices):
                    print(f"{i}: {v}")
                inp = input("Select a device to connect : ")
                # If nothing was entered, scan again
                if inp == "":
                    continue
                # Something was entered
                else:
                    # Look the device up in the scan results
                    try:
                        device = devices[int(inp)]
                    # Report an error if the number is out of range or the
                    # input cannot be converted to int
                    except (IndexError, ValueError):
                        print("Input a valid number.")
                    # Device obtained successfully; leave the selection loop
                    else:
                        break
            print(f"Selected : {device}")
            try:
                # Connect to the device
                async with bleak.BleakClient(device, loop=loop) as client:
                    # Wait until the connection is established
                    await client.is_connected()
                    print("Connected")
                    while True:
                        # Convert (LeftPower, RightPower) to bytes
                        # NOTE: power.get() blocks the event loop until the
                        # Controller thread queues a value.
                        d = power.get()
                        data = struct.pack("hh", d[0], d[1])
                        # Send the data
                        await client.write_gatt_char(UUID, data)
                        # Show what was sent
                        print(f"Write: {data}")
            # If an error occurred
            except (AttributeError, bleak.BleakError):
                # Show an error message and fall back to scanning
                print("Error disconnected.")
    # Exit on Ctrl + C
    except KeyboardInterrupt:
        pass
# Controller
class Controller:
    """Pygame window that turns keyboard/joystick input into left/right
    motor powers and pushes them onto the shared `power` queue."""

    def __init__(self):
        # Initialize pygame
        pygame.init()
        # Holds the joystick object (stays None if none is connected)
        self.joystick = None
        # Initialize the window
        self.screen = pygame.display.set_mode((500, 500))
        # Clock used to keep the FPS constant
        self.clock = pygame.time.Clock()
        # Input buffer: [x, y], each clamped to [-1.0, 1.0]
        self.input = [0.0, 0.0]
        # FPS (frames per second)
        self.FPS = 30
        # Last powers sent; start with an impossible value so the first
        # computed pair is always queued.
        self.pre_power = (-10.0, -10.0)
        # Background color
        self.BGColor = (200, 200, 200)
        # Keys used for input
        self.UPKEYS = [pl.K_w, pl.K_UP]
        self.DOWNKEYS = [pl.K_s, pl.K_DOWN]
        self.LEFTKEYS = [pl.K_a, pl.K_LEFT]
        self.RIGHTKEYS = [pl.K_d, pl.K_RIGHT]
        # Pick up any connected joystick
        self.init_joystick()

    def init_joystick(self):
        # If at least one joystick is connected, use joystick 0
        pygame.joystick.init()
        if pygame.joystick.get_count() > 0:
            self.joystick = pygame.joystick.Joystick(0)
            self.joystick.init()

    # Read keyboard and joystick input
    def input_handler(self):
        """Refresh self.input from pressed keys and joystick axes."""
        # Get the currently pressed keys
        pressed_keys = pygame.key.get_pressed()
        self.input = [0.0, 0.0]
        for upkey in self.UPKEYS:
            if pressed_keys[upkey]:
                self.input[1] = 1.0
                break
        for downkey in self.DOWNKEYS:
            if pressed_keys[downkey]:
                self.input[1] = -1.0
                break
        for leftkey in self.LEFTKEYS:
            if pressed_keys[leftkey]:
                self.input[0] = -1.0
                break
        for rightkey in self.RIGHTKEYS:
            if pressed_keys[rightkey]:
                self.input[0] = 1.0
                break
        # If a joystick is present, add its axes (y axis inverted)
        if self.joystick:
            self.input[0] += self.joystick.get_axis(0)
            self.input[1] -= self.joystick.get_axis(1)
        self.input[0] = max(-1.0, min(1.0, self.input[0]))
        self.input[1] = max(-1.0, min(1.0, self.input[1]))

    def apply_powers(self):
        """Mix throttle (y) and steering (x) into left/right powers in
        [-1023, 1023] and queue them when they changed."""
        lp = max(-1023, min(1023, int(self.input[1] * 1023 + self.input[0] * 512)))
        rp = max(-1023, min(1023, int(self.input[1] * 1023 + self.input[0] * -512)))
        if (lp, rp) != self.pre_power:
            power.put((lp, rp))
            self.pre_power = (lp, rp)

    def draw(self):
        """Draw the current input position as a red dot on the window."""
        scr_size = self.screen.get_size()
        sf = pygame.Surface(size=scr_size)
        sf.fill(self.BGColor)
        pygame.draw.circle(sf, (255, 0, 0), (int(scr_size[0] / 2 + self.input[0] * 200),
                                             int(scr_size[1] / 2 + self.input[1] * -200)), 10)
        self.screen.blit(sf, self.screen.get_rect())
        pygame.display.update()

    def run(self):
        """Main loop: read input, queue powers, redraw; ESC or window close exits."""
        while True:
            self.clock.tick(self.FPS)
            self.input_handler()
            self.apply_powers()
            self.draw()
            for event in pygame.event.get():
                if event.type == pl.QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == pl.KEYDOWN:
                    if event.key == pl.K_ESCAPE:
                        pygame.quit()
                        sys.exit()
if __name__ == '__main__':
    # Run the asyncio networking coroutine on a background daemon thread so
    # the pygame loop can own the main thread.
    loop = asyncio.get_event_loop()
    cent = threading.Thread(target=loop.run_until_complete, args=(central(loop),))
    # Thread.setDaemon() is deprecated since Python 3.10; set the attribute.
    cent.daemon = True
    cent.start()
    con = Controller()
    con.run()
| 31.898876 | 95 | 0.482388 |
5ec8e9e2d4644020b7358b62d7d069d152172a24 | 4,711 | py | Python | config.py | CSHF-yhhhh/A-Simple-Packet-Filtering-Firewall-in-Linux | febff6825fb12e5220289b1a666b6a8d0eccf94f | [
"MIT"
] | null | null | null | config.py | CSHF-yhhhh/A-Simple-Packet-Filtering-Firewall-in-Linux | febff6825fb12e5220289b1a666b6a8d0eccf94f | [
"MIT"
] | null | null | null | config.py | CSHF-yhhhh/A-Simple-Packet-Filtering-Firewall-in-Linux | febff6825fb12e5220289b1a666b6a8d0eccf94f | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
@文件:config.py
@作者: CSHF[https://github.com/CSHF-yhhhh]
@说明: 防火墙的配置文件
在运行之前,先将通目录下的 write.c 编译成write.so, 命令为 gcc write.c -shared -o write.so
"""
import ctypes
import subprocess
import threading
# Load the C helper module (build it with: gcc write.c -shared -o write.so).
C_WRITE = ctypes.cdll.LoadLibrary("./write.so")
def __OpenDev(path="/dev/cshfFirewall"):
    """
    Purpose: open the firewall character device via the C helper.
    args{
        path: path of the device node
    }
    return: the device's file descriptor; -1 if opening failed
    """
    dev = C_WRITE.OpenDev(ctypes.c_char_p(bytes(path, "utf-8")))
    if dev < 0:  # opening the device failed
        print("Failed to open the device...", dev)
        return -1
    else:
        return dev
def __WriteLineToDev(dev, msg):
    """
    Purpose: write one message line to the device.
    args{
        dev: file descriptor of the device
        msg: the message to write
    }
    return: number of bytes written; a value < 0 means the write failed
    """
    rst = C_WRITE.WriteDev(dev, ctypes.c_char_p(bytes(msg, "utf-8")))
    if rst < 0:
        print("Failed to write [{}].".format(msg.replace("\n", "")))
    else:
        print("Successed to write [{}].".format(msg.replace("\n", "")))
    return rst
def __CloseDev(dev):
    """
    Purpose: close the device.
    args{
        dev: file descriptor of the device
    }
    return: None
    """
    C_WRITE.CloseDev(dev)
# Global configuration dictionary.
config = {
    "filter_mode": "",  # filter mode: blacklist / whitelist
    "filter_rules": [],  # list of filter rules
    "log_tag": "[cshfFirewall]:",  # tag prefixed to kernel log output lines
    "log_quit": False,
}
config["log_tag_len"] = len(config["log_tag"])  # length of that tag
__config_lock = threading.Lock()
protocol_map = {"1": "ICMP", "6": "TCP", "17": "UDP"}
def ReadConfig(call_back, set_title):
    """Load the filter mode and rules from config.txt into the global config.

    The first line of config.txt is the filter mode; every following line is
    one space-separated rule.

    args{
        call_back: optional callable invoked with each parsed rule list
        set_title: callable invoked with the filter mode string
    }
    return: None
    """
    # Lock and file are context managers, so neither is leaked if parsing
    # raises (the original released/closed them only on the success path).
    with __config_lock:
        with open("config.txt", "r") as config_file:
            config_data = config_file.readlines()
        config["filter_mode"] = config_data.pop(0).replace("\n", "")
        config["filter_rules"].clear()
        set_title(config["filter_mode"])
        for line_str in config_data:
            new_line = line_str.split(" ")
            new_line[-1] = new_line[-1].replace("\n", "")
            if call_back:
                call_back(new_line)
            config["filter_rules"].append(new_line)
        print(config["filter_rules"])
def SaveConfig(f_mod):
    """Persist the configuration and push it to the firewall driver.

    Each save clears the driver's rule set, then rewrites the filter mode and
    every rule both to config.txt and to the device.

    args{
        f_mod: blacklist or whitelist
    }
    return: None
    """
    with __config_lock:
        config["filter_mode"] = f_mod
        dev = __OpenDev()  # connect to the driver
        try:
            with open("config.txt", "w") as config_file:
                __WriteLineToDev(dev, "c" + "\n")  # clear existing driver rules
                config_file.write(config["filter_mode"] + "\n")
                __WriteLineToDev(dev, config["filter_mode"] + "\n")  # set filter mode
                for rule in config["filter_rules"]:
                    rule_msg = "{} {} {} {} {} {}\n".format(*rule)
                    config_file.write(rule_msg)
                    __WriteLineToDev(dev, rule_msg)  # push each rule
        finally:
            # Always close the device (and, via `with`, the file and lock)
            # even if a write raises — the original leaked all three on error.
            __CloseDev(dev)
def addConfig(
    ftype="1", protocol="1", src="0.0.0.0", dst="0.0.0.0", sport="0", dport="0"
):
    """Add one filter rule; return 0 if the rule already exists, 1 on success."""
    new_rule = [str(field) for field in (ftype, protocol, src, dst, sport, dport)]
    __config_lock.acquire()
    already_present = new_rule in config["filter_rules"]
    if not already_present:
        config["filter_rules"].append(new_rule)
    # The lock must be released before SaveConfig(), which re-acquires it.
    __config_lock.release()
    if already_present:
        return 0
    SaveConfig(config["filter_mode"])  # persist and push to the driver
    return 1
def GetLog(call_back=None):
    """Follow the kernel log and forward firewall-tagged lines to call_back.

    Runs `journalctl -f` and, for every output line containing the configured
    tag, passes the text after the tag to call_back. Returns when
    config["log_quit"] becomes True.

    args{
        call_back: optional callable receiving each matching log line
    }
    return: None
    """
    output = subprocess.Popen("journalctl -f", shell=True, stdout=subprocess.PIPE)
    # readline() yields bytes, so the EOF sentinel must be b"".  The original
    # used the str "b", which never compares equal to bytes, so the loop
    # busy-spun on empty reads after the subprocess exited.
    for ol in iter(output.stdout.readline, b""):
        if config["log_quit"]:
            return
        ol = str(ol)  # repr of the bytes line, e.g. "b'... [cshfFirewall]: msg\\n'"
        i = ol.find(config["log_tag"])
        if i >= 0:
            # Strip the trailing "\n'" left over from the bytes repr.
            msg = ol[i + config["log_tag_len"] :].replace("\\n'", "")
            if call_back:
                call_back(msg + "\n")
def FirewallStatus():
    """Return 1 if the firewall kernel module is currently loaded, else 0."""
    # Pass the command as a single string: with shell=True a *list* argument
    # sends only the first element to `sh -c` (extra items become shell
    # positional args), so the original worked only by accident.
    out_put = subprocess.run(
        "lsmod", encoding="utf-8", shell=True, stdout=subprocess.PIPE
    ).stdout
    if "firewall" in out_put:
        return 1
    else:
        return 0
def StartFirewall():
    """
    Purpose: start the firewall.

    If the module is not already loaded, insert the kernel module
    (hard-coded .ko path) and push the current configuration to it.
    args{
    }
    return:
    """
    if not FirewallStatus():
        subprocess.run(["insmod", "/home/cshf/code/firewall-master/firewall.ko"])
        SaveConfig(config["filter_mode"])
def StopFirewall():
    """
    Purpose: stop the firewall by removing its kernel module.
    args{
    }
    return:
    """
    subprocess.run(["rmmod", "firewall.ko"])
| 22.649038 | 82 | 0.56782 |
71bad1f9f4dbf3b0e98353c19760f8ac2f092861 | 169 | py | Python | working-library/background/AvatarAPI/urls.py | FredCof/Fresh-supermarket-Online | 25c1cb28f5b5dc1f85e53ee7de5b055de438c491 | [
"Apache-2.0"
] | 2 | 2021-03-12T16:35:27.000Z | 2021-03-12T16:35:34.000Z | working-library/background/AvatarAPI/urls.py | FredCof/Fresh-supermarket-Online | 25c1cb28f5b5dc1f85e53ee7de5b055de438c491 | [
"Apache-2.0"
] | 4 | 2021-03-19T13:16:08.000Z | 2021-06-09T19:26:37.000Z | working-library/background/AvatarAPI/urls.py | FredCof/Fresh-supermarket-Online | 25c1cb28f5b5dc1f85e53ee7de5b055de438c491 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from . import views
# URL routes for avatar upload and retrieval.
# NOTE(review): django.conf.urls.url() treats these as *unanchored* regexes and
# was removed in Django 4.0 — consider re_path()/path(); confirm Django version.
urlpatterns = [
    url('api/avatar/upload', views.AvatarUpload),
    url('api/avatar/load', views.AvatarLoad),
]
| 18.777778 | 49 | 0.704142 |
b37355a205f508d228a55738171e4e11b43b3bff | 2,039 | py | Python | sdk/python/pulumi_azure_native/blockchain/list_location_consortiums.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/blockchain/list_location_consortiums.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/blockchain/list_location_consortiums.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Public names exported by this generated module.
__all__ = [
    'ListLocationConsortiumsResult',
    'AwaitableListLocationConsortiumsResult',
    'list_location_consortiums',
]
@pulumi.output_type
class ListLocationConsortiumsResult:
    """
    Collection of the consortium payload.
    """
    def __init__(__self__, value=None):
        # Validate the invoke-result shape before storing it.
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.ConsortiumResponse']]:
        """
        Gets or sets the collection of consortiums.
        """
        return pulumi.get(self, "value")
class AwaitableListLocationConsortiumsResult(ListLocationConsortiumsResult):
    # Awaitable wrapper: __await__ makes the result usable with `await`
    # while immediately yielding the already-resolved value.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListLocationConsortiumsResult(
            value=self.value)
def list_location_consortiums(location_name: Optional[str] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListLocationConsortiumsResult:
    """
    Collection of the consortium payload.
    API Version: 2018-06-01-preview.


    :param str location_name: Location Name.
    """
    __args__ = dict()
    __args__['locationName'] = location_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the Azure Native provider and wrap the raw result in the
    # awaitable output type.
    __ret__ = pulumi.runtime.invoke('azure-native:blockchain:listLocationConsortiums', __args__, opts=opts, typ=ListLocationConsortiumsResult).value

    return AwaitableListLocationConsortiumsResult(
        value=__ret__.value)
| 31.369231 | 148 | 0.693968 |
2d9238871635bd063d8508c6659280a426ba4e08 | 2,124 | py | Python | zerver/webhooks/appveyor/tests.py | acguglielmo/zulip | 97ed71ca699c3abd1c2584cc3c6a8370430ae2f6 | [
"Apache-2.0"
] | 1 | 2019-04-14T20:31:55.000Z | 2019-04-14T20:31:55.000Z | zerver/webhooks/appveyor/tests.py | hcxiong/zulip | bf22eefedebd50b25f32b22988217c13a89b65d1 | [
"Apache-2.0"
] | 4 | 2021-11-15T17:52:28.000Z | 2022-02-27T10:43:15.000Z | zerver/webhooks/appveyor/tests.py | hcxiong/zulip | bf22eefedebd50b25f32b22988217c13a89b65d1 | [
"Apache-2.0"
] | 9 | 2019-11-04T18:59:29.000Z | 2022-03-22T17:46:37.000Z | # -*- coding: utf-8 -*-
from zerver.lib.test_classes import WebhookTestCase
class AppveyorHookTests(WebhookTestCase):
    # Stream name and webhook URL template used by the test harness.
    STREAM_NAME = 'appveyor'
    URL_TEMPLATE = "/api/v1/external/appveyor?api_key={api_key}&stream={stream}"
    FIXTURE_DIR_NAME = 'appveyor'

    def test_appveyor_build_success_message(self) -> None:
        """
        Tests if appveyor build success notification is handled correctly
        """
        expected_subject = "Hubot-DSC-Resource"
        expected_message = ("[Build Hubot-DSC-Resource 2.0.59 completed](https://ci.appveyor.com/project"
                            "/joebloggs/hubot-dsc-resource/build/2.0.59)\n"
                            "Commit [c06e208b47](https://github.com/joebloggs/Hubot-DSC-Resource"
                            "/commit/c06e208b47) by Joe Bloggs on 6/12/2018"
                            " 6:22 PM: Increment version number.\n"
                            "Build Started: 9/9/2018 7:04 PM\n"
                            "Build Finished: 9/9/2018 7:06 PM")
        self.send_and_test_stream_message('appveyor_build_success', expected_subject, expected_message)

    def test_appveyor_build_failure_message(self) -> None:
        """
        Tests if appveyor build failure notification is handled correctly
        """
        expected_subject = "Hubot-DSC-Resource"
        expected_message = ("[Build Hubot-DSC-Resource 2.0.59 failed](https://ci.appveyor.com/project"
                            "/joebloggs/hubot-dsc-resource/build/2.0.59)\n"
                            "Commit [c06e208b47](https://github.com/joebloggs/Hubot-DSC-Resource"
                            "/commit/c06e208b47) by Joe Bloggs on 6/12/2018"
                            " 6:22 PM: Increment version number.\n"
                            "Build Started: 9/9/2018 7:04 PM\n"
                            "Build Finished: 9/9/2018 7:06 PM")
        self.send_and_test_stream_message('appveyor_build_failure', expected_subject, expected_message)

    # Loads the named JSON fixture from the appveyor fixture directory.
    def get_body(self, fixture_name: str) -> str:
        return self.webhook_fixture_data("appveyor", fixture_name, file_type="json")
| 51.804878 | 105 | 0.60452 |
75622bcf396409de2b81fd6c1cb0ad6922ccd962 | 1,146 | py | Python | backend/src/accounts/migrations/0001_initial.py | BenK93/GeThaText | c74aacf9d58734670588f0ee2965b2471022edd5 | [
"MIT"
] | null | null | null | backend/src/accounts/migrations/0001_initial.py | BenK93/GeThaText | c74aacf9d58734670588f0ee2965b2471022edd5 | [
"MIT"
] | null | null | null | backend/src/accounts/migrations/0001_initial.py | BenK93/GeThaText | c74aacf9d58734670588f0ee2965b2471022edd5 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-09-26 14:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the custom Account model table.
    # Auto-generated by Django — field definitions must stay in sync with the
    # Account model; `username` is the primary key instead of an auto id.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('email', models.EmailField(max_length=60, unique=True, verbose_name='email')),
                ('username', models.CharField(max_length=30, primary_key=True, serialize=False, unique=True)),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
                ('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
                ('is_admin', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_superuser', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 35.8125 | 110 | 0.582897 |
5c3dea1c70042ca97ceaf89763bf23452179f7a7 | 240 | py | Python | scrapli_paramiko/transport/__init__.py | scrapli/scrapli_paramiko | b21b9f7aa00c8cdd9a882fc9b77765e936ccb22d | [
"MIT"
] | null | null | null | scrapli_paramiko/transport/__init__.py | scrapli/scrapli_paramiko | b21b9f7aa00c8cdd9a882fc9b77765e936ccb22d | [
"MIT"
] | 11 | 2020-07-12T15:50:45.000Z | 2021-01-24T15:48:44.000Z | scrapli_paramiko/transport/__init__.py | scrapli/scrapli_paramiko | b21b9f7aa00c8cdd9a882fc9b77765e936ccb22d | [
"MIT"
] | null | null | null | """scrapli_paramiko.transport"""
from scrapli_paramiko.transport.miko import MIKO_TRANSPORT_ARGS as TRANSPORT_ARGS
from scrapli_paramiko.transport.miko import MikoTransport as Transport
# Public re-exports: the paramiko-backed transport and its argument spec.
__all__ = (
    "Transport",
    "TRANSPORT_ARGS",
)
| 26.666667 | 81 | 0.8 |
f61c046f82f85ff8f3df0821181d95345b4adb2a | 1,914 | py | Python | utils/unicode_csv.py | spudmind/spud | 86e44bca4efd3cd6358467e1511048698a45edbc | [
"MIT"
] | 2 | 2015-04-11T12:22:41.000Z | 2016-08-18T11:12:06.000Z | utils/unicode_csv.py | spudmind/spud | 86e44bca4efd3cd6358467e1511048698a45edbc | [
"MIT"
] | 84 | 2015-01-22T14:33:49.000Z | 2015-04-01T23:15:29.000Z | utils/unicode_csv.py | spudmind/spud | 86e44bca4efd3cd6358467e1511048698a45edbc | [
"MIT"
] | 1 | 2015-04-16T03:10:39.000Z | 2015-04-16T03:10:39.000Z | # -*- coding: utf-8 -*-
import csv, codecs, cStringIO
'''
UTF-8 CSV - Straight from the python 2 docs:
https://docs.python.org/2/library/csv.html#examples
'''
class UTF8Recoder:
    """
    Iterator that reads an encoded stream and reencodes the input to UTF-8
    """
    def __init__(self, f, encoding):
        # Wrap the raw byte stream in a decoder for the source encoding.
        self.reader = codecs.getreader(encoding)(f)

    def __iter__(self):
        return self

    # Python 2 iterator protocol: decode one line, hand it on as UTF-8 bytes.
    def next(self):
        return self.reader.next().encode("utf-8")
class UnicodeReader:
    """
    A CSV reader which will iterate over lines in the CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Recode the input to UTF-8 so the Python 2 csv module can handle it.
        f = UTF8Recoder(f, encoding)
        self.reader = csv.reader(f, dialect=dialect, **kwds)

    # Python 2 iterator protocol: return the next row as unicode cells.
    def next(self):
        row = self.reader.next()
        return [unicode(s, "utf-8") for s in row]

    def __iter__(self):
        return self
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    Python 2 recipe (csv module docs): rows are serialised to an in-memory
    UTF-8 queue first, then re-encoded to the target encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
dac068b221c7d04db08068dfb4d691026518870b | 6,717 | py | Python | colour/colorimetry/tests/test_generation.py | tjdcs/colour | 09413da71b5da57408eb812797c5db1300d4791a | [
"BSD-3-Clause"
] | null | null | null | colour/colorimetry/tests/test_generation.py | tjdcs/colour | 09413da71b5da57408eb812797c5db1300d4791a | [
"BSD-3-Clause"
] | null | null | null | colour/colorimetry/tests/test_generation.py | tjdcs/colour | 09413da71b5da57408eb812797c5db1300d4791a | [
"BSD-3-Clause"
] | null | null | null | # !/usr/bin/env python
"""Define the unit tests for the :mod:`colour.colorimetry.generation` module."""
import numpy as np
import unittest
from colour.colorimetry.generation import (
sd_constant,
sd_zeros,
sd_ones,
msds_constant,
msds_zeros,
msds_ones,
sd_gaussian_normal,
sd_gaussian_fwhm,
sd_single_led_Ohno2005,
sd_multi_leds_Ohno2005,
)
# Module attribution metadata and the explicit public API of this test module.
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"

__all__ = [
    "TestSdConstant",
    "TestSdZeros",
    "TestSdOnes",
    "TestMsdsConstant",
    "TestMsdsZeros",
    "TestMsdsOnes",
    "TestSdGaussianNormal",
    "TestSdGaussianFwhm",
    "TestSdSingleLedOhno2005",
    "TestSdMultiLedsOhno2005",
]
class TestSdConstant(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.sd_constant` definition unit
    tests methods.
    """

    def test_sd_constant(self):
        """Test :func:`colour.colorimetry.generation.sd_constant` definition."""

        # The constant value must hold at both range endpoints (360, 780)
        # and an interior sample (555).
        sd = sd_constant(np.pi)

        self.assertAlmostEqual(sd[360], np.pi, places=7)

        self.assertAlmostEqual(sd[555], np.pi, places=7)

        self.assertAlmostEqual(sd[780], np.pi, places=7)
class TestSdZeros(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.sd_zeros` definition unit
    tests methods.
    """

    def test_sd_zeros(self):
        """
        Test :func:`colour.colorimetry.generation.sd_zeros`
        definition.
        """

        # Zero-valued distribution: endpoints and an interior sample.
        sd = sd_zeros()

        self.assertEqual(sd[360], 0)

        self.assertEqual(sd[555], 0)

        self.assertEqual(sd[780], 0)
class TestSdOnes(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.sd_ones` definition unit
    tests methods.
    """

    def test_sd_ones(self):
        """Test :func:`colour.colorimetry.generation.sd_ones` definition."""

        # Unit-valued distribution: endpoints and an interior sample.
        sd = sd_ones()

        self.assertEqual(sd[360], 1)

        self.assertEqual(sd[555], 1)

        self.assertEqual(sd[780], 1)
class TestMsdsConstant(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.msds_constant` definition unit
    tests methods.
    """

    def test_msds_constant(self):
        """Test :func:`colour.colorimetry.generation.msds_constant` definition."""

        # The constant must be replicated across all three labelled channels.
        msds = msds_constant(np.pi, labels=["a", "b", "c"])

        np.testing.assert_almost_equal(
            msds[360], np.array([np.pi, np.pi, np.pi]), decimal=7
        )

        np.testing.assert_almost_equal(
            msds[555], np.array([np.pi, np.pi, np.pi]), decimal=7
        )

        np.testing.assert_almost_equal(
            msds[780], np.array([np.pi, np.pi, np.pi]), decimal=7
        )
class TestMsdsZeros(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.msds_zeros` definition unit
    tests methods.
    """

    def test_msds_zeros(self):
        """
        Test :func:`colour.colorimetry.generation.msds_zeros`
        definition.
        """

        # All three labelled channels must be zero at every sampled point.
        msds = msds_zeros(labels=["a", "b", "c"])

        np.testing.assert_equal(msds[360], np.array([0, 0, 0]))

        np.testing.assert_equal(msds[555], np.array([0, 0, 0]))

        np.testing.assert_equal(msds[780], np.array([0, 0, 0]))
class TestMsdsOnes(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.msds_ones` definition unit
    tests methods.
    """

    def test_msds_ones(self):
        """Test :func:`colour.colorimetry.generation.msds_ones` definition."""

        # All three labelled channels must be one at every sampled point.
        msds = msds_ones(labels=["a", "b", "c"])

        np.testing.assert_equal(msds[360], np.array([1, 1, 1]))

        np.testing.assert_equal(msds[555], np.array([1, 1, 1]))

        np.testing.assert_equal(msds[780], np.array([1, 1, 1]))
class TestSdGaussianNormal(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.sd_gaussian_normal`
    definition unit tests methods.
    """

    def test_sd_gaussian_normal(self):
        """
        Test :func:`colour.colorimetry.generation.sd_gaussian_normal`
        definition.
        """

        # Peak at 555 with width parameter 25; 530 and 580 lie one width from
        # the peak and 0.6065306... = exp(-1/2), the Gaussian value one
        # standard deviation out — consistent with 25 being the std deviation.
        sd = sd_gaussian_normal(555, 25)

        self.assertAlmostEqual(sd[530], 0.606530659712633, places=7)

        self.assertAlmostEqual(sd[555], 1, places=7)

        self.assertAlmostEqual(sd[580], 0.606530659712633, places=7)
class TestSdGaussianFwhm(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.sd_gaussian_fwhm` definition
    unit tests methods.
    """

    def test_sd_gaussian_fwhm(self):
        """
        Test :func:`colour.colorimetry.generation.sd_gaussian_fwhm`
        definition.
        """

        # Peak at 555 with FWHM parameter 25; 0.3678794... = 1/e at the
        # samples 25 units either side of the peak.
        sd = sd_gaussian_fwhm(555, 25)

        self.assertAlmostEqual(sd[530], 0.367879441171443, places=7)

        self.assertAlmostEqual(sd[555], 1, places=7)

        self.assertAlmostEqual(sd[580], 0.367879441171443, places=7)
class TestSdSingleLedOhno2005(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.sd_single_led_Ohno2005`
    definition unit tests methods.
    """

    def test_sd_single_led_Ohno2005(self):
        """
        Test :func:`colour.colorimetry.generation.sd_single_led_Ohno2005`
        definition.
        """

        # Single LED peaking at 555 with width parameter 25; the profile is
        # symmetric about the peak (equal values at 530 and 580).
        sd = sd_single_led_Ohno2005(555, 25)

        self.assertAlmostEqual(sd[530], 0.127118445056538, places=7)

        self.assertAlmostEqual(sd[555], 1, places=7)

        self.assertAlmostEqual(sd[580], 0.127118445056538, places=7)
class TestSdMultiLedsOhno2005(unittest.TestCase):
    """
    Define :func:`colour.colorimetry.generation.sd_multi_leds_Ohno2005`
    definition unit tests methods.
    """

    def test_sd_multi_leds_Ohno2005(self):
        """
        Test :func:`colour.colorimetry.generation.sd_multi_leds_Ohno2005`
        definition.
        """

        # Three LEDs (peaks 457/530/615, widths 20/30/20) with an explicit
        # third array — presumably per-LED intensity ratios; confirm against
        # the sd_multi_leds_Ohno2005 signature.
        sd = sd_multi_leds_Ohno2005(
            np.array([457, 530, 615]),
            np.array([20, 30, 20]),
            np.array([0.731, 1.000, 1.660]),
        )

        self.assertAlmostEqual(sd[500], 0.129513248576116, places=7)

        self.assertAlmostEqual(sd[570], 0.059932156222703, places=7)

        self.assertAlmostEqual(sd[640], 0.116433257970624, places=7)

        # Same LEDs relying on the default value of the third argument.
        sd = sd_multi_leds_Ohno2005(
            np.array([457, 530, 615]),
            np.array([20, 30, 20]),
        )

        self.assertAlmostEqual(sd[500], 0.130394510062799, places=7)

        self.assertAlmostEqual(sd[570], 0.058539618824187, places=7)

        self.assertAlmostEqual(sd[640], 0.070140708922879, places=7)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 25.637405 | 82 | 0.643889 |
cb073b903b25d6164f6cd02abd2a1732b2eb5130 | 443 | py | Python | test.py | Neutrinos00/RawImagesSorter | e91b62f56a272f483fe8b571cddec5058bad544a | [
"MIT"
] | null | null | null | test.py | Neutrinos00/RawImagesSorter | e91b62f56a272f483fe8b571cddec5058bad544a | [
"MIT"
] | null | null | null | test.py | Neutrinos00/RawImagesSorter | e91b62f56a272f483fe8b571cddec5058bad544a | [
"MIT"
] | null | null | null | import exifread
import pyperclip
def main():
    # Path of the RAW image whose EXIF orientation is inspected.
    path_name='./Photos/DSC01311.ARW'
    pyperclip.copy(path_name)  # also place the path on the clipboard
    with open(path_name, 'rb') as f:
        tags = exifread.process_file(f)
        for key, val in tags.items():
            if 'Image Orientation' in key:
                # Print 1 for an image tagged "Rotated 90 CCW", 0 otherwise.
                if 'Rotated 90 CCW' in str(val):
                    print(1)
                else:
                    print(0)
# Run the orientation check when executed as a script.
if __name__ == '__main__':
    main()
| 21.095238 | 48 | 0.528217 |
a87466ce290681e4f133c34cf983315d4bbb9d40 | 724 | py | Python | models/mnist/__init__.py | Frankzd/distiller | 931138e2d23989ef9305712e5aa147ae3dff53de | [
"Apache-2.0"
] | null | null | null | models/mnist/__init__.py | Frankzd/distiller | 931138e2d23989ef9305712e5aa147ae3dff53de | [
"Apache-2.0"
] | null | null | null | models/mnist/__init__.py | Frankzd/distiller | 931138e2d23989ef9305712e5aa147ae3dff53de | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This package contains MNIST image classification models for pytorch"""
from .simplenet_mnist import *
from .lenet_mnist import *
| 34.47619 | 74 | 0.76105 |
dfc4b4ce7bdabf82853d46ce05901f7cf9eb2655 | 45 | py | Python | tarkov/globals_/__init__.py | JustEmuTarkov/jet_py | 2f352b5e6f5d88594d08afc46e9458e919271788 | [
"MIT"
] | 14 | 2021-02-24T02:32:48.000Z | 2022-01-03T05:51:45.000Z | tarkov/globals_/__init__.py | JustEmuTarkov/jet_py | 2f352b5e6f5d88594d08afc46e9458e919271788 | [
"MIT"
] | 1 | 2021-03-08T09:02:29.000Z | 2021-03-08T09:02:29.000Z | tarkov/globals_/__init__.py | JustEmuTarkov/jet_py | 2f352b5e6f5d88594d08afc46e9458e919271788 | [
"MIT"
] | 4 | 2021-04-14T01:47:01.000Z | 2021-11-29T02:18:32.000Z | from .models import GlobalsModel, ItemPreset
| 22.5 | 44 | 0.844444 |
94d64eed9c6fa75a1847356be5a48cbd90b125d1 | 698 | py | Python | cleaning_utils/human_number.py | rezashabrang/cleaner-utils | d0181ee1a131e877e66d943323018617aa488433 | [
"MIT"
] | 4 | 2021-12-28T16:37:00.000Z | 2022-02-17T12:02:02.000Z | cleaning_utils/human_number.py | rezashabrang/cleaner-utils | d0181ee1a131e877e66d943323018617aa488433 | [
"MIT"
] | null | null | null | cleaning_utils/human_number.py | rezashabrang/cleaner-utils | d0181ee1a131e877e66d943323018617aa488433 | [
"MIT"
] | 1 | 2021-09-20T12:32:30.000Z | 2021-09-20T12:32:30.000Z | """Human-Readable Numbers Format."""
# ------------------------ Import libraries and functions ---------------------
from typing import Union
# ---------------------------- function definition ----------------------------
def human_number(number: Union[int, float]) -> str:
    """Format a number with comma thousands separators.

    A general-purpose formatter that inserts a separator every three digits
    to make large numbers human-readable.

    Args:
        number: A single int or float value (i.e., a scalar).

    Returns:
        The number rendered as a <class 'str'> with grouped digits.

    Examples:
        >>> human_number(165485613)
        '165,485,613'
    """
    return format(number, ",")
| 26.846154 | 79 | 0.560172 |
22945b4e10a697d586b97dd163856d9a7b25437f | 586 | py | Python | final_design/tests/Flashlight/flash.py | DFEC-R2D2/r2d2 | 9b64233865ebfe9f0ca3f1b400b55cc8d6494adf | [
"MIT"
] | 3 | 2017-08-02T04:46:17.000Z | 2020-03-30T12:11:13.000Z | final_design/tests/Flashlight/flash.py | DFEC-R2D2/r2d2 | 9b64233865ebfe9f0ca3f1b400b55cc8d6494adf | [
"MIT"
] | null | null | null | final_design/tests/Flashlight/flash.py | DFEC-R2D2/r2d2 | 9b64233865ebfe9f0ca3f1b400b55cc8d6494adf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
##############################################
# The MIT License (MIT)
# Copyright (c) 2016 Kevin Walchko
# see LICENSE for full details
##############################################
from __future__ import print_function
import time
import sys
sys.path.append('../../python')
from library import FlashlightGPIO
# Flashlight on GPIO pin 26, driven by a simple interactive menu.
f = FlashlightGPIO(26)

while(True):
    choice = input("Enter:\n 1 - On\n 2 - Off\n 3 - Quit\n>> ")
    # Robustness fix: a non-numeric entry previously raised ValueError and
    # crashed the menu; re-prompt instead.
    try:
        choice = int(choice)
    except ValueError:
        print("Please enter 1, 2 or 3")
        continue
    if choice == 1:
        print("LED ON")
        f.set(True)
    elif choice == 2:
        print("LED OFF")
        f.set(False)
    elif choice == 3:
        break
f6bb917f09c0bd18503f4886b5ef4248243ae115 | 1,577 | py | Python | script.py | elnuraliyev111/hashmap_vs_linkedlist | d536da0fb26199b7e70bb8a78f4b078789200801 | [
"MIT"
] | null | null | null | script.py | elnuraliyev111/hashmap_vs_linkedlist | d536da0fb26199b7e70bb8a78f4b078789200801 | [
"MIT"
] | null | null | null | script.py | elnuraliyev111/hashmap_vs_linkedlist | d536da0fb26199b7e70bb8a78f4b078789200801 | [
"MIT"
] | null | null | null | from hashmap import HashMap
from linkedlist import LinkedList
N = 6
#Insert Data Into HashMap
my_hashmap = HashMap(N)
my_hashmap.assign("Zachary", "Sunburn Sickness")
my_hashmap.assign("Elise", "Severe Nausea")
my_hashmap.assign("Mimi", "Stomach Flu")
my_hashmap.assign("Devan", "Malaria")
my_hashmap.assign("Gary", "Bacterial Meningitis")
my_hashmap.assign("Neeknaz", "Broken Cheekbone")
#Insert Data into LinkedList
my_linked_list = LinkedList(["Zachary", "Sunburn Sickness"])
my_linked_list.insert_beginning(["Elise", "Severe Nausea"])
my_linked_list.insert_beginning(["Mimi", "Stomach Flu"])
my_linked_list.insert_beginning(["Devan", "Malaria"])
my_linked_list.insert_beginning(["Gary", "Bacterial Meningitis"])
my_linked_list.insert_beginning(["Neeknaz", "Broken Cheekbone"])
#Get Zachary's Disease from a HashMap
hashmap_zachary_disease = my_hashmap.retrieve("Zachary") #Checkpoint 1
print("Zachary's disease is {0}".format(hashmap_zachary_disease))
hashmap_runtime = "1" #Checkpoint 2
print("The runtime of retrieving a value from a hashmap is O({0})\n\n".format(hashmap_runtime))
#Get Zachary's Disease from a Linked List
#Write Code here for Checkpoint 3
current = my_linked_list.head_node
while current:
if(current.get_value()[0] == "Zachary"):
linked_list_zachary_disease = current.get_value()[1]
current = current.next_node
print("Zachary's disease is {0}".format(linked_list_zachary_disease))
linked_list_runtime = "N" #Checkpoint 4
print("The runtime of retrieving the first value added to a linked list is O({0})\n\n".format(linked_list_runtime))
| 38.463415 | 115 | 0.773621 |
9ef0a289a77e581047769762c3ec569d6a305593 | 15,490 | py | Python | plugins/modules/oci_os_management_managed_instance_facts.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_os_management_managed_instance_facts.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_os_management_managed_instance_facts.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: this module is community-supported and still in
# preview status.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_os_management_managed_instance_facts
short_description: Fetches details about one or multiple ManagedInstance resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple ManagedInstance resources in Oracle Cloud Infrastructure
- Returns a list of all Managed Instances.
- If I(managed_instance_id) is specified, the details of a single ManagedInstance will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
managed_instance_id:
description:
- OCID for the managed instance
- Required to get a specific managed_instance.
type: str
aliases: ["id"]
compartment_id:
description:
- The ID of the compartment in which to list resources.
- Required to list multiple managed_instances.
type: str
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
- "Example: `My new resource`"
type: str
aliases: ["name"]
sort_order:
description:
- The sort order to use, either 'asc' or 'desc'.
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is
ascending. If no value is specified TIMECREATED is default.
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
os_family:
description:
- The OS family for which to list resources.
type: str
choices:
- "LINUX"
- "WINDOWS"
- "ALL"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific managed_instance
oci_os_management_managed_instance_facts:
# required
managed_instance_id: "ocid1.managedinstance.oc1..xxxxxxEXAMPLExxxxxx"
- name: List managed_instances
oci_os_management_managed_instance_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
sort_order: ASC
sort_by: TIMECREATED
os_family: LINUX
"""
RETURN = """
managed_instances:
description:
- List of ManagedInstance resources
returned: on success
type: complex
contains:
display_name:
description:
- Managed Instance identifier
returned: on success
type: str
sample: display_name_example
id:
description:
- OCID for the managed instance
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
description:
description:
- Information specified by the user about the managed instance
returned: on success
type: str
sample: description_example
last_checkin:
description:
- Time at which the instance last checked in
returned: on success
type: str
sample: last_checkin_example
last_boot:
description:
- Time at which the instance last booted
returned: on success
type: str
sample: last_boot_example
updates_available:
description:
- Number of updates available to be installed
returned: on success
type: int
sample: 56
os_name:
description:
- Operating System Name
- Returned for get operation
returned: on success
type: str
sample: os_name_example
os_version:
description:
- Operating System Version
- Returned for get operation
returned: on success
type: str
sample: os_version_example
os_kernel_version:
description:
- Operating System Kernel Version
- Returned for get operation
returned: on success
type: str
sample: os_kernel_version_example
compartment_id:
description:
- OCID for the Compartment
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
status:
description:
- status of the managed instance.
returned: on success
type: str
sample: NORMAL
parent_software_source:
description:
- the parent (base) Software Source attached to the Managed Instance
- Returned for get operation
returned: on success
type: complex
contains:
name:
description:
- software source name
returned: on success
type: str
sample: name_example
id:
description:
- software source identifier
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
child_software_sources:
description:
- list of child Software Sources attached to the Managed Instance
- Returned for get operation
returned: on success
type: complex
contains:
name:
description:
- software source name
returned: on success
type: str
sample: name_example
id:
description:
- software source identifier
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
managed_instance_groups:
description:
- The ids of the managed instance groups of which this instance is a
member.
- Returned for get operation
returned: on success
type: complex
contains:
id:
description:
- unique identifier that is immutable on creation
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- User friendly name
returned: on success
type: str
sample: display_name_example
os_family:
description:
- The Operating System type of the managed instance.
returned: on success
type: str
sample: LINUX
is_reboot_required:
description:
- Indicates whether a reboot is required to complete installation of updates.
returned: on success
type: bool
sample: true
notification_topic_id:
description:
- OCID of the ONS topic used to send notification to users
- Returned for get operation
returned: on success
type: str
sample: "ocid1.notificationtopic.oc1..xxxxxxEXAMPLExxxxxx"
ksplice_effective_kernel_version:
description:
- The ksplice effective kernel version
- Returned for get operation
returned: on success
type: str
sample: ksplice_effective_kernel_version_example
is_data_collection_authorized:
description:
- True if user allow data collection for this instance
- Returned for get operation
returned: on success
type: bool
sample: true
autonomous:
description:
- if present, indicates the Managed Instance is an autonomous instance. Holds all the Autonomous specific information
- Returned for get operation
returned: on success
type: complex
contains:
is_auto_update_enabled:
description:
- True if daily updates are enabled
returned: on success
type: bool
sample: true
security_updates_available:
description:
- Number of security type updates available to be installed
- Returned for get operation
returned: on success
type: int
sample: 56
bug_updates_available:
description:
- Number of bug fix type updates available to be installed
- Returned for get operation
returned: on success
type: int
sample: 56
enhancement_updates_available:
description:
- Number of enhancement type updates available to be installed
- Returned for get operation
returned: on success
type: int
sample: 56
other_updates_available:
description:
- Number of non-classified updates available to be installed
- Returned for get operation
returned: on success
type: int
sample: 56
scheduled_job_count:
description:
- Number of scheduled jobs associated with this instance
- Returned for get operation
returned: on success
type: int
sample: 56
work_request_count:
description:
- Number of work requests associated with this instance
- Returned for get operation
returned: on success
type: int
sample: 56
sample: [{
"display_name": "display_name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"description": "description_example",
"last_checkin": "last_checkin_example",
"last_boot": "last_boot_example",
"updates_available": 56,
"os_name": "os_name_example",
"os_version": "os_version_example",
"os_kernel_version": "os_kernel_version_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"status": "NORMAL",
"parent_software_source": {
"name": "name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
},
"child_software_sources": [{
"name": "name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
}],
"managed_instance_groups": [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example"
}],
"os_family": "LINUX",
"is_reboot_required": true,
"notification_topic_id": "ocid1.notificationtopic.oc1..xxxxxxEXAMPLExxxxxx",
"ksplice_effective_kernel_version": "ksplice_effective_kernel_version_example",
"is_data_collection_authorized": true,
"autonomous": {
"is_auto_update_enabled": true
},
"security_updates_available": 56,
"bug_updates_available": 56,
"enhancement_updates_available": 56,
"other_updates_available": 56,
"scheduled_job_count": 56,
"work_request_count": 56
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
# The OCI Python SDK is an optional dependency: record whether it imported so
# main() can fail with a clear message instead of an ImportError traceback.
try:
    from oci.os_management import OsManagementClient
    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class ManagedInstanceFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get, list"""

    def get_required_params_for_get(self):
        # Module parameters that must be present for a single-resource get.
        return [
            "managed_instance_id",
        ]

    def get_required_params_for_list(self):
        # Module parameters that must be present for a list operation.
        return [
            "compartment_id",
        ]

    def get_resource(self):
        """Fetch one managed instance by OCID, retrying with backoff."""
        return oci_common_utils.call_with_backoff(
            self.client.get_managed_instance,
            managed_instance_id=self.module.params.get("managed_instance_id"),
        )

    def list_resources(self):
        """List managed instances in the compartment, forwarding only the
        optional filters the user actually supplied."""
        optional_list_method_params = [
            "display_name",
            "sort_order",
            "sort_by",
            "os_family",
        ]
        # Drop unset (None) parameters so the SDK applies its own defaults.
        optional_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_list_method_params
            if self.module.params.get(param) is not None
        )
        # list_all_resources follows pagination to return every page.
        return oci_common_utils.list_all_resources(
            self.client.list_managed_instances,
            compartment_id=self.module.params.get("compartment_id"),
            **optional_kwargs
        )
# Hook for hand-written customizations: get_custom_class returns a (possibly
# empty) class that is placed FIRST in the MRO so its overrides win over the
# generated helper methods.
ManagedInstanceFactsHelperCustom = get_custom_class("ManagedInstanceFactsHelperCustom")


class ResourceFactsHelper(
    ManagedInstanceFactsHelperCustom, ManagedInstanceFactsHelperGen
):
    pass
def main():
    """Ansible entry point: build the arg spec, validate the environment and
    dispatch to either the get or list operation."""
    # Start from the common OCI argument spec (auth, region, etc.) and add
    # this module's own parameters.
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            managed_instance_id=dict(aliases=["id"], type="str"),
            compartment_id=dict(type="str"),
            display_name=dict(aliases=["name"], type="str"),
            sort_order=dict(type="str", choices=["ASC", "DESC"]),
            sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
            os_family=dict(type="str", choices=["LINUX", "WINDOWS", "ALL"]),
        )
    )
    module = AnsibleModule(argument_spec=module_args)
    # Fail fast with an actionable message if the optional SDK is missing.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="managed_instance",
        service_client_class=OsManagementClient,
        namespace="os_management",
    )
    result = []
    # The helper decides get-vs-list from which required params were supplied.
    if resource_facts_helper.is_get():
        result = [resource_facts_helper.get()]
    elif resource_facts_helper.is_list():
        result = resource_facts_helper.list()
    else:
        resource_facts_helper.fail()
    module.exit_json(managed_instances=result)


if __name__ == "__main__":
    main()
| 33.894967 | 150 | 0.579277 |
af2dea943f461aa705963837be699637489ef8e6 | 447 | py | Python | samcli/commands/build/exceptions.py | torresxb1/aws-sam-cli | d307f2eb6e1a91a476a5e2ca6070f974b0c913f1 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2021-07-10T14:19:00.000Z | 2021-07-10T14:19:00.000Z | samcli/commands/build/exceptions.py | torresxb1/aws-sam-cli | d307f2eb6e1a91a476a5e2ca6070f974b0c913f1 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2021-07-29T20:56:48.000Z | 2021-07-29T20:56:48.000Z | samcli/commands/build/exceptions.py | torresxb1/aws-sam-cli | d307f2eb6e1a91a476a5e2ca6070f974b0c913f1 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-03-18T18:16:23.000Z | 2020-03-18T18:16:23.000Z | """Build exceptions"""
from samcli.commands.exceptions import UserException
class InvalidBuildDirException(UserException):
    """
    Raised when the value provided to --build-dir is invalid
    """


class MissingBuildMethodException(UserException):
    """
    Raised when an attempt is made to build a layer that has no BuildMethod
    """


class InvalidBuildImageException(UserException):
    """
    Raised when the value provided to --build-image is invalid
    """
| 20.318182 | 77 | 0.715884 |
f6ecd09f2ee84178c834d5a8146cdba3be412eb9 | 1,546 | py | Python | nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py | demianw/nipype | 52d64c30d96ecd94f1833156e28dce32c4f05ebe | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py | demianw/nipype | 52d64c30d96ecd94f1833156e28dce32c4f05ebe | [
"BSD-3-Clause"
] | 2 | 2017-10-05T21:08:38.000Z | 2018-10-09T23:01:23.000Z | nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py | effigies/nipype | 18fe222557cf3b9627e06b2a66fba589feaca581 | [
"Apache-2.0"
] | 1 | 2016-10-11T19:18:53.000Z | 2016-10-11T19:18:53.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..utils import WarpPointsToStd
def test_WarpPointsToStd_inputs():
    """Check every trait-metadata entry of WarpPointsToStd's input spec.

    Auto-generated (see file header); yields one assert_equal per
    (trait, metadata-key) pair rather than asserting in a loop body.
    """
    input_map = dict(args=dict(argstr='%s',
    ),
    coord_mm=dict(argstr='-mm',
    xor=['coord_vox'],
    ),
    coord_vox=dict(argstr='-vox',
    xor=['coord_mm'],
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    img_file=dict(argstr='-img %s',
    mandatory=True,
    ),
    in_coords=dict(argstr='%s',
    mandatory=True,
    position=-1,
    ),
    out_file=dict(name_source='in_coords',
    name_template='%s_warped',
    output_name='out_file',
    ),
    premat_file=dict(argstr='-premat %s',
    ),
    std_file=dict(argstr='-std %s',
    mandatory=True,
    ),
    terminal_output=dict(nohash=True,
    ),
    warp_file=dict(argstr='-warp %s',
    xor=['xfm_file'],
    ),
    xfm_file=dict(argstr='-xfm %s',
    xor=['warp_file'],
    ),
    )
    inputs = WarpPointsToStd.input_spec()

    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_WarpPointsToStd_outputs():
    """Check the (single) output trait of WarpPointsToStd's output spec.

    Auto-generated (see file header).
    """
    output_map = dict(out_file=dict(),
    )
    outputs = WarpPointsToStd.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 25.344262 | 78 | 0.629366 |
cfe6b4ede30ac50ec010b866070cfcea8bc81f63 | 1,119 | py | Python | apps/users/schema.py | kamranhossain/modern-django-setup-v2 | 2f3900c2e32d9fba034e453acaef91cd6c0ee2b2 | [
"MIT"
] | 1 | 2020-07-02T00:25:04.000Z | 2020-07-02T00:25:04.000Z | apps/users/schema.py | kamranhossain/modern-django-setup-v2 | 2f3900c2e32d9fba034e453acaef91cd6c0ee2b2 | [
"MIT"
] | 5 | 2021-03-19T02:24:47.000Z | 2021-06-10T19:02:53.000Z | apps/users/schema.py | kamranhossain/modern-django-setup-v2 | 2f3900c2e32d9fba034e453acaef91cd6c0ee2b2 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
import graphene
from graphene_django import DjangoObjectType
class UserType(DjangoObjectType):
    """GraphQL type derived automatically from the active Django user model."""

    class Meta:
        model = get_user_model()
class Query(graphene.ObjectType):
    """Root query: look up any user by id, or fetch the authenticated user."""

    user = graphene.Field(UserType, id=graphene.Int(required=True))
    me = graphene.Field(UserType)

    def resolve_user(self, info, id):
        # .get() raises <UserModel>.DoesNotExist for an unknown id.
        user_model = get_user_model()
        return user_model.objects.get(id=id)

    def resolve_me(self, info):
        requester = info.context.user
        if not requester.is_anonymous:
            return requester
        raise Exception("Not logged in!")
class CreactUser(graphene.Mutation):
    """Mutation that registers a new user account.

    NOTE(review): the class name looks like a typo for "CreateUser"; renaming
    would ripple into Mutation.create_user below, so it is only flagged here.
    """

    user = graphene.Field(UserType)

    class Arguments:
        username = graphene.String(required=True)
        password = graphene.String(required=True)
        email = graphene.String(required=True)

    def mutate(self, info, username, password, email):
        # set_password hashes the raw password; it is never stored directly.
        user = get_user_model()(username=username, email=email)
        user.set_password(password)
        user.save()
        return CreactUser(user=user)


class Mutation(graphene.ObjectType):
    """Root mutation type exposed by the schema."""

    create_user = CreactUser.Field()
| 24.866667 | 67 | 0.688114 |
75509282beddfe3bd4a075ecd0da770467f9c3c1 | 7,248 | py | Python | tests/references/test_utils.py | ColeVoelpel/virtool | 859c8d2516f07343bde47f3bae0247dedd76e6c4 | [
"MIT"
] | 1 | 2019-08-23T00:19:00.000Z | 2019-08-23T00:19:00.000Z | tests/references/test_utils.py | ColeVoelpel/virtool | 859c8d2516f07343bde47f3bae0247dedd76e6c4 | [
"MIT"
] | null | null | null | tests/references/test_utils.py | ColeVoelpel/virtool | 859c8d2516f07343bde47f3bae0247dedd76e6c4 | [
"MIT"
] | null | null | null | import pytest
import virtool.references.utils
@pytest.mark.parametrize("empty", [True, False])
@pytest.mark.parametrize("in_seen", [True, False])
def test_detect_duplicate_abbreviation(in_seen, empty, test_otu):
    """Non-empty abbreviations are recorded in ``seen`` and reported as
    duplicates only when already present there.

    """
    seen = set()
    duplicates = set()

    if in_seen:
        # Simulate a previously-encountered OTU with the same abbreviation.
        seen.add("PVF")

    if empty:
        # Empty abbreviations must be ignored entirely.
        test_otu["abbreviation"] = ""

    virtool.references.utils.detect_duplicate_abbreviation(test_otu, duplicates, seen)

    # "PVF" ends up in seen if it was pre-seeded or the OTU contributed it.
    if in_seen or not empty:
        assert seen == {"PVF"}
    else:
        assert seen == set()

    # A duplicate is reported only when the OTU carries the abbreviation AND
    # it was already seen.
    if in_seen and not empty:
        assert duplicates == {"PVF"}
    else:
        assert duplicates == set()
@pytest.mark.parametrize("seen", [False, True])
def test_detect_duplicate_ids(seen, test_otu):
    """The OTU id is always recorded and flagged only when already seen."""
    duplicate_ids = set()
    seen_ids = {"6116cba1"} if seen else set()

    virtool.references.utils.detect_duplicate_ids(test_otu, duplicate_ids, seen_ids)

    if seen:
        assert duplicate_ids == {"6116cba1"}
    else:
        assert duplicate_ids == set()

    assert seen_ids == {"6116cba1"}
@pytest.mark.parametrize("has_dups", [True, False])
def test_detect_duplicate_isolate_ids(has_dups, test_otu):
    """Appending a copied isolate creates a within-OTU id collision; giving
    the copy a fresh id must not.

    """
    extra_isolate = dict(test_otu["isolates"][0])

    if not has_dups:
        # Distinct id: no collision expected.
        extra_isolate["id"] = "foobar"

    test_otu["isolates"].append(extra_isolate)

    duplicate_isolate_ids = dict()

    virtool.references.utils.detect_duplicate_isolate_ids(test_otu, duplicate_isolate_ids)

    if has_dups:
        # Keyed by OTU id; "cab8b360" is the duplicated isolate id from the
        # test_otu fixture.
        assert duplicate_isolate_ids == {
            test_otu["_id"]: {
                "name": "Prunus virus F",
                "duplicates": ["cab8b360"]
            }
        }
    else:
        assert duplicate_isolate_ids == dict()
@pytest.mark.parametrize("seen", [True, False])
@pytest.mark.parametrize("transform", [None, "lower", "upper"])
def test_detect_duplicate_name(seen, transform, test_otu):
    """Duplicate names are detected regardless of case and seen names are
    tracked in lowercase.

    """
    seen_names = set()

    if seen:
        seen_names.add("prunus virus f")

    if transform:
        # str.lower()/str.upper() return NEW strings, so the result must be
        # assigned back. Previously the return value was discarded, which
        # made the ``transform`` parametrization a no-op.
        test_otu["name"] = getattr(test_otu["name"], transform)()

    duplicates = set()

    virtool.references.utils.detect_duplicate_name(test_otu, duplicates, seen_names)

    if seen:
        # The name is reported verbatim (in whatever case the OTU carries).
        assert duplicates == {test_otu["name"]}
    else:
        assert duplicates == set()

    assert seen_names == {"prunus virus f"}
@pytest.mark.parametrize("intra", [True, False])
@pytest.mark.parametrize("seen", [True, False])
def test_detect_duplicate_sequence_ids(intra, seen, test_merged_otu):
    """Sequence id collisions are caught both within one OTU (``intra``) and
    against ids seen in earlier OTUs (``seen``).

    """
    seen_sequence_ids = set()

    if intra:
        # Duplicate the first sequence inside the same isolate.
        test_merged_otu["isolates"][0]["sequences"].append(
            test_merged_otu["isolates"][0]["sequences"][0]
        )

    if seen:
        # Pretend another OTU already used this accession.
        seen_sequence_ids.add("KX269872")

    duplicate_sequence_ids = set()

    virtool.references.utils.detect_duplicate_sequence_ids(
        test_merged_otu,
        duplicate_sequence_ids,
        seen_sequence_ids
    )

    if intra or seen:
        assert duplicate_sequence_ids == {"KX269872"}
    else:
        assert duplicate_sequence_ids == set()

    assert seen_sequence_ids == {"KX269872"}
@pytest.mark.parametrize("strict", [True, False])
def test_detect_duplicates(strict, test_merged_otu):
    """Strict mode reports id/isolate/sequence collisions in addition to the
    abbreviation and name duplicates reported in both modes.

    """
    # NOTE(review): both list entries alias the SAME fixture object, so the
    # isolate append below mutates "both" OTUs — presumably intentional to
    # force every duplicate category at once; confirm against fixture intent.
    otu_list = [test_merged_otu, test_merged_otu]

    otu_list[0]["isolates"].append(otu_list[0]["isolates"][0])

    result = virtool.references.utils.detect_duplicates(otu_list, strict=strict)

    if strict:
        assert result == [
            {
                'duplicates': ['PVF'],
                'id': 'duplicate_abbreviations',
                'message': 'Duplicate OTU abbreviations found'
            },
            {
                'duplicates': ['6116cba1'],
                'id': 'duplicate_ids',
                'message': 'Duplicate OTU ids found'
            },
            {
                'duplicates': {
                    '6116cba1': {
                        'duplicates': ['cab8b360'],
                        'name': 'Prunus virus F'
                    }
                },
                'id': 'duplicate_isolate_ids',
                'message': 'Duplicate isolate ids found in some OTUs'
            },
            {
                'duplicates': ['Prunus virus F'],
                'id': 'duplicate_names',
                'message': 'Duplicate OTU names found'
            },
            {
                'duplicates': {'KX269872'},
                'id': 'duplicate_sequence_ids',
                'message': 'Duplicate sequence ids found'
            }
        ]
    else:
        assert result == [
            {
                "duplicates": ["PVF"],
                "id": "duplicate_abbreviations",
                "message": "Duplicate OTU abbreviations found"
            },
            {
                "duplicates": ["Prunus virus F"],
                "id": "duplicate_names",
                "message": "Duplicate OTU names found"
            }
        ]
@pytest.mark.parametrize("require_meta", [True, False])
def test_get_import_schema(require_meta):
    """The metadata fields are required only when ``require_meta`` is set."""
    expected = {
        "data_type": {
            "type": "string",
            "required": require_meta
        },
        "organism": {
            "type": "string",
            "required": require_meta
        },
        "otus": {
            "type": "list",
            "required": True
        }
    }

    assert virtool.references.utils.get_import_schema(require_meta) == expected
@pytest.mark.parametrize("require_id", [True, False])
def test_get_isolate_schema(require_id):
    """Only the isolate ``id`` field toggles with ``require_id``; all other
    fields are always required.

    """
    assert virtool.references.utils.get_isolate_schema(require_id) == {
        "id": {
            "type": "string",
            "required": require_id
        },
        "source_type": {
            "type": "string",
            "required": True
        },
        "source_name": {
            "type": "string",
            "required": True
        },
        "default": {
            "type": "boolean",
            "required": True
        },
        "sequences": {
            "type": "list",
            "required": True
        }
    }
@pytest.mark.parametrize("require_id", [True, False])
def test_get_otu_schema(require_id):
    """Only ``_id`` toggles with ``require_id``; ``abbreviation`` is always
    optional.

    """
    assert virtool.references.utils.get_otu_schema(require_id) == {
        "_id": {
            "type": "string",
            "required": require_id
        },
        "abbreviation": {
            "type": "string"
        },
        "name": {
            "type": "string",
            "required": True
        },
        "isolates": {
            "type": "list",
            "required": True
        }
    }
def test_get_owner_user():
    """The reference owner is granted every right."""
    user = virtool.references.utils.get_owner_user("fred")

    assert user == {
        "id": "fred",
        "build": True,
        "modify": True,
        "modify_otu": True,
        "remove": True
    }
@pytest.mark.parametrize("require_id", [True, False])
def test_get_sequence_schema(require_id):
    """Only ``_id`` toggles with ``require_id``; the remaining sequence
    fields are always required.

    """
    assert virtool.references.utils.get_sequence_schema(require_id) == {
        "_id": {
            "type": "string",
            "required": require_id
        },
        "accession": {
            "type": "string",
            "required": True
        },
        "definition": {
            "type": "string",
            "required": True
        },
        "sequence": {
            "type": "string",
            "required": True
        }
    }
| 26.549451 | 90 | 0.543184 |
74bf1a0bb2d8bd7a1491b274231081a8be8ab8c0 | 14,632 | py | Python | client/python/modeldb/basic/ModelDbSyncerBase.py | MeRajat/modeldb | 3cf07d8292ab73016e72ac8e24d4b69938407c18 | [
"MIT"
] | null | null | null | client/python/modeldb/basic/ModelDbSyncerBase.py | MeRajat/modeldb | 3cf07d8292ab73016e72ac8e24d4b69938407c18 | [
"MIT"
] | null | null | null | client/python/modeldb/basic/ModelDbSyncerBase.py | MeRajat/modeldb | 3cf07d8292ab73016e72ac8e24d4b69938407c18 | [
"MIT"
] | null | null | null | import sys
import yaml
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from ..events import *
from ..thrift.modeldb import ModelDBService
from ..thrift.modeldb import ttypes as modeldb_types
from ..utils.ConfigUtils import ConfigReader
from ..utils import ConfigConstants as constants
from ..utils import MetadataConstants as metadata_constants
# Sentinel numeric bounds sent with hyperparameters (see convert_spec_to_thrift).
# NOTE(review): sys.float_info.min is the smallest *positive* normal float
# (~2.2e-308), not the most negative value; if an unbounded lower limit was
# intended, -sys.float_info.max would be it — confirm against server semantics.
FMIN = sys.float_info.min
FMAX = sys.float_info.max
class NewOrExistingProject:
    """Identifies a project by name/author; thrift id -1 tells the server to
    create the project if it does not already exist."""

    def __init__(self, name, author, description):
        self.name, self.author, self.description = name, author, description

    def to_thrift(self):
        fields = (-1, self.name, self.author, self.description)
        return modeldb_types.Project(*fields)
class ExistingProject:
    """Refers to a project that already exists on the ModelDB server."""

    def __init__(self, id):
        self.id = id

    def to_thrift(self):
        # Only the id is meaningful; the server resolves the other fields.
        return modeldb_types.Project(self.id, "", "", "")


class ExistingExperiment:
    """Refers to an experiment that already exists on the ModelDB server."""

    def __init__(self, id):
        self.id = id

    def to_thrift(self):
        # Second field is the (unspecified) project id; False = not default.
        return modeldb_types.Experiment(self.id, -1, "", "", False)
class DefaultExperiment:
    """Placeholder for the project's default experiment (resolved server-side)."""

    def to_thrift(self):
        # -1 ids mean "assign on the server"; True marks it as the default.
        return modeldb_types.Experiment(-1, -1, "", "", True)
class NewOrExistingExperiment:
    """Identifies an experiment by name; created server-side when missing."""

    def __init__(self, name, description):
        self.name, self.description = name, description

    def to_thrift(self):
        # -1 ids mean "assign on the server"; False = not the default.
        experiment = modeldb_types.Experiment(
            -1, -1, self.name, self.description, False)
        return experiment
class NewExperimentRun:
    """Describes a new experiment run, optionally pinned to a git sha."""

    def __init__(self, description="", sha=None):
        self.description = description
        self.sha = sha

    def to_thrift(self):
        run = modeldb_types.ExperimentRun(-1, -1, self.description)
        if not self.sha:
            return run
        run.sha = self.sha
        return run
class ExistingExperimentRun:
    """Refers to an experiment run that already exists on the server."""

    def __init__(self, id):
        self.id = id

    def to_thrift(self):
        # Only the id matters; the experiment id (-1) is left unspecified.
        return modeldb_types.ExperimentRun(self.id, -1, "")
# TODO: fix the way i'm doing tagging
class Dataset:
    """A dataset used in an experiment run.

    :param filename: path or name of the dataset file
    :param metadata: optional dict of key/value metadata about the dataset
    :param tag: optional short, user-chosen label; "" when not given
    """

    def __init__(self, filename, metadata=None, tag=None):
        self.filename = filename
        # Bug fix: the default used to be a shared mutable dict (metadata={}),
        # so mutating one instance's metadata leaked into every later
        # default-constructed Dataset. Use None and build a fresh dict.
        self.metadata = metadata if metadata is not None else {}
        self.tag = tag if tag else ""

    def __str__(self):
        return self.filename + "," + self.tag
class ModelConfig:
    """Bundles a model type with its hyperparameter configuration."""

    def __init__(self, model_type, config, tag=None):
        self.model_type = model_type
        self.config = config
        self.tag = tag or ""

    def __str__(self):
        return ",".join([self.model_type, self.tag])
class Model:
    """A trained model produced by an experiment run.

    :param model_type: short string naming the kind of model
    :param model: the in-memory model object itself
    :param path: optional filesystem path where the model is stored
    :param tag: optional short, user-chosen label; "" when not given
    """

    def __init__(self, model_type, model, path=None, tag=None):
        self.model_type = model_type
        self.model = model
        self.path = path
        self.tag = tag if tag else ""

    def __str__(self):
        # Bug fix: path defaults to None, and "str + None" raises TypeError;
        # fall back to "" so str() never fails. self.path itself is kept
        # unchanged (None) for attribute access.
        return self.model_type + "," + (self.path or "") + "," + self.tag
class ModelMetrics:
    """Evaluation metrics for a model.

    :param metrics: dict mapping metric name to value (see sync_metrics,
        which iterates metrics.items())
    :param tag: optional short, user-chosen label; "" when not given
    """

    def __init__(self, metrics, tag=None):
        self.metrics = metrics
        self.tag = tag if tag else ""

    def __str__(self):
        # Bug fix: __str__ must return a str; returning the metrics dict
        # directly made str(model_metrics) raise TypeError.
        return str(self.metrics)
class Syncer(object):
    # Singleton instance shared process-wide (see __new__).
    instance = None

    @classmethod
    def create_syncer(cls, proj_name, user_name, proj_desc=None):
        """
        Create a syncer given project information. A default experiment will be
        created and a default experiment run will be used
        """
        syncer_obj = cls(
            NewOrExistingProject(proj_name, user_name, \
                proj_desc if proj_desc else ""),
            DefaultExperiment(),
            NewExperimentRun(""))
        return syncer_obj

    @classmethod
    def create_syncer_from_config(
        cls, config_file=".mdb_config", expt_name=None, sha=None):
        """
        Create a syncer based on the modeldb configuration file
        """
        config_reader = ConfigReader(config_file)
        project_info = config_reader.get_project()
        experiment_info = config_reader.get_experiment(expt_name)
        project = NewOrExistingProject(
            project_info[constants.NAME_KEY],
            project_info[constants.GIT_USERNAME_KEY],
            project_info[constants.DESCRIPTION_KEY])
        # Fall back to the default experiment when the config names none.
        experiment = DefaultExperiment() if experiment_info == None else \
            NewOrExistingExperiment(experiment_info[constants.NAME_KEY],
                experiment_info[constants.DESCRIPTION_KEY])
        experiment_run = NewExperimentRun("", sha)
        syncer_obj = cls(project, experiment, experiment_run)
        return syncer_obj

    @classmethod
    def create_syncer_for_experiment_run(cls, experiment_run_id):
        """
        Create a syncer for this experiment run
        """
        # Project/experiment are None: setup() resolves them from the run id.
        syncer_obj = cls(None, None, ExistingExperimentRun(experiment_run_id))
        return syncer_obj
    def __new__(cls, project_config, experiment_config, experiment_run_config): # __new__ always a classmethod
        # This will break if cls is some random class.
        # Singleton: reuse the one instance once created.
        # NOTE(review): passing extra args to object.__new__ is a Python 2
        # behavior; under Python 3 this raises TypeError — verify if porting.
        if not cls.instance:
            cls.instance = object.__new__(
                cls, project_config, experiment_config, experiment_run_config)
        return cls.instance

    def __init__(
        self, project_config, experiment_config, experiment_run_config):
        # Buffered events awaiting sync(), plus bookkeeping maps from
        # python object ids to ModelDB ids / tags / the objects themselves.
        self.buffer_list = []
        self.local_id_to_modeldb_id = {}
        self.local_id_to_object = {}
        self.local_id_to_tag = {}
        self.initialize_thrift_client()
        self.setup(project_config, experiment_config, experiment_run_config)

    def setup(self, project_config, experiment_config, experiment_run_config):
        """Resolve project/experiment/run from the given config objects."""
        if isinstance(experiment_run_config, ExistingExperimentRun):
            # An existing run id is sufficient; project/experiment unknown.
            self.experiment_run = experiment_run_config.to_thrift()
            self.project = None
            self.experiment = None
        elif not project_config or not experiment_config:
            # TODO: fix this error message
            print "Either (project_config and experiment_config) need to be " \
                "specified or ExistingExperimentRunConfig needs to be specified"
            sys.exit(-1)
        else:
            # Order matters: each step syncs and fills ids the next one needs.
            self.set_project(project_config)
            self.set_experiment(experiment_config)
            self.set_experiment_run(experiment_run_config)

    def __str__(self):
        return "BaseSyncer"
    def set_project(self, project_config):
        """Convert, enqueue and immediately sync the project event."""
        self.project = project_config.to_thrift()
        project_event = ProjectEvent(self.project)
        self.buffer_list.append(project_event)
        self.sync()

    def set_experiment(self, experiment_config):
        """Attach the experiment to the already-synced project and sync it."""
        self.experiment = experiment_config.to_thrift()
        # Requires set_project to have run first (project.id must be set).
        self.experiment.projectId = self.project.id
        experiment_event = ExperimentEvent(self.experiment)
        self.buffer_list.append(experiment_event)
        self.sync()

    def set_experiment_run(self, experiment_run_config):
        """Attach the run to the already-synced experiment and sync it."""
        self.experiment_run = experiment_run_config.to_thrift()
        # Requires set_experiment to have run first (experiment.id set).
        self.experiment_run.experimentId = self.experiment.id
        experiment_run_event = ExperimentRunEvent(self.experiment_run)
        self.buffer_list.append(experiment_run_event)
        self.sync()
    def get_local_id(self, obj):
        # The local key is the object's identity (python id()), so mappings
        # are per-object-instance, not per-value.
        return id(obj)

    def store_object(self, obj, modeldb_id):
        """
        Stores mapping between objects and their IDs.
        """
        local_id = self.get_local_id(obj)
        self.local_id_to_modeldb_id[local_id] = modeldb_id
        # Keep a reference so the object (and thus its id()) stays alive.
        if local_id not in self.local_id_to_object:
            self.local_id_to_object[local_id] = obj

    def get_modeldb_id_for_object(self, obj):
        """Return the stored ModelDB id for obj, or -1 if never stored."""
        local_id = self.get_local_id(obj)
        if local_id in self.local_id_to_modeldb_id:
            return self.local_id_to_modeldb_id[local_id]
        else:
            return -1

    def get_tag_for_object(self, obj):
        """Return the stored tag for obj, or "" if it has none."""
        local_id = self.get_local_id(obj)
        if local_id in self.local_id_to_tag:
            return self.local_id_to_tag[local_id]
        else:
            return ""

    def add_tag(self, obj, tag):
        """
        Stores mapping between objects and their tags.
        Tags are short, user-generated names.
        """
        local_id = self.get_local_id(obj)
        self.local_id_to_tag[local_id] = tag
        # As in store_object: retain the object to keep its id() stable.
        if local_id not in self.local_id_to_object:
            self.local_id_to_object[local_id] = obj
    def add_to_buffer(self, event):
        """
        As events are generated, they are added to this buffer.
        """
        self.buffer_list.append(event)

    def sync(self):
        """
        When this function is called, all events in the buffer are stored on server.
        """
        # Each event knows how to persist itself given this syncer's client.
        for b in self.buffer_list:
            b.sync(self)
        self.clear_buffer()

    def clear_buffer(self):
        '''
        Remove all events from the buffer
        '''
        self.buffer_list = []
    def initialize_thrift_client(self, host="localhost", port=6543):
        """Open a framed binary-protocol thrift connection to ModelDB."""
        # Make socket
        self.transport = TSocket.TSocket(host, port)
        # Buffering is critical. Raw sockets are very slow
        self.transport = TTransport.TFramedTransport(self.transport)
        # Wrap in a protocol
        protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
        # Create a client to use the protocol encoder
        self.client = ModelDBService.Client(protocol)
        self.transport.open()

    def closeThriftClient(self):
        """Close the transport and drop the client reference."""
        self.transport.close()
        self.client = None
    '''
    Functions that convert ModelDBSyncerLight classes into ModelDB
    thrift classes
    '''
    def convert_model_to_thrift(self, model):
        """Return the thrift Transformer for a Model; if the model was
        already stored, return a stub carrying only its ModelDB id."""
        model_id = self.get_modeldb_id_for_object(model)
        if model_id != -1:
            return modeldb_types.Transformer(model_id, "", "", "")
        return modeldb_types.Transformer(-1,
            model.model_type, model.tag, model.path)

    def convert_spec_to_thrift(self, spec):
        """Return the thrift TransformerSpec for a ModelConfig, expanding
        each config entry into a HyperParameter."""
        spec_id = self.get_modeldb_id_for_object(spec)
        if spec_id != -1:
            # Already stored: id-only stub.
            return modeldb_types.TransformerSpec(spec_id, "", [], "")
        hyperparameters = []
        for key, value in spec.config.items():
            # Values are stringified; FMIN/FMAX are placeholder bounds.
            hyperparameter = modeldb_types.HyperParameter(key, \
                str(value), type(value).__name__, FMIN, FMAX)
            hyperparameters.append(hyperparameter)
        transformer_spec = modeldb_types.TransformerSpec(-1, spec.model_type, \
            hyperparameters, spec.tag)
        return transformer_spec

    def set_columns(self, df):
        # No column extraction in the base syncer; subclasses may override.
        return []

    def convert_df_to_thrift(self, dataset):
        """Return the thrift DataFrame for a Dataset, expanding its metadata
        dict into MetadataKV entries."""
        dataset_id = self.get_modeldb_id_for_object(dataset)
        if dataset_id != -1:
            # Already stored: id-only stub.
            return modeldb_types.DataFrame(dataset_id, [], -1, "", "", [])
        metadata = []
        for key, value in dataset.metadata.items():
            kv = modeldb_types.MetadataKV(key, str(value), str(type(value)))
            metadata.append(kv)
        return modeldb_types.DataFrame(-1, [], -1, dataset.tag, \
            dataset.filename, metadata)

    '''
    End. Functions that convert ModelDBSyncerLight classes into ModelDB
    thrift classes
    '''
    '''
    ModelDBSyncerLight API
    '''
def sync_datasets(self, datasets):
'''
Registers the datasets used in this experiment run.
The input is expected to be either a single dataset or a dictionary
with keys which are local tags for the dataset and values are the
dataset objects.
'''
# TODO: need to capture the metadata
self.datasets = {}
if type(datasets) != dict:
self.datasets["default"] = dataset
else:
for key, dataset in datasets.items():
if not dataset.tag:
dataset.tag = key
self.datasets[key] = dataset
    def sync_model(self, data_tag, config, model):
        '''
        Syncs the model as having been generated from a given dataset using
        the given config
        '''
        dataset = self.get_dataset_for_tag(data_tag)
        fit_event = FitEvent(model, config, dataset)
        Syncer.instance.add_to_buffer(fit_event)

    def sync_metrics(self, data_tag, model, metrics):
        '''
        Syncs the metrics for the given model on the given data
        '''
        dataset = self.get_dataset_for_tag(data_tag)
        # One MetricEvent per (name, value) pair in the metrics dict.
        for metric, value in metrics.metrics.items():
            metric_event = MetricEvent(dataset, model, "label_col", \
                "prediction_col", metric, value)
            Syncer.instance.add_to_buffer(metric_event)
    def get_dataset_for_tag(self, data_tag):
        """Returns the dataset registered under *data_tag*.

        Unknown tags fall back to (and lazily create) an empty "default"
        dataset, with a warning printed to stdout.

        NOTE: uses a Python 2 print statement; this module is Python 2 only.
        """
        if data_tag not in self.datasets:
            if "default" not in self.datasets:
                self.datasets["default"] = Dataset("", {})
            print data_tag, \
                ' dataset not defined. default dataset will be used.'
            data_tag = "default"
        return self.datasets[data_tag]
    def dataset_from_dict(self, dataset_dict):
        """Builds a Dataset from one entry of the metadata YAML file.

        The filename key is required; metadata defaults to an empty dict and
        the tag defaults to 'default'.
        """
        filename = dataset_dict[metadata_constants.DATASET_FILENAME_KEY]
        metadata = dataset_dict.get(metadata_constants.DATASET_METADATA_KEY, {})
        tag = dataset_dict.get(metadata_constants.DATASET_TAG_KEY, 'default')
        return Dataset(filename, metadata, tag)
    def sync_all(self, metadata_path):
        """Reads a metadata YAML file and queues all events it describes.

        Syncs, in order: the datasets, one FitEvent for the model, then one
        MetricEvent per metric listed for the model.
        """
        with open(metadata_path) as data_file:
            # NOTE(review): yaml.load without an explicit Loader executes
            # arbitrary constructors -- unsafe if metadata_path is untrusted;
            # consider yaml.safe_load.
            metadata = yaml.load(data_file)
        # sync datasets
        datasets = {}
        for dataset_dict in metadata[metadata_constants.DATASETS_KEY]:
            dataset = self.dataset_from_dict(dataset_dict)
            datasets[dataset.tag] = dataset
        self.sync_datasets(datasets)
        # get model details
        model_data = metadata[metadata_constants.MODEL_KEY]
        model_type = model_data[metadata_constants.TYPE_KEY]
        model_name = model_data[metadata_constants.NAME_KEY]
        model_path = model_data.get(metadata_constants.PATH_KEY, None)
        model_tag = model_data.get(metadata_constants.TAG_KEY, None)
        model = Model(model_type, model_name, model_path, model_tag)
        model_dataset = self.get_dataset_for_tag(model_tag)
        config = model_data[metadata_constants.CONFIG_KEY]
        # NOTE(review): FitEvent is given 4 args here but 3 in sync_model --
        # confirm the extra model_data argument is intentional.
        fit_event = FitEvent(model, ModelConfig(model_type, config, model_tag),
                             model_dataset, model_data)
        Syncer.instance.add_to_buffer(fit_event)
        # sync metrics
        metrics_data = model_data.get(metadata_constants.METRICS_KEY, [])
        for metric in metrics_data:
            metric_type = metric[metadata_constants.METRIC_TYPE_KEY]
            metric_value = metric[metadata_constants.METRIC_VALUE_KEY]
            metric_event = MetricEvent(model_dataset, model, "label_col", \
                "prediction_col", metric_type, metric_value)
            Syncer.instance.add_to_buffer(metric_event)
| 35.257831 | 110 | 0.649399 |
47e2bfbb012a1f89fee0c7cda30aff8d939e3a79 | 189 | py | Python | main/apps.py | Godspower-Eze/savebetter | 3b9dc17d9b6395caaeed91fa79976e9c0061f260 | [
"MIT"
] | null | null | null | main/apps.py | Godspower-Eze/savebetter | 3b9dc17d9b6395caaeed91fa79976e9c0061f260 | [
"MIT"
] | null | null | null | main/apps.py | Godspower-Eze/savebetter | 3b9dc17d9b6395caaeed91fa79976e9c0061f260 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class MainConfig(AppConfig):
    """Django application configuration for the ``main`` app."""

    default_auto_field = 'django.db.models.BigAutoField'
    name = 'main'

    def ready(self):
        """Register signal handlers when the app registry is ready.

        The import is needed only for its side effects (connecting the
        receivers defined in main/signals.py).
        """
        # NOTE: dataset-export residue ("| 21 | 56 | 0.708995 |") was fused
        # onto this line in the dump; it has been removed.
        import main.signals
eab373d81439ee55c077ba094f832753cedbfa66 | 9,099 | py | Python | applications/neural_search/ranking/ernie_matching/deploy/python/predict.py | tanhanzhuo/PaddleNLP | d0d20678f2bec820570b4f09ca49cd402d20c3b6 | [
"Apache-2.0"
] | 7,091 | 2021-02-05T13:56:25.000Z | 2022-03-31T11:42:50.000Z | applications/neural_search/ranking/ernie_matching/deploy/python/predict.py | tanhanzhuo/PaddleNLP | d0d20678f2bec820570b4f09ca49cd402d20c3b6 | [
"Apache-2.0"
] | 844 | 2021-02-10T01:09:29.000Z | 2022-03-31T12:12:58.000Z | applications/neural_search/ranking/ernie_matching/deploy/python/predict.py | tanhanzhuo/PaddleNLP | d0d20678f2bec820570b4f09ca49cd402d20c3b6 | [
"Apache-2.0"
] | 1,035 | 2021-02-05T14:26:48.000Z | 2022-03-31T11:42:57.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import pandas as pd
import paddle
import paddlenlp as ppnlp
from scipy.special import softmax
from scipy.special import expit
from paddle import inference
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.datasets import load_dataset
from paddlenlp.utils.log import logger
import paddle.nn.functional as F
import sys
sys.path.append('.')
# yapf: disable
# Command-line interface for the inference script; parsed once at import
# time into the module-level `args` used throughout this file.
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, required=True,
                    help="The directory to static model.")
parser.add_argument("--max_seq_length", default=128, type=int,
                    help="The maximum total input sequence length after tokenization. Sequences "
                    "longer than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--batch_size", default=32, type=int,
                    help="Batch size per GPU/CPU for training.")
parser.add_argument('--device', choices=['cpu', 'gpu', 'xpu'], default="gpu",
                    help="Select which device to train model, defaults to gpu.")
parser.add_argument('--use_tensorrt', default=False, type=eval, choices=[True, False],
                    help='Enable to use tensorrt to speed up.')
parser.add_argument("--precision", default="fp32", type=str, choices=["fp32", "fp16", "int8"],
                    help='The tensorrt precision.')
parser.add_argument('--cpu_threads', default=10, type=int,
                    help='Number of threads to predict when using cpu.')
parser.add_argument('--enable_mkldnn', default=False, type=eval, choices=[True, False],
                    help='Enable to use mkldnn to speed up when using cpu.')
parser.add_argument("--benchmark", type=eval, default=False,
                    help="To log some information about environment and running.")
parser.add_argument("--save_log_path", type=str, default="./log_output/",
                    help="The file path to save log.")
args = parser.parse_args()
# yapf: enable
def read_text_pair(data_path):
    """Yield {'query', 'title'} dicts from a tab-separated text file.

    Only lines that split into exactly three tab-separated fields are used;
    the third field (the label) is ignored here.
    """
    with open(data_path, 'r', encoding='utf-8') as reader:
        for raw_line in reader:
            fields = raw_line.rstrip().split("\t")
            if len(fields) == 3:
                yield {'query': fields[0], 'title': fields[1]}
def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
    """Tokenize a query/title pair into model inputs.

    Returns (input_ids, token_type_ids) when is_test is True, otherwise
    (input_ids, token_type_ids, label) with the label as an int64 array.
    """
    encoded = tokenizer(
        text=example["query"],
        text_pair=example["title"],
        max_seq_len=max_seq_length)
    input_ids = encoded["input_ids"]
    token_type_ids = encoded["token_type_ids"]
    if is_test:
        return input_ids, token_type_ids
    label = np.array([example["label"]], dtype="int64")
    return input_ids, token_type_ids, label
class Predictor(object):
    """Paddle Inference wrapper that scores query/title similarity pairs.

    Loads a static (exported) model from *model_dir*, configures the
    inference engine for the requested device, and exposes predict() for
    batched scoring.
    """
    def __init__(self,
                 model_dir,
                 device="gpu",
                 max_seq_length=128,
                 batch_size=32,
                 use_tensorrt=False,
                 precision="fp32",
                 cpu_threads=10,
                 enable_mkldnn=False):
        """Create the inference predictor.

        Raises ValueError when the exported model/params files are missing.
        NOTE(review): several settings (use_tensorrt, enable_mkldnn,
        cpu_threads, benchmark) are read from the module-level `args`
        rather than the constructor parameters of the same name.
        """
        self.max_seq_length = max_seq_length
        self.batch_size = batch_size
        # File names follow the export convention of this project.
        model_file = model_dir + "/inference.get_pooled_embedding.pdmodel"
        params_file = model_dir + "/inference.get_pooled_embedding.pdiparams"
        if not os.path.exists(model_file):
            raise ValueError("not find model file path {}".format(model_file))
        if not os.path.exists(params_file):
            raise ValueError("not find params file path {}".format(params_file))
        config = paddle.inference.Config(model_file, params_file)
        if device == "gpu":
            # set GPU configs accordingly
            # such as intialize the gpu memory, enable tensorrt
            config.enable_use_gpu(100, 0)
            precision_map = {
                "fp16": inference.PrecisionType.Half,
                "fp32": inference.PrecisionType.Float32,
                "int8": inference.PrecisionType.Int8
            }
            precision_mode = precision_map[precision]
            if args.use_tensorrt:
                config.enable_tensorrt_engine(
                    max_batch_size=batch_size,
                    min_subgraph_size=30,
                    precision_mode=precision_mode)
        elif device == "cpu":
            # set CPU configs accordingly,
            # such as enable_mkldnn, set_cpu_math_library_num_threads
            config.disable_gpu()
            if args.enable_mkldnn:
                # cache 10 different shapes for mkldnn to avoid memory leak
                config.set_mkldnn_cache_capacity(10)
                config.enable_mkldnn()
            config.set_cpu_math_library_num_threads(args.cpu_threads)
        elif device == "xpu":
            # set XPU configs accordingly
            config.enable_xpu(100)
        config.switch_use_feed_fetch_ops(False)
        self.predictor = paddle.inference.create_predictor(config)
        # Cache input handles in declaration order: [input_ids, segment_ids].
        self.input_handles = [
            self.predictor.get_input_handle(name)
            for name in self.predictor.get_input_names()
        ]
        self.output_handle = self.predictor.get_output_handle(
            self.predictor.get_output_names()[0])
        if args.benchmark:
            # Optional timing/telemetry logger; imported lazily because it
            # is only needed in benchmark mode.
            import auto_log
            pid = os.getpid()
            self.autolog = auto_log.AutoLogger(
                model_name="ernie-tiny",
                model_precision=precision,
                batch_size=self.batch_size,
                data_shape="dynamic",
                save_path=args.save_log_path,
                inference_config=config,
                pids=pid,
                process_name=None,
                gpu_ids=0,
                time_keys=[
                    'preprocess_time', 'inference_time', 'postprocess_time'
                ],
                warmup=0,
                logger=logger)
    def predict(self, data, tokenizer):
        """
        Predicts the data labels.
        Args:
            data (obj:`List(str)`): The batch data whose each element is a raw text.
            tokenizer(obj:`PretrainedTokenizer`): This tokenizer inherits from :class:`~paddlenlp.transformers.PretrainedTokenizer`
                which contains most of the methods. Users should refer to the superclass for more information regarding methods.
            label_map(obj:`dict`): The label id (key) to label str (value) map.
        Returns:
            results(obj:`dict`): All the predictions labels.
        """
        if args.benchmark:
            self.autolog.times.start()
        # Tokenize each query/title pair (test mode: no labels).
        examples = []
        for text in data:
            input_ids, segment_ids = convert_example(
                text,
                tokenizer,
                max_seq_length=self.max_seq_length,
                is_test=True)
            examples.append((input_ids, segment_ids))
        # Pad the batch to a rectangular shape before feeding the engine.
        batchify_fn = lambda samples, fn=Tuple(
            Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input
            Pad(axis=0, pad_val=tokenizer.pad_token_id),  # segment
        ): fn(samples)
        if args.benchmark:
            self.autolog.times.stamp()
        input_ids, segment_ids = batchify_fn(examples)
        self.input_handles[0].copy_from_cpu(input_ids)
        self.input_handles[1].copy_from_cpu(segment_ids)
        self.predictor.run()
        sim_score = self.output_handle.copy_to_cpu()
        if args.benchmark:
            self.autolog.times.stamp()
        # Map raw logits to (0, 1) similarity scores with the sigmoid.
        sim_score = expit(sim_score)
        if args.benchmark:
            self.autolog.times.end(stamp=True)
        return sim_score
return sim_score
if __name__ == "__main__":
    # Define predictor to do prediction.
    predictor = Predictor(args.model_dir, args.device, args.max_seq_length,
                          args.batch_size, args.use_tensorrt, args.precision,
                          args.cpu_threads, args.enable_mkldnn)
    tokenizer = ppnlp.transformers.ErnieGramTokenizer.from_pretrained(
        'ernie-gram-zh')
    # test_ds = load_dataset("lcqmc", splits=["test"])
    # Load tab-separated query/title/label triples from a local file.
    input_file = 'sort/test_pairwise.csv'
    test_ds = load_dataset(read_text_pair, data_path=input_file, lazy=False)
    data = [{'query': d['query'], 'title': d['title']} for d in test_ds]
    # Split into fixed-size batches for inference.
    batches = [
        data[idx:idx + args.batch_size]
        for idx in range(0, len(data), args.batch_size)
    ]
    results = []
    for batch_data in batches:
        results.extend(predictor.predict(batch_data, tokenizer))
    # Print one similarity probability per input pair.
    for idx, text in enumerate(data):
        print('Data: {} \t prob: {}'.format(text, results[idx]))
    if args.benchmark:
        predictor.autolog.report()
| 37.138776 | 132 | 0.634465 |
3bf4afe6b341d2dbf4d1b4ed9784fa825863a885 | 8,312 | py | Python | plaso/lib/binary.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | 2 | 2016-02-18T12:46:29.000Z | 2022-03-13T03:04:59.000Z | plaso/lib/binary.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | null | null | null | plaso/lib/binary.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | 6 | 2016-12-18T08:05:36.000Z | 2021-04-06T14:19:11.000Z | # -*- coding: utf-8 -*-
"""This file contains a helper library to read binary files."""
import binascii
import logging
import os
def ByteArrayCopyToString(byte_array, codepage=u'utf-8'):
  """Copies a UTF-8 encoded byte array into a Unicode string.

  Args:
    byte_array: A byte array containing an UTF-8 encoded string.
    codepage: The codepage of the byte stream. The default is utf-8.

  Returns:
    A Unicode string.
  """
  # NOTE(review): b''.join(map(chr, ...)) only works on Python 2, where
  # chr() returns a byte string; this module is Python 2 code.
  byte_stream = b''.join(map(chr, byte_array))
  return ByteStreamCopyToString(byte_stream, codepage=codepage)
def ByteStreamCopyToString(byte_stream, codepage=u'utf-8'):
  """Copies an encoded byte stream into a Unicode string.

  Decoding errors are logged once and then ignored; the decoded string is
  truncated at the first NUL character.

  Args:
    byte_stream: A byte stream containing an UTF-8 encoded string.
    codepage: The codepage of the byte stream. The default is utf-8.

  Returns:
    A Unicode string.
  """
  try:
    decoded = byte_stream.decode(codepage)
  except UnicodeDecodeError:
    logging.warning(
        u'Unable to decode {0:s} formatted byte stream.'.format(codepage))
    decoded = byte_stream.decode(codepage, errors='ignore')

  return decoded.partition(u'\x00')[0]
def ByteStreamCopyToGuid(byte_stream, byte_order=u'little-endian'):
  """Reads a GUID from the byte stream.

  Args:
    byte_stream: The byte stream that contains the GUID, as a sequence of
                 at least 16 integer byte values.
    byte_order: The byte order, either big- or little-endian. The default is
                little-endian.

  Returns:
    String containing the GUID, or an empty string when the stream is too
    short or the byte order is unknown.
  """
  if len(byte_stream) < 16:
    return u''

  if byte_order == u'big-endian':
    format_string = (
        u'{{{0:02x}{1:02x}{2:02x}{3:02x}-{4:02x}{5:02x}-'
        u'{6:02x}{7:02x}-{8:02x}{9:02x}-'
        u'{10:02x}{11:02x}{12:02x}{13:02x}{14:02x}{15:02x}}}')
  elif byte_order == u'little-endian':
    # The first three GUID fields are byte-swapped in little-endian order.
    format_string = (
        u'{{{3:02x}{2:02x}{1:02x}{0:02x}-{5:02x}{4:02x}-'
        u'{7:02x}{6:02x}-{8:02x}{9:02x}-'
        u'{10:02x}{11:02x}{12:02x}{13:02x}{14:02x}{15:02x}}}')
  else:
    return u''

  return format_string.format(*byte_stream[:16])
def ByteStreamCopyToUtf16Stream(byte_stream, byte_stream_size=None):
  """Reads an UTF-16 formatted stream from a byte stream.

  The UTF-16 formatted stream should be terminated by an end-of-string
  character (\x00\x00). Otherwise the function reads up to the byte stream
  size.

  NOTE(review): byte_stream[index] == b'\x00' holds for Python 2 str input;
  on Python 3 bytes, indexing yields ints and the terminator is never
  detected -- confirm intended runtime is Python 2.

  Args:
    byte_stream: The byte stream that contains the UTF-16 formatted stream.
    byte_stream_size: Optional byte stream size or None if the entire
                      byte stream should be read. The default is None.

  Returns:
    String containing the UTF-16 formatted stream.
  """
  byte_stream_index = 0
  if not byte_stream_size:
    byte_stream_size = len(byte_stream)

  # Scan two bytes at a time looking for the UTF-16 NUL terminator.
  while byte_stream_index + 1 < byte_stream_size:
    if (byte_stream[byte_stream_index] == b'\x00' and
        byte_stream[byte_stream_index + 1] == b'\x00'):
      break
    byte_stream_index += 2

  return byte_stream[0:byte_stream_index]
def ReadUtf16Stream(file_object, offset=None, byte_size=0):
  """Reads an UTF-16 formatted stream from a file-like object.

  Reads an UTF-16 formatted stream that's terminated by
  an end-of-string character (\x00\x00) or up to the byte size.

  Args:
    file_object: A file-like object to read the data from.
    offset: An offset into the file object data, if -1 or not set
            the current location into the file object data is used.
    byte_size: Maximum number of bytes to read or 0 if the function
               should keep reading up to the end of file.

  Returns:
    An Unicode string.
  """
  if offset is not None:
    file_object.seek(offset, os.SEEK_SET)

  char_buffer = []

  stream_index = 0
  # Read one UTF-16 code unit (2 bytes) at a time until the terminator,
  # the byte_size limit, or end of file.
  char_raw = file_object.read(2)
  while char_raw:
    if byte_size and stream_index >= byte_size:
      break

    # NOTE(review): a \x00\x00 terminator straddling two reads would not be
    # detected; each read is checked individually -- confirm acceptable.
    if b'\x00\x00' in char_raw:
      break
    char_buffer.append(char_raw)
    stream_index += 2
    char_raw = file_object.read(2)

  return ReadUtf16(b''.join(char_buffer))
def Ut16StreamCopyToString(byte_stream, byte_stream_size=None):
  """Copies an UTF-16 formatted byte stream to a string.

  The UTF-16 formatted byte stream should be terminated by an end-of-string
  character (\x00\x00). Otherwise the function reads up to the byte stream
  size. Decode errors are logged and then ignored.

  Args:
    byte_stream: The UTF-16 formatted byte stream.
    byte_stream_size: The byte stream size or None if the entire byte stream
                      should be used.

  Returns:
    An Unicode string.
  """
  # Truncate at the UTF-16 NUL terminator first, then decode.
  utf16_stream = ByteStreamCopyToUtf16Stream(
      byte_stream, byte_stream_size=byte_stream_size)

  try:
    return utf16_stream.decode(u'utf-16-le')
  except (UnicodeDecodeError, UnicodeEncodeError) as exception:
    logging.error(u'Unable to decode string: {0:s} with error: {1:s}'.format(
        HexifyBuffer(utf16_stream), exception))

  # Fall back to a lossy decode when strict decoding failed.
  return utf16_stream.decode(u'utf-16-le', errors=u'ignore')
def ArrayOfUt16StreamCopyToString(byte_stream, byte_stream_size=None):
  """Copies an array of UTF-16 formatted byte streams to an array of strings.

  Each UTF-16 formatted stream should be terminated by an end-of-string
  character (\x00\x00). Otherwise the function reads up to the byte stream
  size. Scanning stops at an empty (zero-length) string entry.

  NOTE(review): the byte_stream[index] == b'\x00' comparison is Python 2
  str semantics; under Python 3 bytes this never matches -- confirm the
  intended runtime is Python 2.

  Args:
    byte_stream: The UTF-16 formatted byte stream.
    byte_stream_size: The byte stream size or None if the entire byte stream
                      should be used.

  Returns:
    An array of Unicode strings.
  """
  array_of_strings = []
  utf16_stream_start = 0
  byte_stream_index = 0
  if not byte_stream_size:
    byte_stream_size = len(byte_stream)

  while byte_stream_index + 1 < byte_stream_size:
    if (byte_stream[byte_stream_index] == b'\x00' and
        byte_stream[byte_stream_index + 1] == b'\x00'):

      # A terminator immediately after the previous one marks the end of
      # the array.
      if byte_stream_index - utf16_stream_start <= 2:
        break

      array_of_strings.append(
          byte_stream[utf16_stream_start:byte_stream_index].decode(
              u'utf-16-le'))
      utf16_stream_start = byte_stream_index + 2

    byte_stream_index += 2

  return array_of_strings
def ArrayOfUt16StreamCopyToStringTable(byte_stream, byte_stream_size=None):
  """Copies an array of UTF-16 formatted byte streams to a string table.

  The string table is a dict of strings with the byte offset as their key.
  Each UTF-16 formatted stream should be terminated by an end-of-string
  character (\x00\x00). Otherwise the function reads up to the byte stream
  size. Scanning stops at an empty (zero-length) string entry.

  NOTE(review): same Python 2 byte-indexing semantics as
  ArrayOfUt16StreamCopyToString -- confirm the runtime is Python 2.

  Args:
    byte_stream: The UTF-16 formatted byte stream.
    byte_stream_size: The byte stream size or None if the entire byte stream
                      should be used.

  Returns:
    A dict of Unicode strings with the byte offset as their key.
  """
  string_table = {}
  utf16_stream_start = 0
  byte_stream_index = 0
  if not byte_stream_size:
    byte_stream_size = len(byte_stream)

  while byte_stream_index + 1 < byte_stream_size:
    if (byte_stream[byte_stream_index] == b'\x00' and
        byte_stream[byte_stream_index + 1] == b'\x00'):

      # A terminator immediately after the previous one marks the end of
      # the array.
      if byte_stream_index - utf16_stream_start <= 2:
        break

      string = byte_stream[utf16_stream_start:byte_stream_index].decode(
          u'utf-16-le')
      # Key each string by the byte offset where it starts.
      string_table[utf16_stream_start] = string
      utf16_stream_start = byte_stream_index + 2

    byte_stream_index += 2

  return string_table
def ReadUtf16(string_buffer):
  """Returns a decoded UTF-16 string from a string buffer.

  Accepts a byte string or a list/tuple of byte strings (which is joined
  first). Decode errors are logged and a lossy decode is returned instead.
  NUL characters are stripped from the result.

  NOTE: uses `basestring`, so this module is Python 2 only.
  """
  if isinstance(string_buffer, (list, tuple)):
    use_buffer = u''.join(string_buffer)
  else:
    use_buffer = string_buffer

  if not isinstance(use_buffer, basestring):
    return u''

  try:
    return use_buffer.decode(u'utf-16').replace(u'\x00', u'')
  except SyntaxError as exception:
    logging.error(u'Unable to decode string: {0:s} with error: {1:s}.'.format(
        HexifyBuffer(string_buffer), exception))
  except (UnicodeDecodeError, UnicodeEncodeError) as exception:
    logging.error(u'Unable to decode string: {0:s} with error: {1:s}'.format(
        HexifyBuffer(string_buffer), exception))

  # Fall back to a lossy decode when strict decoding failed.
  return use_buffer.decode(u'utf-16', errors=u'ignore').replace(u'\x00', u'')
def HexifyBuffer(string_buffer):
  """Return a string with the hex representation of a string buffer.

  NOTE(review): iterating a Python 2 str yields 1-character strings, which
  binascii.hexlify accepts; on Python 3 bytes this would fail -- this
  helper assumes Python 2.
  """
  chars = []
  for char in string_buffer:
    chars.append(binascii.hexlify(char))

  return u'\\x{0:s}'.format(u'\\x'.join(chars))
| 31.366038 | 80 | 0.691651 |
c7fdc79569bb7acb6abc56e956d4e7cfa30e9afc | 1,052 | py | Python | pyfacebook/models/__init__.py | renan-prometheus-arch/python-facebook | 2885a6ef9199aea534ca567c1442b876d64d899e | [
"Apache-2.0"
] | null | null | null | pyfacebook/models/__init__.py | renan-prometheus-arch/python-facebook | 2885a6ef9199aea534ca567c1442b876d64d899e | [
"Apache-2.0"
] | null | null | null | pyfacebook/models/__init__.py | renan-prometheus-arch/python-facebook | 2885a6ef9199aea534ca567c1442b876d64d899e | [
"Apache-2.0"
] | 1 | 2021-06-02T07:15:35.000Z | 2021-06-02T07:15:35.000Z | from .access_token import AccessToken, AuthAccessToken
from .base import BaseModel
from .comment import Comment, CommentSummary
from .page import Page, PageCategory
from .picture import ProfilePictureSource, CoverPhoto
from .post import Post
from .video import Video, VideoCaption
from .photo import Photo
from .album import Album
from .ig_basic_models import IgBasicUser, IgBasicMedia, IgBasicMediaChildren
from .ig_pro_models import (
IgProUser, IgProMedia, IgProComment,
IgProReply, IgProHashtag, IgProInsight, IgProStory
)
# Public names re-exported by `from pyfacebook.models import *`; keep in
# sync with the imports above.
__all__ = [
    "AccessToken",
    "AuthAccessToken",
    "BaseModel",
    "CoverPhoto",
    "Comment",
    "CommentSummary",
    "Page",
    "PageCategory",
    "Post",
    "ProfilePictureSource",
    "Video",
    "VideoCaption",
    "Photo",
    "Album",
    # Instagram Basic display
    "IgBasicUser",
    "IgBasicMedia",
    "IgBasicMediaChildren",
    # Instagram Professional
    "IgProUser",
    "IgProMedia",
    "IgProComment",
    "IgProReply",
    "IgProHashtag",
    "IgProInsight",
    "IgProStory",
]
| 23.909091 | 76 | 0.705323 |
66bf66eaaa9fd8cfc76861eca46ae14149157c31 | 8,558 | py | Python | onadata/apps/logger/south_migrations/0024_auto__chg_field_attachment_mimetype.py | BuildAMovement/whistler-kobocat | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | [
"BSD-2-Clause"
] | 38 | 2017-02-28T05:39:40.000Z | 2019-01-16T04:39:04.000Z | onadata/apps/logger/south_migrations/0024_auto__chg_field_attachment_mimetype.py | BuildAMovement/whistler-kobocat | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | [
"BSD-2-Clause"
] | 48 | 2019-03-18T09:26:31.000Z | 2019-05-27T08:12:03.000Z | onadata/apps/logger/south_migrations/0024_auto__chg_field_attachment_mimetype.py | BuildAMovement/whistler-kobocat | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | [
"BSD-2-Clause"
] | 5 | 2017-02-22T12:25:19.000Z | 2019-01-15T11:16:40.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: widen Attachment.mimetype from 20 to 50 chars.

    The `models` dict below is an auto-generated South snapshot of the ORM
    state after this migration; do not edit it by hand.
    """

    def forwards(self, orm):
        """Apply the migration: grow the mimetype column to 50 characters."""

        # Changing field 'Attachment.mimetype'
        db.alter_column('odk_logger_attachment', 'mimetype', self.gf('django.db.models.fields.CharField')(max_length=50))

    def backwards(self, orm):
        """Revert the migration: shrink the mimetype column back to 20.

        NOTE(review): values longer than 20 characters written while the
        forwards migration was in effect may be truncated or rejected.
        """

        # Changing field 'Attachment.mimetype'
        db.alter_column('odk_logger_attachment', 'mimetype', self.gf('django.db.models.fields.CharField')(max_length=20))

    # Auto-generated ORM freeze (South); keep byte-identical.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'odk_logger.attachment': {
            'Meta': {'object_name': 'Attachment'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
            'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'mimetype': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'})
        },
        'odk_logger.instance': {
            'Meta': {'object_name': 'Instance'},
            'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
            'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
            'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_logger.instancehistory': {
            'Meta': {'object_name': 'InstanceHistory'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
            'xform_instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submission_history'", 'to': "orm['odk_logger.Instance']"}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_logger.surveytype': {
            'Meta': {'object_name': 'SurveyType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'odk_logger.xform': {
            'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
            'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
            'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'is_crowd_form': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
            'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
            'xml': ('django.db.models.fields.TextField', [], {})
        }
    }

    complete_apps = ['logger']
| 73.775862 | 182 | 0.565085 |
bf67ca23e119883f962890d75ddbfd42a8d9787d | 4,367 | py | Python | tests/components/panel_custom/test_init.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/panel_custom/test_init.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/panel_custom/test_init.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """The tests for the panel_custom component."""
from unittest.mock import Mock, patch
from homeassistant import setup
from homeassistant.components import frontend
async def test_webcomponent_custom_path_not_found(hass):
    """Test setup fails when the referenced webcomponent file is missing."""
    filename = "mock.file"
    config = {
        "panel_custom": {
            "name": "todomvc",
            "webcomponent_path": filename,
            "sidebar_title": "Sidebar Title",
            "sidebar_icon": "mdi:iconicon",
            "url_path": "nice_url",
            "config": 5,
        }
    }
    # Pretend the webcomponent file does not exist on disk.
    with patch("os.path.isfile", Mock(return_value=False)):
        result = await setup.async_setup_component(hass, "panel_custom", config)
        assert not result

        # Other built-in panels are registered, but not ours.
        panels = hass.data.get(frontend.DATA_PANELS, [])

        assert panels
        assert "nice_url" not in panels
async def test_js_webcomponent(hass):
    """Test registering a custom panel via a js_url webcomponent."""
    config = {
        "panel_custom": {
            "name": "todo-mvc",
            "js_url": "/local/bla.js",
            "sidebar_title": "Sidebar Title",
            "sidebar_icon": "mdi:iconicon",
            "url_path": "nice_url",
            "config": {"hello": "world"},
            "embed_iframe": True,
            "trust_external_script": True,
        }
    }

    result = await setup.async_setup_component(hass, "panel_custom", config)
    assert result

    panels = hass.data.get(frontend.DATA_PANELS, [])

    assert panels
    assert "nice_url" in panels

    panel = panels["nice_url"]

    # User config is merged with the generated _panel_custom section.
    assert panel.config == {
        "hello": "world",
        "_panel_custom": {
            "js_url": "/local/bla.js",
            "name": "todo-mvc",
            "embed_iframe": True,
            "trust_external": True,
        },
    }
    assert panel.frontend_url_path == "nice_url"
    assert panel.sidebar_icon == "mdi:iconicon"
    assert panel.sidebar_title == "Sidebar Title"
async def test_module_webcomponent(hass):
    """Test registering a custom panel via a module_url (JS module)."""
    config = {
        "panel_custom": {
            "name": "todo-mvc",
            "module_url": "/local/bla.js",
            "sidebar_title": "Sidebar Title",
            "sidebar_icon": "mdi:iconicon",
            "url_path": "nice_url",
            "config": {"hello": "world"},
            "embed_iframe": True,
            "trust_external_script": True,
            "require_admin": True,
        }
    }

    result = await setup.async_setup_component(hass, "panel_custom", config)
    assert result

    panels = hass.data.get(frontend.DATA_PANELS, [])

    assert panels
    assert "nice_url" in panels

    panel = panels["nice_url"]

    # require_admin is propagated to the registered panel.
    assert panel.require_admin
    assert panel.config == {
        "hello": "world",
        "_panel_custom": {
            "module_url": "/local/bla.js",
            "name": "todo-mvc",
            "embed_iframe": True,
            "trust_external": True,
        },
    }
    assert panel.frontend_url_path == "nice_url"
    assert panel.sidebar_icon == "mdi:iconicon"
    assert panel.sidebar_title == "Sidebar Title"
async def test_latest_and_es5_build(hass):
    """Test specifying an es5 and latest build."""
    # Provide both the legacy (js_url) and modern (module_url) bundles and
    # check that both URLs are carried into the panel metadata.
    panel_config = {
        "panel_custom": {
            "name": "todo-mvc",
            "js_url": "/local/es5.js",
            "module_url": "/local/latest.js",
            "url_path": "nice_url",
        }
    }
    assert await setup.async_setup_component(hass, "panel_custom", panel_config)

    registered = hass.data.get(frontend.DATA_PANELS, {})
    assert registered
    assert "nice_url" in registered

    panel = registered["nice_url"]
    expected_config = {
        "_panel_custom": {
            "name": "todo-mvc",
            "js_url": "/local/es5.js",
            "module_url": "/local/latest.js",
            "embed_iframe": False,
            "trust_external": False,
        },
    }
    assert panel.config == expected_config
    assert panel.frontend_url_path == "nice_url"
async def test_url_path_conflict(hass):
    """Test config with overlapping url path."""
    # Two panels sharing the same implicit url_path; setup must still
    # report success.
    panel = {"name": "todo-mvc", "js_url": "/local/bla.js"}
    config = {"panel_custom": [dict(panel), dict(panel)]}
    assert await setup.async_setup_component(hass, "panel_custom", config)
| 27.815287 | 80 | 0.564003 |
00f0186353b156fc0d2a04d30a0adfff96adea16 | 1,073 | py | Python | grabcut1.py | qwerlarlgus/YOLO_Project1 | 27e7b325439e59c8cf0ee9d6cdfd802a4de6c7d4 | [
"MIT"
] | null | null | null | grabcut1.py | qwerlarlgus/YOLO_Project1 | 27e7b325439e59c8cf0ee9d6cdfd802a4de6c7d4 | [
"MIT"
] | null | null | null | grabcut1.py | qwerlarlgus/YOLO_Project1 | 27e7b325439e59c8cf0ee9d6cdfd802a4de6c7d4 | [
"MIT"
] | 1 | 2021-02-02T07:34:20.000Z | 2021-02-02T07:34:20.000Z | import cv2
cv2.__version__ # 4.1.2
import cv2
import numpy as np
import matplotlib.pyplot as plt
image = cv2.imread('vlcsnap-2021-02-04-10h05m23s567.png', cv2.IMREAD_GRAYSCALE)
# Load the image, then convert BGR -> RGB
image_bgr = cv2.imread('vlcsnap-2021-02-04-10h05m23s567.png')
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
# Rectangle around the foreground: (x, y, width, height)
rectangle = (253, 92, 200, 253)
# Create the initial (all-zero) mask
mask = np.zeros(image_rgb.shape[:2], np.uint8)
# Temporary arrays used internally by grabCut for the models
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
# Run grabCut
cv2.grabCut(image_rgb, # source image
            mask, # mask
            rectangle, # rectangle
            bgdModel, # temporary array for the background model
            fgdModel, # temporary array for the foreground model
            5, # number of iterations
            cv2.GC_INIT_WITH_RECT) # initialise using the rectangle
# Build a mask that is 0 where grabCut marked (probable) background
# (values 0 and 2) and 1 everywhere else
mask_2 = np.where((mask==2) | (mask==0), 0, 1).astype('uint8')
# Multiply the image by the new mask to zero out the background
image_rgb_nobg = image_rgb * mask_2[:, :, np.newaxis]
# plot
plt.imshow(image_rgb_nobg)
plt.show()
cv2.imwrite('image1.jpg', image_rgb_nobg)
| 23.844444 | 79 | 0.643057 |
69599d3a220a4980dbdb4e5ecf306c89f58aad44 | 6,608 | py | Python | staidelta/daemon/client.py | WarutaShinken/staidelta-blockchain | ab6fd7d0ea93ac8b1b293240aab18db8db34718d | [
"Apache-2.0"
] | 1 | 2022-03-02T12:36:42.000Z | 2022-03-02T12:36:42.000Z | staidelta/daemon/client.py | WarutaShinken/staidelta-blockchain | ab6fd7d0ea93ac8b1b293240aab18db8db34718d | [
"Apache-2.0"
] | null | null | null | staidelta/daemon/client.py | WarutaShinken/staidelta-blockchain | ab6fd7d0ea93ac8b1b293240aab18db8db34718d | [
"Apache-2.0"
] | null | null | null | import asyncio
import json
import ssl
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Any, Dict, Optional
import websockets
from staidelta.types.blockchain_format.sized_bytes import bytes32
from staidelta.util.config import load_config
from staidelta.util.json_util import dict_to_json_str
from staidelta.util.ws_message import WsRpcMessage, create_payload_dict
class DaemonProxy:
    """Websocket RPC proxy for the local daemon.

    Sends JSON payloads over a TLS websocket and matches incoming
    responses to their requests via ``request_id``.
    """

    def __init__(self, uri: str, ssl_context: Optional[ssl.SSLContext]):
        self._uri = uri
        # request_id -> Event, set once the matching response has arrived
        # (or the 30s timeout fires).
        self._request_dict: Dict[bytes32, asyncio.Event] = {}
        # request_id -> decoded response message.
        self.response_dict: Dict[bytes32, Any] = {}
        self.ssl_context = ssl_context

    def format_request(self, command: str, data: Dict[str, Any]) -> WsRpcMessage:
        """Build a client->daemon payload for the given command."""
        request = create_payload_dict(command, data, "client", "daemon")
        return request

    async def start(self):
        """Open the websocket and start the background response listener."""
        self.websocket = await websockets.connect(self._uri, max_size=None, ssl=self.ssl_context)

        async def listener():
            # Route each incoming message to the waiter registered for its
            # request_id; exit quietly when the connection closes normally.
            while True:
                try:
                    message = await self.websocket.recv()
                except websockets.exceptions.ConnectionClosedOK:
                    return None
                decoded = json.loads(message)
                id = decoded["request_id"]

                if id in self._request_dict:
                    self.response_dict[id] = decoded
                    self._request_dict[id].set()

        asyncio.create_task(listener())
        # NOTE(review): presumably gives the listener task a moment to start
        # before callers begin sending requests -- confirm this is required.
        await asyncio.sleep(1)

    async def _get(self, request: WsRpcMessage) -> WsRpcMessage:
        """Send *request* and wait (up to 30s) for its matching response.

        Returns None if the request times out.
        """
        request_id = request["request_id"]
        self._request_dict[request_id] = asyncio.Event()
        string = dict_to_json_str(request)
        asyncio.create_task(self.websocket.send(string))

        async def timeout():
            # Unblock the waiter after 30s if no response has arrived.
            await asyncio.sleep(30)
            if request_id in self._request_dict:
                print("Error, timeout.")
                self._request_dict[request_id].set()

        asyncio.create_task(timeout())
        await self._request_dict[request_id].wait()
        if request_id in self.response_dict:
            response = self.response_dict[request_id]
            self.response_dict.pop(request_id)
        else:
            # Timed out: the listener never recorded a response.
            response = None
        self._request_dict.pop(request_id)
        return response

    async def start_service(self, service_name: str) -> WsRpcMessage:
        """Ask the daemon to start the named service."""
        data = {"service": service_name}
        request = self.format_request("start_service", data)
        response = await self._get(request)
        return response

    async def stop_service(self, service_name: str, delay_before_kill: int = 15) -> WsRpcMessage:
        """Ask the daemon to stop the named service.

        NOTE(review): delay_before_kill is accepted but never forwarded in
        the payload -- confirm whether it should be included.
        """
        data = {"service": service_name}
        request = self.format_request("stop_service", data)
        response = await self._get(request)
        return response

    async def is_running(self, service_name: str) -> bool:
        """Return True if the daemon reports the service as running."""
        data = {"service": service_name}
        request = self.format_request("is_running", data)
        response = await self._get(request)

        if "is_running" in response["data"]:
            return bool(response["data"]["is_running"])

        return False

    async def is_keyring_locked(self) -> bool:
        """Return True if the daemon reports the keyring as locked."""
        data: Dict[str, Any] = {}
        request = self.format_request("is_keyring_locked", data)
        response = await self._get(request)

        if "is_keyring_locked" in response["data"]:
            return bool(response["data"]["is_keyring_locked"])

        return False

    async def unlock_keyring(self, passphrase: str) -> WsRpcMessage:
        """Attempt to unlock the keyring with *passphrase*."""
        data = {"key": passphrase}
        request = self.format_request("unlock_keyring", data)
        response = await self._get(request)
        return response

    async def notify_keyring_migration_completed(self, passphrase: Optional[str]) -> WsRpcMessage:
        """Tell the daemon that keyring migration has finished."""
        data: Dict[str, Any] = {"key": passphrase}
        request: WsRpcMessage = self.format_request("notify_keyring_migration_completed", data)
        response: WsRpcMessage = await self._get(request)
        return response

    async def ping(self) -> WsRpcMessage:
        """Round-trip liveness check against the daemon."""
        request = self.format_request("ping", {})
        response = await self._get(request)
        return response

    async def close(self) -> None:
        """Close the websocket connection."""
        await self.websocket.close()

    async def exit(self) -> WsRpcMessage:
        """Ask the daemon process to shut down."""
        request = self.format_request("exit", {})
        return await self._get(request)
async def connect_to_daemon(self_hostname: str, daemon_port: int, ssl_context: Optional[ssl.SSLContext]) -> DaemonProxy:
    """
    Connect to the local daemon.
    """
    proxy = DaemonProxy(f"wss://{self_hostname}:{daemon_port}", ssl_context)
    await proxy.start()
    return proxy
async def connect_to_daemon_and_validate(root_path: Path, quiet: bool = False) -> Optional[DaemonProxy]:
    """
    Connect to the local daemon and do a ping to ensure that something is really
    there and running.
    """
    from staidelta.server.server import ssl_context_for_client

    try:
        net_config = load_config(root_path, "config.yaml")
        daemon_ssl = net_config["daemon_ssl"]
        private_ca = net_config["private_ssl_ca"]
        ssl_context = ssl_context_for_client(
            root_path / private_ca["crt"],
            root_path / private_ca["key"],
            root_path / daemon_ssl["private_crt"],
            root_path / daemon_ssl["private_key"],
        )
        connection = await connect_to_daemon(net_config["self_hostname"], net_config["daemon_port"], ssl_context)
        reply = await connection.ping()

        if reply["data"].get("value") == "pong":
            return connection
    except Exception:
        # Any failure (missing config, refused connection, bad ping reply)
        # means the daemon is not available.
        if not quiet:
            print("Daemon not started yet")
    return None
@asynccontextmanager
async def acquire_connection_to_daemon(root_path: Path, quiet: bool = False):
    """
    Asynchronous context manager which attempts to create a connection to the daemon.
    The connection object (DaemonProxy) is yielded to the caller. After the caller's
    block exits scope, execution resumes in this function, wherein the connection is
    closed.
    """
    from staidelta.daemon.client import connect_to_daemon_and_validate

    daemon: Optional[DaemonProxy] = None
    try:
        # May yield None if the daemon is unreachable; callers must check.
        daemon = await connect_to_daemon_and_validate(root_path, quiet=quiet)
        yield daemon  # <----
    except Exception as e:
        # NOTE(review): exceptions raised inside the caller's block are
        # swallowed here (only printed) -- confirm this best-effort
        # behaviour is intended.
        print(f"Exception occurred while communicating with the daemon: {e}")
    if daemon is not None:
        await daemon.close()
| 37.333333 | 120 | 0.662379 |
b79e992e8b1a83f3e42a2df632d229c17d270258 | 1,683 | py | Python | package/spack-py-pyscaf/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-py-pyscaf/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-py-pyscaf/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPyscaf(PythonPackage):
    """pyScaf orders contigs from genome assemblies utilising several types of
    information"""

    homepage = "https://pypi.python.org/pypi/pyScaf"
    url      = "https://pypi.io/packages/source/p/pyScaf/pyScaf-0.12a4.tar.gz"

    # md5 checksum of the 0.12a4 source tarball
    version('0.12a4', 'c67526747eb04d1e28279ac310916d40')

    depends_on('py-setuptools', type='build')
    depends_on('py-fastaindex', type=('build', 'run'))
| 43.153846 | 78 | 0.677362 |
b2d878043756e8b9b759b3800e05aebd2e048928 | 52,327 | py | Python | sympy/simplify/tests/test_simplify.py | srjoglekar246/sympy | 21910e9b6e590a40492de4e129e40cbe3b7ecba0 | [
"BSD-3-Clause"
] | null | null | null | sympy/simplify/tests/test_simplify.py | srjoglekar246/sympy | 21910e9b6e590a40492de4e129e40cbe3b7ecba0 | [
"BSD-3-Clause"
] | null | null | null | sympy/simplify/tests/test_simplify.py | srjoglekar246/sympy | 21910e9b6e590a40492de4e129e40cbe3b7ecba0 | [
"BSD-3-Clause"
] | null | null | null | from sympy import (
acos, Add, atan, besselsimp, binomial, collect, collect_const, combsimp,
cos, cosh, cot, coth, count_ops, Derivative, diff, Dummy, E, Eq, erf, exp,
exp_polar, expand, factor, factorial, FallingFactorial, Float, fraction,
Function, gamma, GoldenRatio, hyper, hyper, hypersimp, I, Integer,
Integral, integrate, log, logcombine, Matrix, Mul, nsimplify, O, oo, pi,
Piecewise, polar_lift, polarify, posify, powdenest, powsimp, radsimp,
Rational, ratsimp, ratsimpmodprime, rcollect, RisingFactorial, S,
separatevars, signsimp, simplify, sin, sinh, solve, sqrt, Subs, Symbol,
symbols, sympify, tan, tanh, trigsimp, Wild)
from sympy.core.mul import _keep_coeff
from sympy.simplify.simplify import fraction_expand
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y, z, t, a, b, c, d, e, k
def test_ratsimp():
    """ratsimp puts an expression over a common denominator and cancels."""
    f, g = 1/x + 1/y, (x + y)/(x*y)
    assert f != g and ratsimp(f) == g
    f, g = 1/(1 + 1/x), 1 - 1/(x + 1)
    assert f != g and ratsimp(f) == g
    f, g = x/(x + y) + y/(x + y), 1
    assert f != g and ratsimp(f) == g
    f, g = -x - y - y**2/(x + y) + x**2/(x + y), -2*y
    assert f != g and ratsimp(f) == g
    # Larger example: the simplified form can legitimately come out as
    # either of two equivalent expressions.
    f = (a*c*x*y + a*c*z - b*d*x*y - b*d*z - b*t*x*y - b*t*x - b*t*z + e*x)/(x*y + z)
    G = [a*c - b*d - b*t + (-b*t*x + e*x)/(x*y + z),
         a*c - b*d - b*t - ( b*t*x - e*x)/(x*y + z)]
    assert f != g and ratsimp(f) in G
    # Expression involving erf/log; checks ratsimp on non-polynomial atoms.
    A = sqrt(pi)
    B = log(erf(x) - 1)
    C = log(erf(x) + 1)
    D = 8 - 8*erf(x)
    f = A*B/D - A*C/D + A*C*erf(x)/D - A*B*erf(x)/D + 2*A/D
    assert ratsimp(f) == A*B/8 - A*C/8 - A/(4*erf(x) - 4)
def test_ratsimpmodprime():
    """ratsimpmodprime simplifies a rational function modulo a prime ideal
    given by a list of polynomial generators F."""
    a = y**5 + x + y
    b = x - y
    F = [x*y**5 - x - y]
    assert ratsimpmodprime(a/b, F, x, y, order='lex') == \
        (x**2 + x*y + x + y) / (x**2 - x*y)
    a = x + y**2 - 2
    b = x + y**2 - y - 1
    F = [x*y - 1]
    assert ratsimpmodprime(a/b, F, x, y, order='lex') == \
        (x - y - 1)/(x - y)
    a = 5*x**3 + 21*x**2 + 4*x*y + 23*x + 12*y + 15
    b = 7*x**3 - y*x**2 + 31*x**2 + 2*x*y + 15*y + 37*x + 21
    F = [x**2 + y**2 - 1]
    assert ratsimpmodprime(a/b, F, x, y, order='lex') == \
        (3*x + 4*y + 5)/(5*x + 5*y + 7)
    # With x and y fixed by the ideal, the quotient reduces to a constant.
    a = x*y - x - 2*y + 4
    b = x + y**2 - 2*y
    F = [x - 2, y - 3]
    assert ratsimpmodprime(a/b, F, x, y, order='lex') == \
        Rational(2, 5)
def test_trigsimp1():
x, y = symbols('x,y')
assert trigsimp(1 - sin(x)**2) == cos(x)**2
assert trigsimp(1 - cos(x)**2) == sin(x)**2
assert trigsimp(sin(x)**2 + cos(x)**2) == 1
assert trigsimp(1 + tan(x)**2) == 1/cos(x)**2
assert trigsimp(1/cos(x)**2 - 1) == tan(x)**2
assert trigsimp(1/cos(x)**2 - tan(x)**2) == 1
assert trigsimp(1 + cot(x)**2) == 1/sin(x)**2
assert trigsimp(1/sin(x)**2 - 1) == cot(x)**2
assert trigsimp(1/sin(x)**2 - cot(x)**2) == 1
assert trigsimp(5*cos(x)**2 + 5*sin(x)**2) == 5
assert trigsimp(5*cos(x/2)**2 + 2*sin(x/2)**2) in \
[2 + 3*cos(x/2)**2, 5 - 3*sin(x/2)**2]
assert trigsimp(sin(x)/cos(x)) == tan(x)
assert trigsimp(2*tan(x)*cos(x)) == 2*sin(x)
assert trigsimp(cot(x)**3*sin(x)**3) == cos(x)**3
assert trigsimp(y*tan(x)**2/sin(x)**2) == y/cos(x)**2
assert trigsimp(cot(x)/cos(x)) == 1/sin(x)
assert trigsimp(sin(x + y) + sin(x - y)) == 2*sin(x)*cos(y)
assert trigsimp(sin(x + y) - sin(x - y)) == 2*sin(y)*cos(x)
assert trigsimp(cos(x + y) + cos(x - y)) == 2*cos(x)*cos(y)
assert trigsimp(cos(x + y) - cos(x - y)) == -2*sin(x)*sin(y)
assert ratsimp(trigsimp(tan(x + y) - tan(x)/(1 - tan(x)*tan(y)))) == \
-tan(y)/(tan(x)*tan(y) -1)
assert trigsimp(sinh(x + y) + sinh(x - y)) == 2*sinh(x)*cosh(y)
assert trigsimp(sinh(x + y) - sinh(x - y)) == 2*sinh(y)*cosh(x)
assert trigsimp(cosh(x + y) + cosh(x - y)) == 2*cosh(x)*cosh(y)
assert trigsimp(cosh(x + y) - cosh(x - y)) == 2*sinh(x)*sinh(y)
assert ratsimp(trigsimp(tanh(x + y) - tanh(x)/(1 + tanh(x)*tanh(y)))) == \
tanh(y)/(tanh(x)*tanh(y) + 1)
assert trigsimp(cos(0.12345)**2 + sin(0.12345)**2) == 1
e = 2*sin(x)**2 + 2*cos(x)**2
assert trigsimp(log(e), deep=True) == log(2)
def test_trigsimp2():
    """Recursive trigsimp collapses nested Pythagorean identities."""
    x, y = symbols('x,y')
    # Both expressions reduce to 1 once the inner identity in y is
    # recognised and then combined with the outer identity in x.
    for outer, inner in ((cos, sin), (sin, cos)):
        expr = outer(x)**2*sin(y)**2 + outer(x)**2*cos(y)**2 + inner(x)**2
        assert trigsimp(expr, recursive=True) == 1
def test_trigsimp_deep():
    """deep=True simplifies inside Subs expressions too."""
    x, y = symbols('x,y')
    assert trigsimp(Subs(x, x, sin(y)**2+cos(y)**2), deep=True) == Subs(x, x, 1)
    assert simplify(Subs(x, x, sin(y)**2+cos(y)**2)) == Subs(x, x, 1)
def test_issue1274():
    """Float coefficients must not prevent the Pythagorean identity."""
    x = Symbol("x")
    simplified = trigsimp(2.0*sin(x)**2 + 2.0*cos(x)**2)
    assert abs(simplified - 2.0) < 1e-10
def test_trigsimp3():
    """sin/cos ratios collapse to tan (and cos/sin to 1/tan) at any power."""
    x, y = symbols('x,y')
    assert trigsimp(sin(x)/cos(x)) == tan(x)
    assert trigsimp(sin(x)**2/cos(x)**2) == tan(x)**2
    assert trigsimp(sin(x)**3/cos(x)**3) == tan(x)**3
    assert trigsimp(sin(x)**10/cos(x)**10) == tan(x)**10
    assert trigsimp(cos(x)/sin(x)) == 1/tan(x)
    assert trigsimp(cos(x)**2/sin(x)**2) == 1/tan(x)**2
    assert trigsimp(cos(x)**10/sin(x)**10) == 1/tan(x)**10
    # tan(x) itself must be a fixed point of the rewrite.
    assert trigsimp(tan(x)) == trigsimp(sin(x)/cos(x))
def test_trigsimp_issue_2515():
    """cos(x)*tan(x) reduces to sin(x), with or without a prefactor."""
    x = Symbol('x')
    assert trigsimp(x*cos(x)*tan(x)) == x*sin(x)
    assert trigsimp(cos(x)*tan(x) - sin(x)) == 0
def test_trigsimp_noncommutative():
x, y = symbols('x,y')
A, B = symbols('A,B', commutative=False)
assert trigsimp(A - A*sin(x)**2) == A*cos(x)**2
assert trigsimp(A - A*cos(x)**2) == A*sin(x)**2
assert trigsimp(A*sin(x)**2 + A*cos(x)**2) == A
assert trigsimp(A + A*tan(x)**2) == A/cos(x)**2
assert trigsimp(A/cos(x)**2 - A) == A*tan(x)**2
assert trigsimp(A/cos(x)**2 - A*tan(x)**2) == A
assert trigsimp(A + A*cot(x)**2) == A/sin(x)**2
assert trigsimp(A/sin(x)**2 - A) == A*cot(x)**2
assert trigsimp(A/sin(x)**2 - A*cot(x)**2) == A
assert trigsimp(y*A*cos(x)**2 + y*A*sin(x)**2) == y*A
assert trigsimp(A*sin(x)/cos(x)) == A*tan(x)
assert trigsimp(A*tan(x)*cos(x)) == A*sin(x)
assert trigsimp(A*cot(x)**3*sin(x)**3) == A*cos(x)**3
assert trigsimp(y*A*tan(x)**2/sin(x)**2) == y*A/cos(x)**2
assert trigsimp(A*cot(x)/cos(x)) == A/sin(x)
assert trigsimp(A*sin(x + y) + A*sin(x - y)) == 2*A*sin(x)*cos(y)
assert trigsimp(A*sin(x + y) - A*sin(x - y)) == 2*A*sin(y)*cos(x)
assert trigsimp(A*cos(x + y) + A*cos(x - y)) == 2*A*cos(x)*cos(y)
assert trigsimp(A*cos(x + y) - A*cos(x - y)) == -2*A*sin(x)*sin(y)
assert trigsimp(A*sinh(x + y) + A*sinh(x - y)) == 2*A*sinh(x)*cosh(y)
assert trigsimp(A*sinh(x + y) - A*sinh(x - y)) == 2*A*sinh(y)*cosh(x)
assert trigsimp(A*cosh(x + y) + A*cosh(x - y)) == 2*A*cosh(x)*cosh(y)
assert trigsimp(A*cosh(x + y) - A*cosh(x - y)) == 2*A*sinh(x)*sinh(y)
assert trigsimp(A*cos(0.12345)**2 + A*sin(0.12345)**2) == 1.0*A
def test_hyperbolic_simp():
x, y = symbols('x,y')
assert trigsimp(sinh(x)**2 + 1) == cosh(x)**2
assert trigsimp(cosh(x)**2 - 1) == sinh(x)**2
assert trigsimp(cosh(x)**2 - sinh(x)**2) == 1
assert trigsimp(1 - tanh(x)**2) == 1/cosh(x)**2
assert trigsimp(1 - 1/cosh(x)**2) == tanh(x)**2
assert trigsimp(tanh(x)**2 + 1/cosh(x)**2) == 1
assert trigsimp(coth(x)**2 - 1) == 1/sinh(x)**2
assert trigsimp(1/sinh(x)**2 + 1) == coth(x)**2
assert trigsimp(coth(x)**2 - 1/sinh(x)**2) == 1
assert trigsimp(5*cosh(x)**2 - 5*sinh(x)**2) == 5
assert trigsimp(5*cosh(x/2)**2 - 2*sinh(x/2)**2) in \
[2 + 3*cosh(x/2)**2, 5 + 3*sinh(x/2)**2]
assert trigsimp(sinh(x)/cosh(x)) == tanh(x)
assert trigsimp(tanh(x)) == trigsimp(sinh(x)/cosh(x))
assert trigsimp(cosh(x)/sinh(x)) == 1/tanh(x)
assert trigsimp(2*tanh(x)*cosh(x)) == 2*sinh(x)
assert trigsimp(coth(x)**3*sinh(x)**3) == cosh(x)**3
assert trigsimp(y*tanh(x)**2/sinh(x)**2) == y/cosh(x)**2
assert trigsimp(coth(x)/cosh(x)) == 1/sinh(x)
e = 2*cosh(x)**2 - 2*sinh(x)**2
assert trigsimp(log(e), deep=True) == log(2)
assert trigsimp(cosh(x)**2*cosh(y)**2 - cosh(x)**2*sinh(y)**2 - sinh(x)**2,
recursive=True) == 1
assert trigsimp(sinh(x)**2*sinh(y)**2 - sinh(x)**2*cosh(y)**2 + cosh(x)**2,
recursive=True) == 1
assert abs(trigsimp(2.0*cosh(x)**2 - 2.0*sinh(x)**2)-2.0) < 1e-10
assert trigsimp(sinh(x)**2/cosh(x)**2) == tanh(x)**2
assert trigsimp(sinh(x)**3/cosh(x)**3) == tanh(x)**3
assert trigsimp(sinh(x)**10/cosh(x)**10) == tanh(x)**10
assert trigsimp(cosh(x)**3/sinh(x)**3) == 1/tanh(x)**3
assert trigsimp(cosh(x)/sinh(x)) == 1/tanh(x)
assert trigsimp(cosh(x)**2/sinh(x)**2) == 1/tanh(x)**2
assert trigsimp(cosh(x)**10/sinh(x)**10) == 1/tanh(x)**10
assert trigsimp(x*cosh(x)*tanh(x)) == x*sinh(x)
assert trigsimp(-sinh(x) + cosh(x)*tanh(x)) == 0
@XFAIL
def test_tan_cot():
    x = Symbol('x')
    # Expected failure: tan(x) and 1/cot(x) are distinct expression trees,
    # so structural equality does not hold without simplification.
    assert tan(x) == 1/cot(x)
@XFAIL
def test_tan_cot2():
    # Expected failure: trigsimp does not yet cancel tan against 1/cot
    # (nor tanh powers against 1/coth powers).
    x = Symbol('x')
    assert trigsimp(tan(x) - 1/cot(x)) == 0
    assert trigsimp(3*tanh(x)**7 - 2/coth(x)**7) == tanh(x)**7
@XFAIL
def test_factorial_simplify():
    # There are more tests in test_factorials.py. These are just to
    # ensure that simplify() calls factorial_simplify correctly
    # NOTE(review): imports from sympy.specfun.factorials, which looks
    # like a legacy module path -- likely why this is marked XFAIL.
    from sympy.specfun.factorials import factorial
    x = Symbol('x')
    assert simplify(factorial(x)/x) == factorial(x-1)
    assert simplify(factorial(factorial(x))) == factorial(factorial(x))
def test_simplify():
x, y, z, k, n, m, w, f, s, A = symbols('x,y,z,k,n,m,w,f,s,A')
assert all(simplify(tmp) == tmp for tmp in [I, E, oo, x, -x, -oo, -E, -I])
e = 1/x + 1/y
assert e != (x+y)/(x*y)
assert simplify(e) == (x+y)/(x*y)
e = A**2*s**4/(4*pi*k*m**3)
assert simplify(e) == e
e = (4+4*x-2*(2+2*x))/(2+2*x)
assert simplify(e) == 0
e = (-4*x*y**2-2*y**3-2*x**2*y)/(x+y)**2
assert simplify(e) == -2*y
e = -x-y-(x+y)**(-1)*y**2+(x+y)**(-1)*x**2
assert simplify(e) == -2*y
e = (x+x*y)/x
assert simplify(e) == 1 + y
e = (f(x)+y*f(x))/f(x)
assert simplify(e) == 1 + y
e = (2 * (1/n - cos(n * pi)/n))/pi
assert simplify(e) == 2*((1 - 1*cos(pi*n))/(pi*n))
e = integrate(1/(x**3+1), x).diff(x)
assert simplify(e) == 1/(x**3+1)
e = integrate(x/(x**2+3*x+1), x).diff(x)
assert simplify(e) == x/(x**2+3*x+1)
A = Matrix([[2*k-m*w**2, -k], [-k, k-m*w**2]]).inv()
assert simplify((A*Matrix([0,f]))[1]) == \
f*(2*k - m*w**2)/(k**2 - 3*k*m*w**2 + m**2*w**4)
a, b, c, d, e, f, g, h, i = symbols('a,b,c,d,e,f,g,h,i')
f_1 = x*a + y*b + z*c - 1
f_2 = x*d + y*e + z*f - 1
f_3 = x*g + y*h + z*i - 1
solutions = solve([f_1, f_2, f_3], x, y, z, simplify=False)
assert simplify(solutions[y]) == \
(a*i+c*d+f*g-a*f-c*g-d*i)/(a*e*i+b*f*g+c*d*h-a*f*h-b*d*i-c*e*g)
f = -x + y/(z + t) + z*x/(z + t) + z*a/(z + t) + t*x/(z + t)
assert simplify(f) == (y + a*z)/(z + t)
A, B = symbols('A,B', commutative=False)
assert simplify(A*B - B*A) == A*B - B*A
assert simplify(log(2) + log(3)) == log(6)
assert simplify(log(2*x) - log(2)) == log(x)
assert simplify(hyper([], [], x)) == exp(x)
def test_simplify_other():
assert simplify(sin(x)**2 + cos(x)**2) == 1
assert simplify(gamma(x + 1)/gamma(x)) == x
assert simplify(sin(x)**2 + cos(x)**2 + factorial(x)/gamma(x)) == 1 + x
assert simplify(Eq(sin(x)**2 + cos(x)**2, factorial(x)/gamma(x))) == Eq(1, x)
nc = symbols('nc', commutative=False)
assert simplify(x + x*nc) == x*(1 + nc)
# issue 3024
# f = exp(-I*(k*sqrt(t) + x/(2*sqrt(t)))**2)
# ans = integrate(f, (k, -oo, oo), conds='none')
ans = I*(-pi*x*exp(-3*I*pi/4 + I*x**2/(4*t))*erf(x*exp(-3*I*pi/4)/\
(2*sqrt(t)))/(2*sqrt(t)) + pi*x*exp(-3*I*pi/4 + I*x**2/(4*t))/\
(2*sqrt(t)))*exp(-I*x**2/(4*t))/(sqrt(pi)*x) - I*sqrt(pi)*\
(-erf(x*exp(I*pi/4)/(2*sqrt(t))) + 1)*exp(I*pi/4)/(2*sqrt(t))
assert simplify(ans) == -(-1)**(S(3)/4)*sqrt(pi)/sqrt(t)
# issue 3271
assert simplify(2**(2 + x)/4) == 2**x
def test_simplify_ratio():
# roots of x**3-3*x+5
roots = ['(1/2 - sqrt(3)*I/2)*(sqrt(21)/2 + 5/2)**(1/3) + 1/((1/2 - '
'sqrt(3)*I/2)*(sqrt(21)/2 + 5/2)**(1/3))',
'1/((1/2 + sqrt(3)*I/2)*(sqrt(21)/2 + 5/2)**(1/3)) + '
'(1/2 + sqrt(3)*I/2)*(sqrt(21)/2 + 5/2)**(1/3)',
'-(sqrt(21)/2 + 5/2)**(1/3) - 1/(sqrt(21)/2 + 5/2)**(1/3)']
for r in roots:
r = S(r)
assert count_ops(simplify(r, ratio=1)) <= count_ops(r)
# If ratio=oo, simplify() is always applied:
assert simplify(r, ratio=oo) is not r
def test_simplify_measure():
    """simplify honours a user-supplied complexity measure."""

    # String length as a crude complexity measure.
    def measure1(expr):
        return len(str(expr))

    # Negated op count: prefers the *most* complicated form.
    def measure2(expr):
        return -count_ops(expr)

    expr = (x + 1)/(x + sin(x)**2 + cos(x)**2)
    # The result must never be worse than the input under either measure.
    assert measure1(simplify(expr, measure=measure1)) <= measure1(expr)
    assert measure2(simplify(expr, measure=measure2)) <= measure2(expr)
def test_simplify_issue_1308():
    # exp(-1/2) + exp(-3/2) should factor as (1 + E)*exp(-3/2).
    assert simplify(exp(-Rational(1, 2)) + exp(-Rational(3, 2))) == \
        (1 + E)*exp(-Rational(3, 2))
def test_issue_2553():
    # These already-simple forms must pass through simplify unchanged,
    # including with a noncommutative symbol.
    assert simplify(E + exp(-E)) == E + exp(-E)
    n = symbols('n', commutative=False)
    assert simplify(n + n**(-n)) == n + n**(-n)
def test_simplify_fail1():
    # NOTE(review): the name suggests this was once an expected failure,
    # but the assertion is checked unconditionally here.
    x = Symbol('x')
    y = Symbol('y')
    e = (x+y)**2/(-4*x*y**2-2*y**3-2*x**2*y)
    assert simplify(e) == 1 / (-2*y)
def test_simplify_issue_3214():
    # radsimp of a complex quotient with positive-symbol assumptions.
    c, p = symbols('c p', positive=True)
    s = sqrt(c**2 - p**2)
    b = (c + I*p - s)/(c + I*p + s)
    assert radsimp(b) == (c*p - p*s + I*(-c**2 + c*s + p**2))/(c*p)
def test_fraction():
    """fraction splits an expression into (numerator, denominator)."""
    x, y, z = map(Symbol, 'xyz')
    A = Symbol('A', commutative=False)
    assert fraction(Rational(1, 2)) == (1, 2)
    assert fraction(x) == (x, 1)
    assert fraction(1/x) == (1, x)
    assert fraction(x/y) == (x, y)
    assert fraction(x/2) == (x, 2)
    assert fraction(x*y/z) == (x*y, z)
    assert fraction(x/(y*z)) == (x, y*z)
    assert fraction(1/y**2) == (1, y**2)
    assert fraction(x/y**2) == (x, y**2)
    assert fraction((x**2+1)/y) == (x**2+1, y)
    assert fraction(x*(y+1)/y**7) == (x*(y+1), y**7)
    # exact=True keeps exp(-x) in the numerator instead of moving it down.
    assert fraction(exp(-x), exact=True) == (exp(-x), 1)
    # Noncommutative factors stay in the numerator.
    assert fraction(x*A/y) == (x*A, y)
    assert fraction(x*A**-1/y) == (x*A**-1, y)
    # With n known negative, exp(n) is recognised as a reciprocal.
    n = symbols('n', negative=True)
    assert fraction(exp(n)) == (1, exp(-n))
    assert fraction(exp(-n)) == (exp(-n), 1)
def test_powsimp():
x, y, z, n = symbols('x,y,z,n')
f = Function('f')
assert powsimp( 4**x * 2**(-x) * 2**(-x) ) == 1
assert powsimp( (-4)**x * (-2)**(-x) * 2**(-x) ) == 1
assert powsimp( f(4**x * 2**(-x) * 2**(-x)) ) == f(4**x * 2**(-x) * 2**(-x))
assert powsimp( f(4**x * 2**(-x) * 2**(-x)), deep = True ) == f(1)
assert exp(x)*exp(y) == exp(x)*exp(y)
assert powsimp(exp(x)*exp(y)) == exp(x+y)
assert powsimp(exp(x)*exp(y)*2**x*2**y) == (2*E)**(x + y)
assert powsimp(exp(x)*exp(y)*2**x*2**y, combine='exp') == exp(x+y)*2**(x+y)
assert powsimp(exp(x)*exp(y)*exp(2)*sin(x)+sin(y)+2**x*2**y) == exp(2+x+y)*sin(x)+sin(y)+2**(x+y)
assert powsimp(sin(exp(x)*exp(y))) == sin(exp(x)*exp(y))
assert powsimp(sin(exp(x)*exp(y)), deep=True) == sin(exp(x+y))
assert powsimp(x**2*x**y) == x**(2+y)
# This should remain factored, because 'exp' with deep=True is supposed
# to act like old automatic exponent combining.
assert powsimp((1 + E*exp(E))*exp(-E), combine='exp', deep=True) == (1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), deep=True) == (1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E)) == (1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), combine='exp') == (1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), combine='base') == (1 + E*exp(E))*exp(-E)
x,y = symbols('x,y', nonnegative=True)
n = Symbol('n', real=True)
assert powsimp( y**n * (y/x)**(-n) ) == x**n
assert powsimp(x**(x**(x*y)*y**(x*y))*y**(x**(x*y)*y**(x*y)),deep=True) == (x*y)**(x*y)**(x*y)
assert powsimp(2**(2**(2*x)*x), deep=False) == 2**(2**(2*x)*x)
assert powsimp(2**(2**(2*x)*x), deep=True) == 2**(x*4**x)
assert powsimp(exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == exp(-x + exp(-x)*exp(-x*log(x)))
assert powsimp(exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == exp(-x + exp(-x)*exp(-x*log(x)))
assert powsimp((x+y)/(3*z), deep=False, combine='exp') == (x+y)/(3*z)
assert powsimp((x/3+y/3)/z, deep=True, combine='exp') == (x/3+y/3)/z
assert powsimp(exp(x)/(1 + exp(x)*exp(y)), deep=True) == exp(x)/(1 + exp(x + y))
assert powsimp(x*y**(z**x*z**y), deep=True) == x*y**(z**(x + y))
assert powsimp((z**x*z**y)**x, deep=True) == (z**(x + y))**x
assert powsimp(x*(z**x*z**y)**x, deep=True) == x*(z**(x + y))**x
p = symbols('p', positive=True)
assert powsimp((1/x)**log(2)/x) == (1/x)**(1 + log(2))
assert powsimp((1/p)**log(2)/p) == p**(-1 - log(2))
# coefficient of exponent can only be simplified for positive bases
assert powsimp(2**(2*x)) == 4**x
assert powsimp((-1)**(2*x)) == (-1)**(2*x)
i = symbols('i', integer=True)
assert powsimp((-1)**(2*i)) == 1
assert powsimp((-1)**(-x)) != (-1)**x # could be 1/((-1)**x), but is not
# force=True overrides assumptions
assert powsimp((-1)**(2*x), force=True) == 1
eq = x**(2*a/3)
assert powsimp(eq).exp == eq.exp == 2*a/3 # eq != (x**a)**(2/3) (try x = -1 and a = 3 to see)
assert powsimp(2**(2*x)) == 4**x # powdenest goes the other direction
assert powsimp(exp(p/2)) == exp(p/2)
# issue 3269
eq = Mul(*[sqrt(Dummy(imaginary=True)) for i in range(3)])
assert powsimp(eq) == eq and eq.is_Mul
def test_issue_3268():
    # A nested-radical expression that is identically zero; powsimp alone
    # only detects this after normal(), while simplify catches it directly.
    z = -5*sqrt(2)/(2*sqrt(2*sqrt(29) + 29)) + sqrt(-sqrt(29)/29 + S(1)/2)
    assert Mul(*[powsimp(a) for a in Mul.make_args(z.normal())]) == 0
    assert powsimp(z.normal()) == 0
    assert simplify(z) == 0
    assert powsimp(sqrt(2 + sqrt(3))*sqrt(2 - sqrt(3)) + 1) == 2
    # powsimp on the unnormalised form must NOT collapse it to zero.
    assert powsimp(z) != 0
def test_powsimp_polar():
from sympy import polar_lift, exp_polar
x, y, z = symbols('x y z')
p, q, r = symbols('p q r', polar=True)
assert (polar_lift(-1))**(2*x) == exp_polar(2*pi*I*x)
assert powsimp(p**x * q**x) == (p*q)**x
assert p**x * (1/p)**x == 1
assert (1/p)**x == p**(-x)
assert exp_polar(x)*exp_polar(y) == exp_polar(x)*exp_polar(y)
assert powsimp(exp_polar(x)*exp_polar(y)) == exp_polar(x+y)
assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y) == (p*exp_polar(1))**(x + y)
assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y, combine='exp') \
== exp_polar(x+y)*p**(x+y)
assert powsimp(exp_polar(x)*exp_polar(y)*exp_polar(2)*sin(x)+sin(y)+p**x*p**y) \
== p**(x+y) + sin(x)*exp_polar(2+x+y) + sin(y)
assert powsimp(sin(exp_polar(x)*exp_polar(y))) == sin(exp_polar(x)*exp_polar(y))
assert powsimp(sin(exp_polar(x)*exp_polar(y)), deep=True) == sin(exp_polar(x+y))
def test_powsimp_nc():
x, y, z = symbols('x,y,z')
A, B, C = symbols('A B C', commutative=False)
assert powsimp(A**x*A**y, combine='all') == A**(x+y)
assert powsimp(A**x*A**y, combine='base') == A**x*A**y
assert powsimp(A**x*A**y, combine='exp') == A**(x+y)
assert powsimp(A**x*B**x, combine='all') == (A*B)**x
assert powsimp(A**x*B**x, combine='base') == (A*B)**x
assert powsimp(A**x*B**x, combine='exp') == A**x*B**x
assert powsimp(B**x*A**x, combine='all') == (B*A)**x
assert powsimp(B**x*A**x, combine='base') == (B*A)**x
assert powsimp(B**x*A**x, combine='exp') == B**x*A**x
assert powsimp(A**x*A**y*A**z, combine='all') == A**(x+y+z)
assert powsimp(A**x*A**y*A**z, combine='base') == A**x*A**y*A**z
assert powsimp(A**x*A**y*A**z, combine='exp') == A**(x+y+z)
assert powsimp(A**x*B**x*C**x, combine='all') == (A*B*C)**x
assert powsimp(A**x*B**x*C**x, combine='base') == (A*B*C)**x
assert powsimp(A**x*B**x*C**x, combine='exp') == A**x*B**x*C**x
assert powsimp(B**x*A**x*C**x, combine='all') == (B*A*C)**x
assert powsimp(B**x*A**x*C**x, combine='base') == (B*A*C)**x
assert powsimp(B**x*A**x*C**x, combine='exp') == B**x*A**x*C**x
def test_collect_1():
"""Collect with respect to a Symbol"""
x, y, z, n = symbols('x,y,z,n')
assert collect( x + y*x, x ) == x * (1 + y)
assert collect( x + x**2, x ) == x + x**2
assert collect( x**2 + y*x**2, x ) == (x**2)*(1+y)
assert collect( x**2 + y*x, x ) == x*y + x**2
assert collect( 2*x**2 + y*x**2 + 3*x*y, [x] ) == x**2*(2+y) + 3*x*y
assert collect( 2*x**2 + y*x**2 + 3*x*y, [y] ) == 2*x**2 + y*(x**2+3*x)
assert collect( ((1 + y + x)**4).expand(), x) == ((1 + y)**4).expand() + \
x*(4*(1 + y)**3).expand() + x**2*(6*(1 + y)**2).expand() + \
x**3*(4*(1 + y)).expand() + x**4
# symbols can be given as any iterable
expr = x + y
assert collect(expr, expr.free_symbols) == expr
def test_collect_2():
    """Collect with respect to a sum"""
    a, b, x = symbols('a,b,x')
    # Collecting on the common sum cos(x) + sin(x) factors out (a + b).
    trig_sum = cos(x) + sin(x)
    assert collect(a*trig_sum + b*trig_sum, trig_sum) == (a + b)*trig_sum
def test_collect_3():
"""Collect with respect to a product"""
a, b, c = symbols('a,b,c')
f = Function('f')
x, y, z, n = symbols('x,y,z,n')
assert collect(-x/8 + x*y, -x) == x*(y - S(1)/8)
assert collect( 1 + x*(y**2), x*y ) == 1 + x*(y**2)
assert collect( x*y + a*x*y, x*y) == x*y*(1 + a)
assert collect( 1 + x*y + a*x*y, x*y) == 1 + x*y*(1 + a)
assert collect(a*x*f(x) + b*(x*f(x)), x*f(x)) == x*(a + b)*f(x)
assert collect(a*x*log(x) + b*(x*log(x)), x*log(x)) == x*(a + b)*log(x)
assert collect(a*x**2*log(x)**2 + b*(x*log(x))**2, x*log(x)) == x**2*log(x)**2*(a + b)
# with respect to a product of three symbols
assert collect(y*x*z+a*x*y*z, x*y*z) == (1 + a)*x*y*z
def test_collect_4():
    """Collect with respect to a power"""
    a, b, c, x = symbols('a,b,c,x')
    assert collect(a*x**c + b*x**c, x**c) == x**c*(a + b)
    # issue 2997: 2 stays with c (unless c is integer or x is positive0
    assert collect(a*x**(2*c) + b*x**(2*c), x**c) == x**(2*c)*(a + b)
def test_collect_5():
"""Collect with respect to a tuple"""
a, x, y, z, n = symbols('a,x,y,z,n')
assert collect(x**2*y**4 + z*(x*y**2)**2 + z + a*z, [x*y**2, z]) in [
z*(1 + a + x**2*y**4) + x**2*y**4,
z*(1 + a) + x**2*y**4*(1 + z) ]
assert collect((1+ (x+y) + (x+y)**2).expand(),
[x, y]) == 1 + y + x*(1 + 2*y) + x**2 + y**2
def test_collect_D():
    """Collect with respect to a Derivative expression."""
    D = Derivative
    f = Function('f')
    x, a, b = symbols('x,a,b')
    fx = D(f(x), x)
    fxx = D(f(x), x, x)
    assert collect(a*fx + b*fx, fx) == (a + b)*fx
    assert collect(a*D(fx, x) + b*D(fx, x), fx) == (a + b)*D(fx, x)
    # second derivative is rewritten in terms of D(fx, x)
    assert collect(a*fxx + b*fxx , fx) == (a + b)*D(fx, x)
    # 1685
    assert collect(5*f(x)+3*fx, fx) == 5*f(x) + 3*fx
    assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x)) ==\
        (x*f(x) + f(x))*D(f(x), x) + f(x)
    assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x), exact=True) ==\
        (x*f(x) + f(x))*D(f(x), x) + f(x)
    assert collect(1/f(x) + 1/f(x)*diff(f(x), x) + x*diff(f(x), x)/f(x), f(x).diff(x), exact=True) ==\
        (1/f(x) + x/f(x))*D(f(x), x) + 1/f(x)
@XFAIL
def collect_issues():
    # NOTE(review): the name lacks the ``test_`` prefix, so the test runner
    # presumably never collects this xfail — confirm whether that is intended.
    assert collect(1/f(x) + 1/f(x)*diff(f(x), x) + x*diff(f(x), x)/f(x), f(x).diff(x)) !=\
        (1 + x*D(f(x), x) + D(f(x), x))/f(x)
def test_collect_D_0():
    """Collect with respect to a second derivative directly."""
    D = Derivative
    f = Function('f')
    x, a, b = symbols('x,a,b')
    fxx = D(f(x), x, x)
    # collect does not distinguish nested derivatives, so it returns
    # -- (a + b)*D(D(f, x), x)
    assert collect(a*fxx + b*fxx , fxx) == (a + b)*fxx
def test_collect_Wild():
    """Collect with respect to functions with Wild argument"""
    a, b, x, y = symbols('a b x y')
    f = Function('f')
    w1 = Wild('.1')
    w2 = Wild('.2')
    # a single wild matches only single-argument applications of f
    assert collect(f(x) + a*f(x), f(w1)) == (1 + a)*f(x)
    assert collect(f(x, y) + a*f(x, y), f(w1)) == f(x, y) + a*f(x, y)
    assert collect(f(x, y) + a*f(x, y), f(w1, w2)) == (1 + a)*f(x, y)
    # repeated wild requires identical arguments
    assert collect(f(x, y) + a*f(x, y), f(w1, w1)) == f(x, y) + a*f(x, y)
    assert collect(f(x, x) + a*f(x, x), f(w1, w1)) == (1 + a)*f(x, x)
    # wilds also match inside powers (base and exponent)
    assert collect(a*(x + 1)**y + (x + 1)**y, w1**y) == (1 + a)*(x + 1)**y
    assert collect(a*(x + 1)**y + (x + 1)**y, w1**b) == a*(x + 1)**y + (x + 1)**y
    assert collect(a*(x + 1)**y + (x + 1)**y, (x + 1)**w2) == (1 + a)*(x + 1)**y
    assert collect(a*(x + 1)**y + (x + 1)**y, w1**w2) == (1 + a)*(x + 1)**y
def test_collect_func():
    """Collect with a post-processing function (here ``factor``) and
    with ``evaluate=False`` returning a coefficient dict."""
    # relies on module-level symbols x and a — presumably imported from
    # sympy.abc at the top of the file (outside this view); confirm.
    f = ((x + a + 1)**3).expand()
    assert collect(f, x) == a**3 + 3*a**2 + 3*a + x**3 + x**2*(3*a + 3) + x*(3*a**2 + 6*a + 3) + 1
    assert collect(f, x, factor) == x**3 + 3*x**2*(a + 1) + 3*x*(a + 1)**2 + (a + 1)**3
    assert collect(f, x, evaluate=False) == {S.One: a**3 + 3*a**2 + 3*a + 1, x: 3*a**2 + 6*a + 3, x**2: 3*a + 3, x**3: 1}
@XFAIL
def test_collect_func_xfail():
    # XXX: this test will pass when automatic constant distribution is removed (#1497)
    # NOTE(review): ``f`` here is not defined in this function; it appears to
    # refer to the local from test_collect_func, so this xfail currently fails
    # with a NameError rather than the intended assertion — confirm.
    assert collect(f, x, factor, evaluate=False) == {S.One: (a + 1)**3, x: 3*(a + 1)**2, x**2: 3*(a + 1), x**3: 1}
def test_collect_order():
    """Collect in the presence of Order (O) terms."""
    a, b, x, t = symbols('a,b,x,t')
    assert collect(t + t*x + t*x**2 + O(x**3), t) == t*(1 + x + x**2 + O(x**3))
    assert collect(t + t*x + x**2 + O(x**3), t) == t*(1 + x + O(x**3)) + x**2 + O(x**3)
    # c and d are not bound locally — presumably module-level symbols from
    # sympy.abc imported at the top of the file (outside this view); confirm.
    f = a*x + b*x + c*x**2 + d*x**2 + O(x**3)
    g = x*(a + b) + x**2*(c + d) + O(x**3)
    assert collect(f, x) == g
    assert collect(f, x, distribute_order_term=False) == g
    f = sin(a + b).series(b, 0, 10)
    assert collect(f, [sin(a), cos(a)]) == \
        sin(a)*cos(b).series(b, 0, 10) + cos(a)*sin(b).series(b, 0, 10)
    assert collect(f, [sin(a), cos(a)], distribute_order_term=False) == \
        sin(a)*cos(b).series(b, 0, 10).removeO() + cos(a)*sin(b).series(b, 0, 10).removeO() + O(b**10)
def test_rcollect():
    """rcollect recursively collects inside sub-expressions (e.g. a fraction)."""
    assert rcollect((x**2*y + x*y + x + y)/(x + y), y) == (x + y*(1 + x + x**2))/(x + y)
    assert rcollect(sqrt(-((x + 1)*(y + 1))), z) == sqrt(-((x + 1)*(y + 1)))
def test_separatevars():
    """separatevars factors an expression into parts depending on disjoint
    symbol sets, optionally returning a dict keyed by symbol."""
    x,y,z,n = symbols('x,y,z,n')
    assert separatevars(2*n*x*z+2*x*y*z) == 2*x*z*(n+y)
    assert separatevars(x*z+x*y*z) == x*z*(1+y)
    assert separatevars(pi*x*z+pi*x*y*z) == pi*x*z*(1+y)
    assert separatevars(x*y**2*sin(x) + x*sin(x)*sin(y)) == x*(sin(y) + y**2)*sin(x)
    assert separatevars(x*exp(x+y)+x*exp(x)) == x*(1 + exp(y))*exp(x)
    # without positivity assumptions the power cannot be split
    assert separatevars((x*(y+1))**z).is_Pow  # != x**z*(1 + y)**z
    assert separatevars(1+x+y+x*y) == (x+1)*(y+1)
    assert separatevars(y/pi*exp(-(z - x)/cos(n))) == y*exp(x/cos(n))*exp(-z/cos(n))/pi
    assert separatevars((x + y)*(x - y) + y**2 + 2*x + 1) == (x + 1)**2
    # 1759
    p=Symbol('p',positive=True)
    assert separatevars(sqrt(p**2 + x*p**2)) == p*sqrt(1 + x)
    assert separatevars(sqrt(y*(p**2 + x*p**2))) == p*sqrt(y*(1 + x))
    # force=True splits the sqrt even without positivity assumptions on y
    assert separatevars(sqrt(y*(p**2 + x*p**2)), force=True) == p*sqrt(y)*sqrt(1 + x)
    # 1766
    assert separatevars(sqrt(x*y)).is_Pow
    assert separatevars(sqrt(x*y), force=True) == sqrt(x)*sqrt(y)
    # 1858
    # any type sequence for symbols is fine
    assert separatevars(((2*x+2)*y), dict=True, symbols=()) == {'coeff': 1, x: 2*x + 2, y: y}
    # separable
    assert separatevars(((2*x+2)*y), dict=True, symbols=[x]) == {'coeff': y, x: 2*x + 2}
    assert separatevars(((2*x+2)*y), dict=True, symbols=[]) == {'coeff': 1, x: 2*x + 2, y: y}
    assert separatevars(((2*x+2)*y), dict=True) == {'coeff': 1, x: 2*x + 2, y: y}
    assert separatevars(((2*x+2)*y), dict=True, symbols=None) == {'coeff': y*(2*x + 2)}
    # not separable
    assert separatevars(3, dict=True) is None
    assert separatevars(2*x+y, dict=True, symbols=()) is None
    assert separatevars(2*x+y, dict=True) is None
    assert separatevars(2*x+y, dict=True, symbols=None) == {'coeff': 2*x + y}
    # 1709
    n, m = symbols('n,m', commutative=False)
    assert separatevars(m + n*m) == (1 + n)*m
    assert separatevars(x + x*n) == x*(1 + n)
    # 1811
    f = Function('f')
    assert separatevars(f(x) + x*f(x)) == f(x) + x*f(x)
    # a noncommutable object present
    eq = x*(1 + hyper((), (), y*z))
    assert separatevars(eq) == eq
def test_separatevars_advanced_factor():
    """separatevars should factor log/exp combinations, not just polynomials."""
    x,y,z = symbols('x,y,z')
    assert separatevars(1 + log(x)*log(y) + log(x) + log(y)) == (log(x) + 1)*(log(y) + 1)
    assert separatevars(1 + x - log(z) - x*log(z) - exp(y)*log(z) - x*exp(y)*log(z) + x*exp(y) + exp(y)) == \
        -((x + 1)*(log(z) - 1)*(exp(y) + 1))
    # positivity lets log(x**log(y)) expand to log(x)*log(y)
    x, y = symbols('x,y', positive=True)
    assert separatevars(1 + log(x**log(y)) + log(x*y)) == (log(x) + 1)*(log(y) + 1)
def test_hypersimp():
    """hypersimp(f, k) returns the ratio f(k+1)/f(k) simplified, or None
    when the term is not hypergeometric in k."""
    n, k = symbols('n,k', integer=True)
    assert hypersimp(factorial(k), k) == k + 1
    # factorial(k**2) is not hypergeometric in k
    assert hypersimp(factorial(k**2), k) is None
    assert hypersimp(1/factorial(k), k) == 1/(k + 1)
    assert hypersimp(2**k/factorial(k)**2, k) == 2/(k**2 + 2*k + 1)
    assert hypersimp(binomial(n, k), k) == (n-k)/(k+1)
    assert hypersimp(binomial(n+1, k), k) == (n-k+1)/(k+1)
    term = (4*k+1)*factorial(k)/factorial(2*k+1)
    assert hypersimp(term, k) == (S(1)/2)*((4*k + 5)/(3 + 14*k + 8*k**2))
    term = 1/((2*k-1)*factorial(2*k+1))
    assert hypersimp(term, k) == (2*k - 1)/(3 + 11*k + 12*k**2 + 4*k**3)/2
    term = binomial(n, k)*(-1)**k/factorial(k)
    assert hypersimp(term, k) == (k - n)/(k**2 + 2*k + 1)
def test_nsimplify():
    """nsimplify converts floats to nearby exact expressions, optionally
    searching over a list of constants and honoring a tolerance."""
    x = Symbol("x")
    assert nsimplify(0) == 0
    assert nsimplify(-1) == -1
    assert nsimplify(1) == 1
    assert nsimplify(1 + x) == 1 + x
    assert nsimplify(2.7) == Rational(27, 10)
    assert nsimplify(1 - GoldenRatio) == (1 - sqrt(5))/2
    # constants list lets nsimplify express results in terms of GoldenRatio
    assert nsimplify((1+sqrt(5))/4, [GoldenRatio]) == GoldenRatio/2
    assert nsimplify(2/GoldenRatio, [GoldenRatio]) == 2*GoldenRatio - 2
    assert nsimplify(exp(5*pi*I/3, evaluate=False)) == sympify('1/2 - sqrt(3)*I/2')
    assert nsimplify(sin(3*pi/5, evaluate=False)) == sympify('sqrt(sqrt(5)/8 + 5/8)')
    assert nsimplify(sqrt(atan('1', evaluate=False))*(2+I), [pi]) == sqrt(pi) + sqrt(pi)/2*I
    assert nsimplify(2 + exp(2*atan('1/4')*I)) == sympify('49/17 + 8*I/17')
    # coarser tolerance allows coarser rational approximations of pi
    assert nsimplify(pi, tolerance=0.01) == Rational(22, 7)
    assert nsimplify(pi, tolerance=0.001) == Rational(355, 113)
    assert nsimplify(0.33333, tolerance=1e-4) == Rational(1, 3)
    assert nsimplify(2.0**(1/3.), tolerance=0.001) == Rational(635, 504)
    # full=True searches harder and recovers the exact radical
    assert nsimplify(2.0**(1/3.), tolerance=0.001, full=True) == 2**Rational(1, 3)
    assert nsimplify(x + .5, rational=True) == Rational(1, 2) + x
    assert nsimplify(1/.3 + x, rational=True) == Rational(10, 3) + x
    assert nsimplify(log(3).n(), rational=True) == \
        sympify('109861228866811/100000000000000')
    assert nsimplify(Float(0.272198261287950), [pi,log(2)]) == pi*log(2)/8
    # a 3-digit evaluation matches a different combination of the constants
    assert nsimplify(Float(0.272198261287950).n(3), [pi,log(2)]) == \
        -pi/4 - log(2) + S(7)/4
    assert nsimplify(x/7.0) == x/7
    assert nsimplify(pi/1e2) == pi/100
    assert nsimplify(pi/1e2, rational=False) == pi/100.0
    assert nsimplify(pi/1e-7) == 10000000*pi
    # no Float atoms may survive when the input simplifies exactly
    assert not nsimplify(factor(-3.0*z**2*(z**2)**(-2.5) + 3*(z**2)**(-1.5))).atoms(Float)
def test_extract_minus_sign():
    """simplify should canonicalize the placement of minus signs in quotients."""
    x = Symbol("x")
    y = Symbol("y")
    a = Symbol("a")
    b = Symbol("b")
    assert simplify(-x/-y) == x/y
    assert simplify(-x/y) == -x/y
    assert simplify(x/y) == x/y
    assert simplify(x/-y) == -x/y
    # division by zero yields signed infinity
    assert simplify(-x/0) == -oo*x
    assert simplify(S(-5)/0) == -oo
    assert simplify(-a*x/(-y-b)) == a*x/(b + y)
def test_diff():
    """simplify recognizes derivative identities that cancel to zero."""
    x = Symbol("x")
    y = Symbol("y")
    f = Function("f")
    g = Function("g")
    assert simplify(g(x).diff(x)*f(x).diff(x)-f(x).diff(x)*g(x).diff(x)) == 0
    # chain rule: d/dx f(x)**2 == 2*f*f'
    assert simplify(2*f(x)*f(x).diff(x)-diff(f(x)**2, x)) == 0
    assert simplify(diff(1/f(x), x)+f(x).diff(x)/f(x)**2) == 0
    # mixed partials commute
    assert simplify(f(x).diff(x, y)-f(x).diff(y, x)) == 0
def test_logcombine_1():
    """logcombine merges sums/multiples of logs into single logs, only when
    assumptions permit — or unconditionally with force=True."""
    x, y = symbols("x,y")
    a = Symbol("a")
    z, w = symbols("z,w", positive=True)
    b = Symbol("b", real=True)
    # without assumptions on x, y the combination is not valid
    assert logcombine(log(x)+2*log(y)) == log(x) + 2*log(y)
    assert logcombine(log(x)+2*log(y), force=True) == log(x*y**2)
    # an unrestricted coefficient a cannot move into the log
    assert logcombine(a*log(w)+log(z)) == a*log(w) + log(z)
    assert logcombine(b*log(z)+b*log(x)) == log(z**b) + b*log(x)
    assert logcombine(b*log(z)-log(w)) == log(z**b/w)
    # products of logs are left alone
    assert logcombine(log(x)*log(z)) == log(x)*log(z)
    assert logcombine(log(w)*log(x)) == log(w)*log(x)
    assert logcombine(cos(-2*log(z)+b*log(w))) in [cos(log(w**b/z**2)),
        cos(log(z**2/w**b))]
    assert logcombine(log(log(x)-log(y))-log(z), force=True) == \
        log(log((x/y)**(1/z)))
    # only the real part of the coefficient is absorbed into the log
    assert logcombine((2+I)*log(x), force=True) == I*log(x)+log(x**2)
    assert logcombine((x**2+log(x)-log(y))/(x*y), force=True) == \
        log(x**(1/(x*y))*y**(-1/(x*y)))+x/y
    assert logcombine(log(x)*2*log(y)+log(z), force=True) == \
        log(z*y**log(x**2))
    assert logcombine((x*y+sqrt(x**4+y**4)+log(x)-log(y))/(pi*x**Rational(2, 3)*\
        sqrt(y)**3), force=True) == \
        log(x**(1/(pi*x**Rational(2, 3)*sqrt(y)**3))*y**(-1/(pi*\
        x**Rational(2, 3)*sqrt(y)**3))) + sqrt(x**4 + y**4)/(pi*\
        x**Rational(2, 3)*sqrt(y)**3) + x**Rational(1, 3)/(pi*sqrt(y))
    # logcombine also descends into relational and functional containers
    assert logcombine(Eq(log(x), -2*log(y)), force=True) == \
        Eq(log(x*y**2), Integer(0))
    assert logcombine(Eq(y, x*acos(-log(x/y))), force=True) == \
        Eq(y, x*acos(log(y/x)))
    assert logcombine(gamma(-log(x/y))*acos(-log(x/y)), force=True) == \
        acos(log(y/x))*gamma(log(y/x))
    assert logcombine((2+3*I)*log(x), force=True) == \
        log(x**2)+3*I*log(x)
    assert logcombine(Eq(y, -log(x)), force=True) == Eq(y, log(1/x))
@XFAIL
def test_logcombine_complex_coeff():
    # TODO: Make the expand() call in logcombine smart enough so that both
    # these hold.
    # NOTE(review): despite the name, the xfail is about logcombine leaving
    # an Integral untouched while still combining the complex-coefficient log.
    assert logcombine(Integral((sin(x**2)+cos(x**3))/x, x), force=True) == \
        Integral((sin(x**2)+cos(x**3))/x, x)
    assert logcombine(Integral((sin(x**2)+cos(x**3))/x, x)+ (2+3*I)*log(x), \
        force=True) == log(x**2)+3*I*log(x) + \
        Integral((sin(x**2)+cos(x**3))/x, x)
def test_posify():
    """posify replaces symbols lacking a positivity assumption with fresh
    positive dummies and returns (new_expr, {dummy: original}) for undoing."""
    from sympy.abc import x
    # symbols already assumed positive/negative are left untouched
    assert str(posify(
        x +
        Symbol('p', positive=True) +
        Symbol('n', negative=True))) == '(_x + n + p, {_x: x})'
    # log(1/x).expand() should be log(1/x) but it comes back as -log(x)
    # when it is corrected, posify will allow the change to be made. The
    # force=True option can do so as well when it is implemented.
    eq, rep = posify(1/x)
    assert log(eq).expand().subs(rep) == -log(x)
    # iterables are posified element-wise with a shared substitution dict
    assert str(posify([x, 1 + x])) == '([_x, _x + 1], {_x: x})'
    x = symbols('x')
    p = symbols('p', positive=True)
    n = symbols('n', negative=True)
    orig = [x, n, p]
    modified, reps = posify(orig)
    assert str(modified) == '[_x, n, p]'
    # applying the replacements round-trips back to the originals
    assert [w.subs(reps) for w in modified] == orig
def test_powdenest():
    """powdenest collapses nested powers (b**a)**c -> b**(a*c) when valid,
    or unconditionally with force=True."""
    from sympy import powdenest
    from sympy.abc import x, y, z, a, b
    p, q = symbols('p q', positive=True)
    i, j = symbols('i,j', integer=True)
    assert powdenest(x) == x
    # without assumptions the nested power cannot be flattened
    assert powdenest(x + 2*(x**(2*a/3))**(3*x)) == (x + 2*(x**(2*a/3))**(3*x))
    assert powdenest((exp(2*a/3))**(3*x))  # -X-> (exp(a/3))**(6*x)
    assert powdenest((x**(2*a/3))**(3*x)) == ((x**(2*a/3))**(3*x))
    assert powdenest(exp(3*x*log(2))) == 2**(3*x)
    # sqrt(p**2) == p requires p positive
    assert powdenest(sqrt(p**2)) == p
    i, j = symbols('i,j', integer=True)
    eq = p**(2*i)*q**(4*i)
    assert powdenest(eq) == (p*q**2)**(2*i)
    assert powdenest((x**x)**(i + j))  # -X-> (x**x)**i*(x**x)**j == x**(x*(i + j))
    assert powdenest(exp(3*y*log(x))) == x**(3*y)
    assert powdenest(exp(y*(log(a) + log(b)))) == (a*b)**y
    assert powdenest(exp(3*(log(a) + log(b)))) == a**3*b**3
    assert powdenest(((x**(2*i))**(3*y))**x) == ((x**(2*i))**(3*y))**x
    # force=True flattens regardless of assumptions
    assert powdenest(((x**(2*i))**(3*y))**x, force=True) == x**(6*i*x*y)
    assert powdenest(((x**(2*a/3))**(3*y/i))**x) == (((x**(2*a/3))**(3*y/i))**x)
    assert powdenest((x**(2*i)*y**(4*i))**z, force=True) == (x*y**2)**(2*i*z)
    assert powdenest((p**(2*i)*q**(4*i))**j) == (p*q**2)**(2*i*j)
    assert powdenest(((p**(2*a))**(3*y))**x) == p**(6*a*x*y)
    e = ((x**2*y**4)**a)**(x*y)
    assert powdenest(e) == e
    e = (((x**2*y**4)**a)**(x*y))**3
    assert powdenest(e) == ((x**2*y**4)**a)**(3*x*y)
    assert powdenest((((x**2*y**4)**a)**(x*y)), force=True) == (x*y**2)**(2*a*x*y)
    assert powdenest((((x**2*y**4)**a)**(x*y))**3, force=True) == (x*y**2)**(6*a*x*y)
    assert powdenest((x**2*y**6)**i) != (x*y**3)**(2*i)
    # with positive bases the base can be combined
    x, y = symbols('x,y', positive=True)
    assert powdenest((x**2*y**6)**i) == (x*y**3)**(2*i)
    assert powdenest((x**(2*i/3)*y**(i/2))**(2*i)) == (x**(S(4)/3)*y)**(i**2)
    assert powdenest(sqrt(x**(2*i)*y**(6*i))) == (x*y**3)**i
    assert powdenest(4**x) == 2**(2*x)
    assert powdenest((4**x)**y) == 2**(2*x*y)
    assert powdenest(4**x*y) == 2**(2*x)*y
def test_powdenest_polar():
    """On polar symbols powers denest unconditionally (single-valued log)."""
    x, y, z = symbols('x y z', polar=True)
    a, b, c = symbols('a b c')
    assert powdenest((x*y*z)**a) == x**a*y**a*z**a
    assert powdenest((x**a*y**b)**c) == x**(a*c)*y**(b*c)
    assert powdenest(((x**a)**b*y**c)**c) == x**(a*b*c)*y**(c**2)
def test_issue_2706():
    """powdenest should not mangle squared products of special functions
    (issue 2706); x is presumably a module-level sympy.abc symbol — confirm."""
    arg = ((gamma(x)*hyper((),(),x))*pi)**2
    assert powdenest(arg) == (pi*gamma(x)*hyper((), (), x))**2
    assert arg.is_positive is None
def test_issue_1095():
    # simplify should call cancel
    from sympy.abc import x, y
    f = Function('f')
    assert simplify((4*x+6*f(y))/(2*x+3*f(y))) == 2
@XFAIL
def test_simplify_float_vs_integer():
    # Test for issue 1374:
    # http://code.google.com/p/sympy/issues/detail?id=1374
    # x**2.0 and x**2 should simplify to the same thing
    assert simplify(x**2.0-x**2) == 0
    assert simplify(x**2-x**2.0) == 0
def test_combsimp():
    """combsimp simplifies ratios and products of factorials, binomials and
    rising/falling factorials."""
    from sympy.abc import n, k
    assert combsimp(factorial(n)) == factorial(n)
    assert combsimp(binomial(n, k)) == binomial(n, k)
    assert combsimp(factorial(n)/factorial(n - 3)) == n*(-1 + n)*(-2 + n)
    assert combsimp(binomial(n + 1, k + 1)/binomial(n, k)) == (1 + n)/(1 + k)
    assert combsimp(binomial(3*n + 4, n + 1)/binomial(3*n + 1, n)) == \
        S(3)/2*((3*n + 2)*(3*n + 4)/((n + 1)*(2*n + 3)))
    assert combsimp(factorial(n)**2/factorial(n - 3)) == factorial(n)*n*(-1 + n)*(-2 + n)
    assert combsimp(factorial(n)*binomial(n+1, k+1)/binomial(n, k)) == factorial(n)*(1 + n)/(1 + k)
    assert combsimp(binomial(n - 1, k)) == -((-n + k)*binomial(n, k))/n
    # shift identities with non-integer second argument
    assert combsimp(binomial(n + 2, k + S(1)/2)) == \
        4*((n + 1)*(n + 2)*binomial(n, k + S(1)/2))/((2*k - 2*n - 1)*(2*k - 2*n - 3))
    assert combsimp(binomial(n + 2, k + 2.0)) == \
        -((1.0*n + 2.0)*binomial(n + 1.0, k + 2.0))/(k - n)
    # coverage tests
    assert combsimp(factorial(n*(1 + n) - n**2 - n)) == 1
    assert combsimp(binomial(n + k - 2, n)) \
        == k*(k - 1)*binomial(n + k, n)/((n + k)*(n + k - 1))
    i = Symbol('i', integer=True)
    # gamma arguments that cannot be shifted are left as-is
    e = gamma(i + 3)
    assert combsimp(e) == e
    e = gamma(exp(i))
    assert combsimp(e) == e
    e = gamma(n + S(1)/3)*gamma(n + S(2)/3)
    assert combsimp(e) == e
    assert combsimp(gamma(4*n + S(1)/2)/gamma(2*n - S(3)/4)) \
        == 2**(4*n - S(5)/2)*(8*n - 3)*gamma(2*n + S(3)/4)/sqrt(pi)
    # FallingFactorial/RisingFactorial over factorial expand to polynomials
    assert combsimp(6*FallingFactorial(-4, n)/factorial(n)) == (-1)**n*(n + 1)*(n + 2)*(n + 3)
    assert combsimp(6*FallingFactorial(-4, n - 1)/factorial(n - 1)) == (-1)**(n - 1)*n*(n + 1)*(n + 2)
    assert combsimp(6*FallingFactorial(-4, n - 3)/factorial(n - 3)) == (-1)**(n - 3)*n*(n - 1)*(n - 2)
    assert combsimp(6*FallingFactorial(-4, -n - 1)/factorial(-n - 1)) == -(-1)**(-n - 1)*n*(n - 1)*(n - 2)
    assert combsimp(6*RisingFactorial(4, n)/factorial(n)) == (n + 1)*(n + 2)*(n + 3)
    assert combsimp(6*RisingFactorial(4, n - 1)/factorial(n - 1)) == n*(n + 1)*(n + 2)
    assert combsimp(6*RisingFactorial(4, n - 3)/factorial(n - 3)) == n*(n - 1)*(n - 2)
    assert combsimp(6*RisingFactorial(4, -n - 1)/factorial(-n - 1)) == -n*(n - 1)*(n - 2)
def test_issue_2516():
    """collect must not alter an expression when the targets don't match
    any of its terms (issue 2516)."""
    aA, Re, a, b, D = symbols('aA Re a b D')
    e=((D**3*a + b*aA**3)/Re).expand()
    assert collect(e, [aA**3/Re, a]) == e
def test_issue_2629():
    """powsimp should combine powers sharing a common radical base
    (issue 2629); x, y are presumably module-level sympy.abc symbols — confirm."""
    b = x*sqrt(y)
    a = sqrt(b)
    c = sqrt(sqrt(x)*y)
    assert powsimp(a*b) == sqrt(b)**3
    assert powsimp(a*b**2*sqrt(y)) == sqrt(y)*a**5
    assert powsimp(a*x**2*c**3*y) == c**3*a**5
    assert powsimp(a*x*c**3*y**2) == c**7*a
    assert powsimp(x*c**3*y**2) == c**7
    assert powsimp(x*c**3*y) == x*y*c**3
    assert powsimp(sqrt(x)*c**3*y) == c**5
    assert powsimp(sqrt(x)*a**3*sqrt(y)) == sqrt(x)*sqrt(y)*a**3
    assert powsimp(Mul(sqrt(x)*c**3*sqrt(y), y, evaluate=False)) == sqrt(x)*sqrt(y)**3*c**3
    assert powsimp(a**2*a*x**2*y) == a**7
    # symbolic powers work, too
    b = x**y*y
    a = b*sqrt(b)
    assert a.is_Mul is True
    assert powsimp(a) == sqrt(b)**3
    # as does exp
    a = x*exp(2*y/3)
    assert powsimp(a*sqrt(a)) == sqrt(a)**3
    assert powsimp(a**2*sqrt(a)) == sqrt(a)**5
    assert powsimp(a**2*sqrt(sqrt(a))) == sqrt(sqrt(a))**9
def test_as_content_primitive():
    """as_content_primitive splits an expression into (rational content,
    primitive part), joining bases where possible.

    Fix: removed an exact duplicate of the ``(-S.Half)**(2 + x)`` assertion
    that appeared twice in a row.
    """
    # although the _as_content_primitive methods do not alter the underlying structure,
    # the as_content_primitive function will touch up the expression and join
    # bases that would otherwise have not been joined.
    assert ((x*(2 + 2*x)*(3*x + 3)**2)).as_content_primitive() ==\
        (18, x*(x + 1)**3)
    assert (2 + 2*x + 2*y*(3 + 3*y)).as_content_primitive() ==\
        (2, x + 3*y*(y + 1) + 1)
    assert ((2 + 6*x)**2).as_content_primitive() ==\
        (4, (3*x + 1)**2)
    assert ((2 + 6*x)**(2*y)).as_content_primitive() ==\
        (1, (_keep_coeff(S(2), (3*x + 1)))**(2*y))
    assert (5 + 10*x + 2*y*(3+3*y)).as_content_primitive() ==\
        (1, 10*x + 6*y*(y + 1) + 5)
    assert ((5*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive() ==\
        (11, x*(y + 1))
    assert ((5*(x*(1 + y)) + 2*x*(3 + 3*y))**2).as_content_primitive() ==\
        (121, x**2*(y + 1)**2)
    assert (y**2).as_content_primitive() ==\
        (1, y**2)
    assert (S.Infinity).as_content_primitive() == (1, oo)
    eq = x**(2+y)
    assert (eq).as_content_primitive() == (1, eq)
    assert (S.Half**(2 + x)).as_content_primitive() == (S(1)/4, 2**-x)
    assert ((-S.Half)**(2 + x)).as_content_primitive() == \
        (S(1)/4, (-S.Half)**x)
    assert (4**((1 + y)/2)).as_content_primitive() == (2, 4**(y/2))
    assert (3**((1 + y)/2)).as_content_primitive() == \
        (1, 3**(Mul(S(1)/2, 1 + y, evaluate=False)))
    assert (5**(S(3)/4)).as_content_primitive() == (1, 5**(S(3)/4))
    assert (5**(S(7)/4)).as_content_primitive() == (5, 5**(S(3)/4))
    assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).as_content_primitive() == \
        (S(1)/14, 7.0*x + 21*y + 10*z)
    # radical=True extracts a common surd as well
    assert (2**(S(3)/4) + 2**(S(1)/4)*sqrt(3)).as_content_primitive(radical=True) == \
        (1, 2**(S(1)/4)*(sqrt(2) + sqrt(3)))
def test_radsimp():
    """radsimp rationalizes denominators containing (sums of) square roots;
    collect_const coverage at the end. a, b are presumably module-level
    sympy.abc symbols — confirm against the file top (outside this view)."""
    r2=sqrt(2)
    r3=sqrt(3)
    r5=sqrt(5)
    r7=sqrt(7)
    assert radsimp(1/r2) == \
        sqrt(2)/2
    assert radsimp(1/(1 + r2)) == \
        -1 + sqrt(2)
    assert radsimp(1/(r2 + r3)) == \
        -sqrt(2) + sqrt(3)
    assert fraction(radsimp(1/(1 + r2 + r3))) == \
        (-sqrt(6) + sqrt(2) + 2, 4)
    assert fraction(radsimp(1/(r2 + r3 + r5))) == \
        (-sqrt(30) + 2*sqrt(3) + 3*sqrt(2), 12)
    assert fraction(radsimp(1/(1 + r2 + r3 + r5))) == \
        (-34*sqrt(10) -
        26*sqrt(15) -
        55*sqrt(3) -
        61*sqrt(2) +
        14*sqrt(30) +
        93 +
        46*sqrt(6) +
        53*sqrt(5), 71)
    assert fraction(radsimp(1/(r2 + r3 + r5 + r7))) == \
        (-50*sqrt(42) - 133*sqrt(5) - 34*sqrt(70) -
        145*sqrt(3) + 22*sqrt(105) + 185*sqrt(2) +
        62*sqrt(30) + 135*sqrt(7), 215)
    z = radsimp(1/(1 + r2/3 + r3/5 + r5 + r7))
    assert len((3616791619821680643598*z).args) == 16
    # denominator already rationalized: radsimp leaves it and its
    # reciprocal (up to max_terms) consistent
    assert radsimp(1/z) == 1/z
    assert radsimp(1/z, max_terms=20).expand() == 1 + r2/3 + r3/5 + r5 + r7
    assert radsimp(1/(r2*3)) == \
        sqrt(2)/6
    # symbolic coefficients on distinct radicals block rationalization
    assert radsimp(1/(r2*a + r3 + r5 + r7)) == 1/(r2*a + r3 + r5 + r7)
    assert radsimp(1/(r2*a + r2*b + r3 + r7)) == \
        ((sqrt(42)*(a + b) +
        sqrt(3)*(-a**2 - 2*a*b - b**2 - 2) +
        sqrt(7)*(-a**2 - 2*a*b - b**2 + 2) +
        sqrt(2)*(a**3 + 3*a**2*b + 3*a*b**2 - 5*a + b**3 - 5*b))/
        ((a**4 + 4*a**3*b + 6*a**2*b**2 - 10*a**2 +
        4*a*b**3 - 20*a*b + b**4 - 10*b**2 + 4)))/2
    assert radsimp(1/(r2*a + r2*b + r2*c + r2*d)) == \
        (sqrt(2)/(a + b + c + d))/2
    assert radsimp(1/(1 + r2*a + r2*b + r2*c + r2*d)) == \
        ((sqrt(2)*(-a - b - c - d) + 1)/
        (-2*a**2 - 4*a*b - 4*a*c - 4*a*d - 2*b**2 -
        4*b*c - 4*b*d - 2*c**2 - 4*c*d - 2*d**2 + 1))
    assert radsimp((y**2 - x)/(y - sqrt(x))) == \
        sqrt(x) + y
    assert radsimp(-(y**2 - x)/(y - sqrt(x))) == \
        -(sqrt(x) + y)
    # complex denominators are rationalized too
    assert radsimp(1/(1 - I + a*I)) == \
        (I*(-a + 1) + 1)/(a**2 - 2*a + 2)
    assert radsimp(1/((-x + y)*(x - sqrt(y)))) == (x + sqrt(y))/((-x + y)*(x**2 - y))
    e = (3 + 3*sqrt(2))*x*(3*x - 3*sqrt(y))
    assert radsimp(e) == 9*x*(1 + sqrt(2))*(x - sqrt(y))
    assert radsimp(1/e) == (-1 + sqrt(2))*(x + sqrt(y))/(9*x*(x**2 - y))
    assert radsimp(1 + 1/(1 + sqrt(3))) == Mul(S(1)/2, 1 + sqrt(3), evaluate=False)
    # noncommutative factors must not be pulled through
    A = symbols("A", commutative=False)
    assert radsimp(x**2 + sqrt(2)*x**2 - sqrt(2)*x*A) == x**2 + sqrt(2)*(x**2 - x*A)
    # denesting: sqrt(5 + 2*sqrt(6)) == sqrt(2) + sqrt(3)
    assert radsimp(1/sqrt(5 + 2 * sqrt(6))) == -sqrt(2) + sqrt(3)
    assert radsimp(1/sqrt(5 + 2 * sqrt(6))**3) == -11*sqrt(2) + 9*sqrt(3)
    # coverage not provided by above tests
    assert collect_const(2*sqrt(3) + 4*a*sqrt(5)) == Mul(2, (2*sqrt(5)*a + sqrt(3)), evaluate=False)
    assert collect_const(2*sqrt(3) + 4*a*sqrt(5), sqrt(3)) == 2*(2*sqrt(5)*a + sqrt(3))
    assert collect_const(sqrt(2)*(1 + sqrt(2)) + sqrt(3) + x*sqrt(2)) == \
        sqrt(2)*(x + 1 + sqrt(2)) + sqrt(3)
def test_issue2834():
    """radsimp must not introduce a near-zero denominator (issue 2834)."""
    from sympy import Polygon, RegularPolygon, denom
    x = Polygon(*RegularPolygon((0, 0), 1, 5).vertices).centroid.x
    assert abs(denom(x).n()) > 1e-12
    assert abs(denom(radsimp(x))) > 1e-12  # in case simplify didn't handle it
def test_fraction_expand():
    """expand(frac=True) keeps a single fraction; plain expand splits terms."""
    eq = (x + y)*y/x
    assert eq.expand(frac=True) == fraction_expand(eq) == (x*y + y**2)/x
    assert eq.expand() == y + y**2/x
def test_combsimp_gamma():
    """combsimp on gamma functions: shift, reflection and multiplication
    (duplication/triplication) identities."""
    from sympy.abc import x, y
    assert combsimp(gamma(x)) == gamma(x)
    assert combsimp(gamma(x+1)/x) == gamma(x)
    assert combsimp(gamma(x)/(x-1)) == gamma(x-1)
    assert combsimp(x*gamma(x)) == gamma(x + 1)
    assert combsimp((x+1)*gamma(x+1)) == gamma(x + 2)
    assert combsimp(gamma(x+y)*(x+y)) == gamma(x + y + 1)
    assert combsimp(x/gamma(x+1)) == 1/gamma(x)
    assert combsimp((x+1)**2/gamma(x+2)) == (x + 1)/gamma(x + 1)
    assert combsimp(x*gamma(x) + gamma(x+3)/(x+2)) == gamma(x+1) + gamma(x+2)
    # Legendre duplication formula cases
    assert combsimp(gamma(2*x)*x) == gamma(2*x + 1)/2
    assert combsimp(gamma(2*x)/(x - S(1)/2)) == 2*gamma(2*x - 1)
    # Euler reflection formula
    assert combsimp(gamma(x)*gamma(1-x)) == pi/sin(pi*x)
    assert combsimp(gamma(x)*gamma(-x)) == -pi/(x*sin(pi*x))
    assert combsimp(1/gamma(x+3)/gamma(1-x)) == sin(pi*x)/(pi*x*(x+1)*(x+2))
    assert simplify(combsimp(gamma(x)*gamma(x+S(1)/2)*gamma(y)/gamma(x+y))) \
        == 2*4**-x*sqrt(pi)*gamma(2*x)*gamma(y)/gamma(x + y)
    # Gauss multiplication (triplication) formula
    assert combsimp(1/gamma(x)/gamma(x-S(1)/3)/gamma(x+S(1)/3)) \
        == 3**(3*x - S(3)/2)/(2*pi*gamma(3*x - 1))
    assert simplify(gamma(S(1)/2 + x/2)*gamma(1 + x/2)/gamma(1+x)/sqrt(pi)*2**x) \
        == 1
    assert combsimp(gamma(S(-1)/4)*gamma(S(-3)/4)) == 16*sqrt(2)*pi/3
    assert simplify(combsimp(gamma(2*x)/gamma(x))) == \
        4**x*gamma(x + S(1)/2)/sqrt(pi)/2
def test_polarify():
    """polarify lifts numbers/symbols to the polar (Riemann-surface) domain,
    returning (expr, substitution_dict); lift=True wraps free symbols too."""
    from sympy import polar_lift, polarify
    x = Symbol('x')
    z = Symbol('z', polar=True)
    f = Function('f')
    ES = {}
    assert polarify(-1) == (polar_lift(-1), ES)
    assert polarify(1 + I) == (polar_lift(1 + I), ES)
    # subs=False returns just the expression, no substitution dict
    assert polarify(exp(x), subs=False) == exp(x)
    assert polarify(1 + x, subs=False) == 1 + x
    assert polarify(f(I) + x, subs=False) == f(polar_lift(I)) + x
    assert polarify(x, lift=True) == polar_lift(x)
    # an already-polar symbol is not lifted again
    assert polarify(z, lift=True) == z
    assert polarify(f(x), lift=True) == f(polar_lift(x))
    assert polarify(1 + x, lift=True) == polar_lift(1 + x)
    assert polarify(1 + f(x), lift=True) == polar_lift(1 + f(polar_lift(x)))
    # the substitution dict maps the polarified form back to the original
    newex, subs = polarify(f(x) + z)
    assert newex.subs(subs) == f(x) + z
    mu = Symbol("mu")
    sigma = Symbol("sigma", positive=True)
    # Make sure polarify(lift=True) doesn't try to lift the integration
    # variable
    assert polarify(Integral(sqrt(2)*x*exp(-(-mu + x)**2/(2*sigma**2))/(2*sqrt(pi)*sigma),
        (x, -oo, oo)), lift=True) == Integral(sqrt(2)*(sigma*exp_polar(0))**exp_polar(I*pi)*
        exp((sigma*exp_polar(0))**(2*exp_polar(I*pi))*exp_polar(I*pi)*polar_lift(-mu + x)**
        (2*exp_polar(0))/2)*exp_polar(0)*polar_lift(x)/(2*sqrt(pi)), (x, -oo, oo))
def test_unpolarify():
    """unpolarify maps polar quantities back to ordinary complex numbers,
    optionally only inside exponents, and descends into functions/relations."""
    from sympy import (exp_polar, polar_lift, exp, unpolarify, sin,
        principal_branch)
    from sympy import gamma, erf, sin, tanh, uppergamma, Eq, Ne
    from sympy.abc import x
    p = exp_polar(7*I) + 1
    u = exp(7*I) + 1
    assert unpolarify(1) == 1
    assert unpolarify(p) == u
    assert unpolarify(p**2) == u**2
    # a polar base with a symbolic exponent cannot be unpolarified safely
    assert unpolarify(p**x) == p**x
    assert unpolarify(p*x) == u*x
    assert unpolarify(p + x) == u + x
    assert unpolarify(sqrt(sin(p))) == sqrt(sin(u))
    # Test reduction to principal branch 2*pi.
    t = principal_branch(x, 2*pi)
    assert unpolarify(t) == x
    assert unpolarify(sqrt(t)) == sqrt(t)
    # Test exponents_only.
    assert unpolarify(p**p, exponents_only=True) == p**u
    assert unpolarify(uppergamma(x, p**p)) == uppergamma(x, p**u)
    # Test functions.
    assert unpolarify(sin(p)) == sin(u)
    assert unpolarify(tanh(p)) == tanh(u)
    assert unpolarify(gamma(p)) == gamma(u)
    assert unpolarify(erf(p)) == erf(u)
    assert unpolarify(uppergamma(x, p)) == uppergamma(x, p)
    assert unpolarify(uppergamma(sin(p), sin(p + exp_polar(0)))) == \
        uppergamma(sin(u), sin(u + 1))
    assert unpolarify(uppergamma(polar_lift(0), 2*exp_polar(0))) == uppergamma(0, 2)
    assert unpolarify(Eq(p, 0)) == Eq(u, 0)
    assert unpolarify(Ne(p, 0)) == Ne(u, 0)
    assert unpolarify(polar_lift(x) > 0) == (x > 0)
    # Test bools
    assert unpolarify(True) is True
def test_issue_2998():
    """collect must handle float exponents (issue 2998).

    Fix: the original comparisons were evaluated but their results were
    discarded, so the test could never fail; they are now real assertions.
    The symbols a, b, x, y are presumably module-level (sympy.abc), as used
    by the surrounding tests.
    """
    assert collect(a*y**(2.0*x)+b*y**(2.0*x),y**(x)) == y**(2.0*x)*(a + b)
    assert collect(a*2**(2.0*x)+b*2**(2.0*x),2**(x)) == 2**(2.0*x)*(a + b)
def test_signsimp():
    """signsimp should reduce a sign-symmetric identity to a true equation."""
    e = x*(-x + 1) + x*(x - 1)
    assert signsimp(Eq(e, 0)) == True
def test_besselsimp():
    """besselsimp rewrites Bessel-I of polar-rotated arguments as Bessel-J,
    and elementary forms for half-integer orders."""
    from sympy import besselj, besseli, besselk, bessely, jn, yn, exp_polar, cosh
    # besseli(y, z*exp(I*pi/2)) == exp(I*pi*y/2)*besselj(y, z)
    assert besselsimp(exp(-I*pi*y/2)*besseli(y, z*exp_polar(I*pi/2))) == \
        besselj(y, z)
    assert besselsimp(exp(-I*pi*a/2)*besseli(a, 2*sqrt(x)*exp_polar(I*pi/2))) == \
        besselj(a, 2*sqrt(x))
    assert besselsimp(sqrt(2)*sqrt(pi)*x**(S(1)/4)*exp(I*pi/4)*exp(-I*pi*a/2) * \
        besseli(-S(1)/2, sqrt(x)*exp_polar(I*pi/2)) * \
        besseli(a, sqrt(x)*exp_polar(I*pi/2))/2) == \
        besselj(a, sqrt(x)) * cos(sqrt(x))
    # half-integer order reduces to elementary functions
    assert besselsimp(besseli(S(-1)/2, z)) == sqrt(2)*cosh(z)/(sqrt(pi)*sqrt(z))
    assert besselsimp(besseli(a, z*exp_polar(-I*pi/2))) == exp(-I*pi*a/2)*besselj(a, z)
def test_Piecewise():
    """simplify must act on every expression and condition of a Piecewise."""
    e1 = x*(x + y) - y*(x + y)
    e2 = sin(x)**2 + cos(x)**2
    e3 = expand((x + y)*y/x)
    s1 = simplify(e1)
    s2 = simplify(e2)
    s3 = simplify(e3)
    assert simplify(Piecewise((e1, x < e2), (e3, True))) \
        == Piecewise((s1, x < s2), (s3, True))
| 41.430721 | 121 | 0.522254 |
2a2c29a44e251792ab0a4397e9bbbabd460b52e9 | 2,744 | py | Python | python/taichi/lang/expr.py | lin-hitonami/taichi | da2fa48f4ff8e9ead8ef7246a506bff597fe3a36 | [
"MIT"
] | null | null | null | python/taichi/lang/expr.py | lin-hitonami/taichi | da2fa48f4ff8e9ead8ef7246a506bff597fe3a36 | [
"MIT"
] | null | null | null | python/taichi/lang/expr.py | lin-hitonami/taichi | da2fa48f4ff8e9ead8ef7246a506bff597fe3a36 | [
"MIT"
] | null | null | null | import sys
import traceback
import numpy as np
from taichi.core.util import ti_core as _ti_core
from taichi.lang import impl
from taichi.lang.common_ops import TaichiOperations
from taichi.lang.util import is_taichi_class, python_scope
import taichi as ti
# Scalar, basic data type
class Expr(TaichiOperations):
    """A Python-side Expr wrapper, whose member variable `ptr` is an instance of C++ Expr class. A C++ Expr object contains member variable `expr` which holds an instance of C++ Expression class."""
    def __init__(self, *args, tb=None):
        """Wrap a C++ Expr, another Expr, or a Python constant.

        args: exactly one positional argument — a ``_ti_core.Expr``, another
            ``Expr`` (copied, including its traceback), or a constant value.
        tb: optional traceback string attached to the underlying expression
            for error reporting.
        """
        # read by taichi's traceback filtering machinery — presumably skips
        # this frame when reporting user errors; confirm against impl.
        _taichi_skip_traceback = 1
        self.tb = tb
        if len(args) == 1:
            if isinstance(args[0], _ti_core.Expr):
                self.ptr = args[0]
            elif isinstance(args[0], Expr):
                # copy constructor: share the underlying pointer and traceback
                self.ptr = args[0].ptr
                self.tb = args[0].tb
            elif is_taichi_class(args[0]):
                raise ValueError('cannot initialize scalar expression from '
                                 f'taichi class: {type(args[0])}')
            else:
                # assume to be constant
                arg = args[0]
                try:
                    # collapse a 0-d numpy array to its scalar value;
                    # failures are deliberately ignored (best-effort)
                    if isinstance(arg, np.ndarray):
                        arg = arg.dtype(arg)
                except:
                    pass
                self.ptr = impl.make_constant_expr(arg).ptr
        else:
            assert False
        if self.tb:
            self.ptr.set_tb(self.tb)
        try:
            self.ptr.type_check()
        except RuntimeError as e:
            if str(e).startswith('TypeError: '):
                # rewrite C++ type-check failures into a user-facing
                # TaichiTypeError, trimming frames up to the AST generator
                s = traceback.extract_stack()
                for i, l in enumerate(s):
                    if 'taichi_ast_generator' in l:
                        s = s[i + 1:]
                        break
                print('[Taichi] Compilation failed', file=sys.stderr)
                print(traceback.format_list(s[:1])[0], end='', file=sys.stderr)
                print(f'TaichiTypeError: {str(e)[11:]}', file=sys.stderr)
                sys.exit(1)
            raise e

    def __hash__(self):
        # hash on the address of the underlying C++ expression
        return self.ptr.get_raw_address()

    def __str__(self):
        return '<ti.Expr>'

    def __repr__(self):
        return '<ti.Expr>'
def make_var_list(size):
    """Return a list of ``size`` fresh anonymous id expressions."""
    return [_ti_core.make_id_expr('') for _ in range(size)]
def make_expr_group(*exprs):
    """Build a C++ ExprGroup from the given expressions.

    A single list/tuple argument is unpacked; a single 1-column ti.Matrix
    contributes its entries. Every element is wrapped in Expr before being
    appended.
    """
    if len(exprs) == 1:
        if isinstance(exprs[0], (list, tuple)):
            exprs = exprs[0]
        elif isinstance(exprs[0], ti.Matrix):
            mat = exprs[0]
            # only column vectors (m == 1) are accepted here
            assert mat.m == 1
            exprs = mat.entries
    expr_group = _ti_core.ExprGroup()
    for i in exprs:
        expr_group.push_back(Expr(i).ptr)
    return expr_group
| 32.282353 | 198 | 0.543003 |
e059b06db97555065f04a240bca55dba85d56738 | 5,441 | py | Python | openml/_api_calls.py | MichaelMMeskhi/openml-python | 6792015e03f3e31130c4cd00035a2f8f69384d59 | [
"BSD-3-Clause"
] | 1 | 2019-05-07T21:57:08.000Z | 2019-05-07T21:57:08.000Z | openml/_api_calls.py | MichaelMMeskhi/openml-python | 6792015e03f3e31130c4cd00035a2f8f69384d59 | [
"BSD-3-Clause"
] | null | null | null | openml/_api_calls.py | MichaelMMeskhi/openml-python | 6792015e03f3e31130c4cd00035a2f8f69384d59 | [
"BSD-3-Clause"
] | null | null | null | import time
import requests
import warnings
import xmltodict
from . import config
from .exceptions import (OpenMLServerError, OpenMLServerException,
OpenMLServerNoResult)
def _perform_api_call(call, request_method, data=None, file_elements=None):
    """
    Perform an API call at the OpenML server.

    Parameters
    ----------
    call : str
        The API call. For example data/list
    request_method : str
        The HTTP request method to perform the API call with. Legal values:
            - get (reading functions, api key optional)
            - post (writing functions, generaly require api key)
            - delete (deleting functions, require api key)
        See REST api documentation which request method is applicable.
    data : dict
        Dictionary with post-request payload.
    file_elements : dict
        Mapping of {filename: str} of strings which should be uploaded as
        files to the server.

    Returns
    -------
    return_code : int
        HTTP return code
    return_value : str
        Return value of the OpenML server
    """
    url = config.server
    if not url.endswith("/"):
        url += "/"
    url += call
    # percent-encode '=' so filter expressions survive in the URL path
    url = url.replace('=', '%3d')
    if file_elements is not None:
        # file uploads are only meaningful for POST requests
        if request_method != 'post':
            raise ValueError('request method must be post when file elements '
                             'are present')
        return _read_url_files(url, data=data, file_elements=file_elements)
    return _read_url(url, request_method, data)
def _file_id_to_url(file_id, filename=None):
    """
    Presents the URL how to download a given file id
    filename is optional
    """
    base = config.server.split('/api/')[0]
    url = '{}/data/download/{}'.format(base, file_id)
    if filename is not None:
        url = url + '/' + filename
    return url
def _read_url_files(url, data=None, file_elements=None):
    """do a post request to url with data
    and sending file_elements as files

    Fix: the caller's ``data`` dict is no longer mutated — a shallow copy is
    taken before the api_key is injected, so repeated calls with a shared
    payload dict do not accumulate/overwrite keys on the caller's side.
    """
    # copy so the api_key injection does not leak into the caller's dict
    data = {} if data is None else dict(data)
    data['api_key'] = config.apikey
    if file_elements is None:
        file_elements = {}
    # Using requests.post sets header 'Accept-encoding' automatically to
    # 'gzip,deflate'
    response = send_request(
        request_method='post',
        url=url,
        data=data,
        files=file_elements,
    )
    if response.status_code != 200:
        raise _parse_server_exception(response, url=url)
    if 'Content-Encoding' not in response.headers or \
            response.headers['Content-Encoding'] != 'gzip':
        warnings.warn('Received uncompressed content from OpenML for {}.'
                      .format(url))
    return response.text
def _read_url(url, request_method, data=None):
    """Perform ``request_method`` on ``url`` and return the response text.

    Adds the configured api_key (if any) to the request payload and raises
    an OpenML exception for non-200 responses.
    """
    # Copy the payload so the caller's dict is not mutated (previously the
    # api_key was written into the caller's dictionary as a side effect).
    data = {} if data is None else dict(data)
    if config.apikey is not None:
        data['api_key'] = config.apikey
    response = send_request(request_method=request_method, url=url, data=data)
    if response.status_code != 200:
        raise _parse_server_exception(response, url=url)
    if response.headers.get('Content-Encoding') != 'gzip':
        warnings.warn('Received uncompressed content from OpenML for {}.'
                      .format(url))
    return response.text
def send_request(
        request_method,
        url,
        data,
        files=None,
):
    """Send an HTTP request to the OpenML server, retrying on network errors.

    Retries up to ``config.connection_n_retries`` times on connection/SSL
    errors, sleeping ``0.1 * attempt`` seconds between attempts; the final
    failure is re-raised. Returns the ``requests`` response object.
    """
    n_retries = config.connection_n_retries
    response = None
    with requests.Session() as session:
        # Start at one to have a non-zero multiplier for the sleep
        for i in range(1, n_retries + 1):
            try:
                if request_method == 'get':
                    response = session.get(url, params=data)
                elif request_method == 'delete':
                    response = session.delete(url, params=data)
                elif request_method == 'post':
                    response = session.post(url, data=data, files=files)
                else:
                    raise NotImplementedError()
                break
            except (
                requests.exceptions.ConnectionError,
                requests.exceptions.SSLError,
            ) as e:
                # Out of retries: surface the last error to the caller.
                if i == n_retries:
                    raise e
                else:
                    # Linear back-off: 0.1s, 0.2s, ...
                    time.sleep(0.1 * i)
    if response is None:
        raise ValueError('This should never happen!')
    return response
def _parse_server_exception(response, url=None):
    """Translate an OpenML error response into the matching exception object.

    Returns an ``OpenMLServerNoResult`` for "empty result" codes, otherwise
    an ``OpenMLServerException``; raises ``OpenMLServerError`` if the body
    cannot be parsed as the server's XML error format.
    """
    try:
        parsed = xmltodict.parse(response.text)
    except Exception:
        raise OpenMLServerError(
            'Unexpected server error. Please contact the developers!\n'
            'Status code: {}\n{}'.format(response.status_code, response.text))
    error = parsed['oml:error']
    code = int(error['oml:code'])
    message = error['oml:message']
    additional_information = error.get('oml:additional_information')
    # "No result" rather than hard failure:
    # 512 runs, 372 datasets, 500 flows, 482 tasks, 542 evaluations, 674 setups
    if code in (372, 482, 500, 512, 542, 674):
        return OpenMLServerNoResult(code, message, additional_information)
    return OpenMLServerException(
        code=code,
        message=message,
        additional=additional_information,
        url=url,
    )
| 33.176829 | 78 | 0.619923 |
ab8c9a9a876b8e8143826b4d99df70a690cd81dd | 6,534 | py | Python | tests/components/shelly/test_cover.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/shelly/test_cover.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | tests/components/shelly/test_cover.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """The scene tests for the myq platform."""
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_POSITION,
DOMAIN as COVER_DOMAIN,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_SET_COVER_POSITION,
SERVICE_STOP_COVER,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers.entity_component import async_update_entity
ROLLER_BLOCK_ID = 1
async def test_block_device_services(hass, coap_wrapper, monkeypatch):
    """Test block device cover services."""
    assert coap_wrapper
    # The cover platform is only set up when the device is in "roller" mode.
    monkeypatch.setitem(coap_wrapper.device.settings, "mode", "roller")
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(coap_wrapper.entry, COVER_DOMAIN)
    )
    await hass.async_block_till_done()
    # set_cover_position should be reflected in the current_position attribute.
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: "cover.test_name", ATTR_POSITION: 50},
        blocking=True,
    )
    state = hass.states.get("cover.test_name")
    assert state.attributes[ATTR_CURRENT_POSITION] == 50
    # open/close/stop should transition through opening -> closing -> closed.
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_OPEN_COVER,
        {ATTR_ENTITY_ID: "cover.test_name"},
        blocking=True,
    )
    assert hass.states.get("cover.test_name").state == STATE_OPENING
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_CLOSE_COVER,
        {ATTR_ENTITY_ID: "cover.test_name"},
        blocking=True,
    )
    assert hass.states.get("cover.test_name").state == STATE_CLOSING
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_STOP_COVER,
        {ATTR_ENTITY_ID: "cover.test_name"},
        blocking=True,
    )
    assert hass.states.get("cover.test_name").state == STATE_CLOSED
async def test_block_device_update(hass, coap_wrapper, monkeypatch):
    """Test block device update."""
    assert coap_wrapper
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(coap_wrapper.entry, COVER_DOMAIN)
    )
    await hass.async_block_till_done()
    # rollerPos 0 should map to a closed cover, 100 to a fully open one.
    monkeypatch.setattr(coap_wrapper.device.blocks[ROLLER_BLOCK_ID], "rollerPos", 0)
    await async_update_entity(hass, "cover.test_name")
    await hass.async_block_till_done()
    assert hass.states.get("cover.test_name").state == STATE_CLOSED
    monkeypatch.setattr(coap_wrapper.device.blocks[ROLLER_BLOCK_ID], "rollerPos", 100)
    await async_update_entity(hass, "cover.test_name")
    await hass.async_block_till_done()
    assert hass.states.get("cover.test_name").state == STATE_OPEN
async def test_block_device_no_roller_blocks(hass, coap_wrapper, monkeypatch):
    """Test block device without roller blocks."""
    assert coap_wrapper
    # With the roller block's type removed, no cover entity should be created.
    monkeypatch.setattr(coap_wrapper.device.blocks[ROLLER_BLOCK_ID], "type", None)
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(coap_wrapper.entry, COVER_DOMAIN)
    )
    await hass.async_block_till_done()
    assert hass.states.get("cover.test_name") is None
async def test_rpc_device_services(hass, rpc_wrapper, monkeypatch):
    """Test RPC device cover services."""
    assert rpc_wrapper
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(rpc_wrapper.entry, COVER_DOMAIN)
    )
    await hass.async_block_till_done()
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: "cover.test_cover_0", ATTR_POSITION: 50},
        blocking=True,
    )
    state = hass.states.get("cover.test_cover_0")
    assert state.attributes[ATTR_CURRENT_POSITION] == 50
    # Patch the device status, then push a coordinator update via
    # async_set_updated_data so the entity re-reads the mocked state.
    monkeypatch.setitem(rpc_wrapper.device.status["cover:0"], "state", "opening")
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_OPEN_COVER,
        {ATTR_ENTITY_ID: "cover.test_cover_0"},
        blocking=True,
    )
    rpc_wrapper.async_set_updated_data("")
    assert hass.states.get("cover.test_cover_0").state == STATE_OPENING
    monkeypatch.setitem(rpc_wrapper.device.status["cover:0"], "state", "closing")
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_CLOSE_COVER,
        {ATTR_ENTITY_ID: "cover.test_cover_0"},
        blocking=True,
    )
    rpc_wrapper.async_set_updated_data("")
    assert hass.states.get("cover.test_cover_0").state == STATE_CLOSING
    monkeypatch.setitem(rpc_wrapper.device.status["cover:0"], "state", "closed")
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_STOP_COVER,
        {ATTR_ENTITY_ID: "cover.test_cover_0"},
        blocking=True,
    )
    rpc_wrapper.async_set_updated_data("")
    assert hass.states.get("cover.test_cover_0").state == STATE_CLOSED
async def test_rpc_device_no_cover_keys(hass, rpc_wrapper, monkeypatch):
    """Test RPC device without cover keys."""
    assert rpc_wrapper
    # Without a "cover:0" status key, no cover entity should be created.
    monkeypatch.delitem(rpc_wrapper.device.status, "cover:0")
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(rpc_wrapper.entry, COVER_DOMAIN)
    )
    await hass.async_block_till_done()
    assert hass.states.get("cover.test_cover_0") is None
async def test_rpc_device_update(hass, rpc_wrapper, monkeypatch):
    """Test RPC device update."""
    assert rpc_wrapper
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(rpc_wrapper.entry, COVER_DOMAIN)
    )
    await hass.async_block_till_done()
    # The entity state should track the device's reported "state" field.
    monkeypatch.setitem(rpc_wrapper.device.status["cover:0"], "state", "closed")
    await async_update_entity(hass, "cover.test_cover_0")
    await hass.async_block_till_done()
    assert hass.states.get("cover.test_cover_0").state == STATE_CLOSED
    monkeypatch.setitem(rpc_wrapper.device.status["cover:0"], "state", "open")
    await async_update_entity(hass, "cover.test_cover_0")
    await hass.async_block_till_done()
    assert hass.states.get("cover.test_cover_0").state == STATE_OPEN
async def test_rpc_device_no_position_control(hass, rpc_wrapper, monkeypatch):
    """Test RPC device with no position control."""
    assert rpc_wrapper
    # With pos_control disabled the cover should still report open/closed.
    monkeypatch.setitem(rpc_wrapper.device.status["cover:0"], "pos_control", False)
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(rpc_wrapper.entry, COVER_DOMAIN)
    )
    await hass.async_block_till_done()
    await async_update_entity(hass, "cover.test_cover_0")
    await hass.async_block_till_done()
    assert hass.states.get("cover.test_cover_0").state == STATE_OPEN
be1a3eb93e63eb416be28076522073d0e95cb71b | 35,605 | py | Python | classifier_utils.py | ycechungAI/ALBERT | 9196d09715b96e8f27a4f1ba2b0d42f3a514e945 | [
"Apache-2.0"
] | 1 | 2020-12-06T08:47:10.000Z | 2020-12-06T08:47:10.000Z | classifier_utils.py | ycechungAI/ALBERT | 9196d09715b96e8f27a4f1ba2b0d42f3a514e945 | [
"Apache-2.0"
] | null | null | null | classifier_utils.py | ycechungAI/ALBERT | 9196d09715b96e8f27a4f1ba2b0d42f3a514e945 | [
"Apache-2.0"
] | 1 | 2021-03-31T06:12:39.000Z | 2021-03-31T06:12:39.000Z | # coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for GLUE classification tasks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import collections
import csv
import os
from albert import fine_tuning_utils
from albert import modeling
from albert import optimization
from albert import tokenization
import tensorflow.compat.v1 as tf
from tensorflow.contrib import data as contrib_data
from tensorflow.contrib import metrics as contrib_metrics
from tensorflow.contrib import tpu as contrib_tpu
class InputExample(object):
  """A single training/test example for simple sequence classification."""
  def __init__(self, guid, text_a, text_b=None, label=None):
    """Constructs an InputExample.

    Args:
      guid: Unique id for the example.
      text_a: string. The untokenized text of the first sequence; always set.
      text_b: (Optional) string. The untokenized text of the second
        sequence; only set for sequence-pair tasks.
      label: (Optional) string. The gold label; set for train and dev
        examples but not for test examples.
    """
    self.guid = guid
    self.text_a, self.text_b = text_a, text_b
    self.label = label
class PaddingInputExample(object):
  """Fake example so the num input examples is a multiple of the batch size.
  When running eval/predict on the TPU, we need to pad the number of examples
  to be a multiple of the batch size, because the TPU requires a fixed batch
  size. The alternative is to drop the last batch, which is bad because it means
  the entire output data won't be generated.
  We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
  """
class InputFeatures(object):
  """A single set of features of data.

  Holds the padded token ids, attention mask, segment ids and label for one
  example, plus bookkeeping fields (`guid`, `example_id`) and the
  `is_real_example` flag used to mark TPU padding examples.
  """
  def __init__(self,
               input_ids,
               input_mask,
               segment_ids,
               label_id,
               guid=None,
               example_id=None,
               is_real_example=True):
    (self.input_ids, self.input_mask,
     self.segment_ids, self.label_id) = (input_ids, input_mask,
                                         segment_ids, label_id)
    self.example_id = example_id
    self.guid = guid
    self.is_real_example = is_real_example
class DataProcessor(object):
  """Base class for data converters for sequence classification data sets."""
  def __init__(self, use_spm, do_lower_case):
    super(DataProcessor, self).__init__()
    # use_spm selects the SentencePiece preprocessing path in process_text();
    # do_lower_case is forwarded to that preprocessing.
    self.use_spm = use_spm
    self.do_lower_case = do_lower_case
  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()
  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()
  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()
  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()
  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with tf.gfile.Open(input_file, "r") as f:
      reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
      lines = []
      for line in reader:
        lines.append(line)
      return lines
  def process_text(self, text):
    # Normalize raw text: SentencePiece preprocessing (with optional
    # lower-casing) when use_spm is set, plain unicode conversion otherwise.
    if self.use_spm:
      return tokenization.preprocess_text(text, lower=self.do_lower_case)
    else:
      return tokenization.convert_to_unicode(text)
class MnliProcessor(DataProcessor):
  """Processor for the MultiNLI data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "MNLI", "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "MNLI", "dev_matched.tsv")),
        "dev_matched")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "MNLI", "test_matched.tsv")),
        "test")
  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      # Note(mingdachen): We will rely on this guid for GLUE submission.
      guid = self.process_text(line[0])
      # Columns 8/9 hold the sentence pair in the MNLI TSV; the gold label
      # is the last column. The unlabeled test split gets a dummy label.
      text_a = self.process_text(line[8])
      text_b = self.process_text(line[9])
      if set_type == "test":
        label = "contradiction"
      else:
        label = self.process_text(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class MisMnliProcessor(MnliProcessor):
  """Processor for the Mismatched MultiNLI data set (GLUE version)."""
  # Only the dev/test file names differ from MnliProcessor; the example
  # parsing and label set are inherited unchanged.
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "MNLI", "dev_mismatched.tsv")),
        "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "MNLI", "test_mismatched.tsv")),
        "test")
class MrpcProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "MRPC", "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "MRPC", "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "MRPC", "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      # Columns 3/4 hold the sentence pair; column 0 is the label for
      # train/dev and the example id for the unlabeled test split.
      text_a = self.process_text(line[3])
      text_b = self.process_text(line[4])
      if set_type == "test":
        guid = line[0]
        label = "0"
      else:
        label = self.process_text(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "CoLA", "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "CoLA", "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "CoLA", "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      # Only the test set has a header
      if set_type == "test" and i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      # Test rows: id in column 0, sentence in column 1 (no label).
      # Train/dev rows: sentence in column 3, acceptability label in column 1.
      if set_type == "test":
        guid = line[0]
        text_a = self.process_text(line[1])
        label = "0"
      else:
        text_a = self.process_text(line[3])
        label = self.process_text(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
class Sst2Processor(DataProcessor):
  """Processor for the SST-2 data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "SST-2", "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "SST-2", "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "SST-2", "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      # Train/dev rows: sentence in column 0, label in column 1.
      # Test rows: id in column 0, sentence in column 1 (dummy label "0").
      if set_type != "test":
        guid = "%s-%s" % (set_type, i)
        text_a = self.process_text(line[0])
        label = self.process_text(line[1])
      else:
        guid = self.process_text(line[0])
        # guid = "%s-%s" % (set_type, line[0])
        text_a = self.process_text(line[1])
        label = "0"
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
class StsbProcessor(DataProcessor):
  """Processor for the STS-B data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "STS-B", "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "STS-B", "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "STS-B", "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    # STS-B is a regression task, so there is no fixed label set.
    return [None]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = self.process_text(line[0])
      # guid = "%s-%s" % (set_type, line[0])
      # Columns 7/8 hold the sentence pair; the last column is the float
      # similarity score (dummy 0 for the unlabeled test split).
      text_a = self.process_text(line[7])
      text_b = self.process_text(line[8])
      if set_type != "test":
        label = float(line[-1])
      else:
        label = 0
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class QqpProcessor(DataProcessor):
  """Processor for the QQP data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "QQP", "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "QQP", "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "QQP", "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = line[0]
      # guid = "%s-%s" % (set_type, line[0])
      if set_type != "test":
        # Some QQP train/dev rows are truncated; skip rows that are missing
        # the expected question/label columns.
        try:
          text_a = self.process_text(line[3])
          text_b = self.process_text(line[4])
          label = self.process_text(line[5])
        except IndexError:
          continue
      else:
        text_a = self.process_text(line[1])
        text_b = self.process_text(line[2])
        label = "0"
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class QnliProcessor(DataProcessor):
  """Processor for the QNLI data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "QNLI", "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "QNLI", "dev.tsv")),
        "dev_matched")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "QNLI", "test.tsv")),
        "test_matched")
  def get_labels(self):
    """See base class."""
    return ["entailment", "not_entailment"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = self.process_text(line[0])
      # guid = "%s-%s" % (set_type, line[0])
      # Columns 1/2 hold the question/sentence pair; the gold label is the
      # last column. The unlabeled test split gets a dummy label.
      text_a = self.process_text(line[1])
      text_b = self.process_text(line[2])
      if set_type == "test_matched":
        label = "entailment"
      else:
        label = self.process_text(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class RteProcessor(DataProcessor):
  """Processor for the RTE data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "RTE", "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "RTE", "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "RTE", "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["entailment", "not_entailment"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = self.process_text(line[0])
      # guid = "%s-%s" % (set_type, line[0])
      # Columns 1/2 hold the sentence pair; label is the last column
      # (dummy "entailment" for the unlabeled test split).
      text_a = self.process_text(line[1])
      text_b = self.process_text(line[2])
      if set_type == "test":
        label = "entailment"
      else:
        label = self.process_text(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class WnliProcessor(DataProcessor):
  """Processor for the WNLI data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "WNLI", "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "WNLI", "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "WNLI", "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = self.process_text(line[0])
      # guid = "%s-%s" % (set_type, line[0])
      # Columns 1/2 hold the sentence pair; label is the last column
      # (dummy "0" for the unlabeled test split).
      text_a = self.process_text(line[1])
      text_b = self.process_text(line[2])
      if set_type != "test":
        label = self.process_text(line[-1])
      else:
        label = "0"
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class AXProcessor(DataProcessor):
  """Processor for the AX data set (GLUE version)."""
  # The diagnostic (AX) set only ships a test split, so there are no
  # train/dev example getters.
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "diagnostic", "diagnostic.tsv")),
        "test")
  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      # Note(mingdachen): We will rely on this guid for GLUE submission.
      guid = self.process_text(line[0])
      text_a = self.process_text(line[1])
      text_b = self.process_text(line[2])
      if set_type == "test":
        label = "contradiction"
      else:
        label = self.process_text(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer, task_name):
  """Converts a single `InputExample` into a single `InputFeatures`.

  Args:
    ex_index: int. Index of the example; the first five are logged.
    example: `InputExample`, or `PaddingInputExample` for TPU batch padding
      (which yields an all-zero, `is_real_example=False` feature).
    label_list: list of label strings; unused for the "sts-b" regression task.
    max_seq_length: int. Token sequences are truncated/padded to this length.
    tokenizer: object with `tokenize` and `convert_tokens_to_ids` methods.
    task_name: str. For "sts-b" the raw float label is kept; otherwise the
      label is mapped to its index in `label_list`.

  Returns:
    An `InputFeatures` instance.
  """
  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)
  if task_name != "sts-b":
    label_map = {}
    for (i, label) in enumerate(label_list):
      label_map[label] = i
  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = None
  if example.text_b:
    tokens_b = tokenizer.tokenize(example.text_b)
  if tokens_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for [CLS] and [SEP] with "- 2"
    if len(tokens_a) > max_seq_length - 2:
      tokens_a = tokens_a[0:(max_seq_length - 2)]
  # The convention in ALBERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
  tokens.append("[SEP]")
  segment_ids.append(0)
  if tokens_b:
    for token in tokens_b:
      tokens.append(token)
      segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)
  input_ids = tokenizer.convert_tokens_to_ids(tokens)
  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)
  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  if task_name != "sts-b":
    label_id = label_map[example.label]
  else:
    label_id = example.label
  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
  return feature
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file, task_name):
  """Convert a set of `InputExample`s to a TFRecord file.

  Args:
    examples: list of `InputExample`s to serialize.
    label_list: list of label strings, passed to `convert_single_example`.
    max_seq_length: int. Sequences are truncated/padded to this length.
    tokenizer: tokenizer passed to `convert_single_example`.
    output_file: path of the TFRecord file to write.
    task_name: str. "sts-b" stores the label as a float feature (regression);
      all other tasks store an int64 label id.
  """

  # Hoisted out of the example loop: previously these helpers were
  # re-defined on every iteration.
  def create_int_feature(values):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

  def create_float_feature(values):
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))

  writer = tf.python_io.TFRecordWriter(output_file)
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer, task_name)
    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    # STS-B is a regression task, so its label is a float.
    if task_name == "sts-b":
      features["label_ids"] = create_float_feature([feature.label_id])
    else:
      features["label_ids"] = create_int_feature([feature.label_id])
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder, task_name, use_tpu, bsz,
                                multiple=1):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Args:
    input_file: path of the TFRecord file produced by
      `file_based_convert_examples_to_features`.
    seq_length: int. Per-example sequence length of the stored features.
    is_training: bool. Enables repeat + shuffle for the training set.
    drop_remainder: bool. Drop the last partial batch (required on TPU).
    task_name: str. "sts-b" reads float labels; other tasks read int64.
    use_tpu: bool. When True the batch size comes from TPUEstimator params.
    bsz: int. Batch size used when not running on TPU.
    multiple: int. Feature-length multiplier (e.g. for multi-segment inputs).

  Returns:
    A callable `input_fn(params)` returning a `tf.data.Dataset`.
  """
  labeltype = tf.float32 if task_name == "sts-b" else tf.int64
  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length * multiple], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length * multiple], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length * multiple], tf.int64),
      "label_ids": tf.FixedLenFeature([], labeltype),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }
  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)
    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t
    return example
  def input_fn(params):
    """The actual input function."""
    if use_tpu:
      batch_size = params["batch_size"]
    else:
      batch_size = bsz
    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)
    d = d.apply(
        contrib_data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))
    return d
  return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(albert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings, task_name,
                 hub_module):
  """Creates a classification (or, for STS-B, regression) model.

  Builds an ALBERT encoder plus a single linear output layer and the
  task loss.

  Args:
    albert_config: Configuration for the ALBERT encoder.
    is_training: Bool; enables dropout on the pooled output when True.
    input_ids: int Tensor of wordpiece ids.
    input_mask: int Tensor marking real tokens vs. padding.
    segment_ids: int Tensor of segment (sentence A/B) ids.
    labels: Label Tensor; class ids for classification, floats for STS-B.
    num_labels: Number of output classes (1 for STS-B regression).
    use_one_hot_embeddings: Whether to use one-hot embedding lookups (TPU).
    task_name: Task identifier; "sts-b" selects the regression head.
    hub_module: Optional TF-Hub module path for the encoder.

  Returns:
    Tuple (loss, per_example_loss, probabilities, logits, predictions).
  """
  # Pooled output only; the per-token sequence output is discarded here.
  (output_layer, _) = fine_tuning_utils.create_albert(
      albert_config=albert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings,
      use_einsum=True,
      hub_module=hub_module)

  hidden_size = output_layer.shape[-1].value

  # Task head: a single affine projection from the pooled representation.
  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    if task_name != "sts-b":
      # Classification: softmax cross-entropy against one-hot labels.
      probabilities = tf.nn.softmax(logits, axis=-1)
      predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
      log_probs = tf.nn.log_softmax(logits, axis=-1)
      one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
      per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    else:
      # STS-B regression: squared error on the single squeezed logit.
      probabilities = logits
      logits = tf.squeeze(logits, [-1])
      predictions = logits
      per_example_loss = tf.square(logits - labels)

    loss = tf.reduce_mean(per_example_loss)

  return (loss, per_example_loss, probabilities, logits, predictions)
def model_fn_builder(albert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings, task_name, hub_module=None,
                     optimizer="adamw"):
  """Returns `model_fn` closure for TPUEstimator.

  The returned closure builds the model graph for TRAIN, EVAL, or PREDICT
  mode, optionally restores weights from `init_checkpoint`, and wires up
  task-specific eval metrics (accuracy; Pearson for STS-B; Matthews
  correlation for CoLA).
  """

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    # "is_real_example" distinguishes real inputs from TPU padding examples;
    # default to all-real when the feature is absent.
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, probabilities, logits, predictions) = \
        create_model(albert_config, is_training, input_ids, input_mask,
                     segment_ids, label_ids, num_labels, use_one_hot_embeddings,
                     task_name, hub_module)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        # On TPU, checkpoint restoration must happen inside the Scaffold.
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps,
          use_tpu, optimizer)
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:
      # Default metric set for plain classification tasks.
      if task_name not in ["sts-b", "cola"]:
        def metric_fn(per_example_loss, label_ids, logits, is_real_example):
          predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
          accuracy = tf.metrics.accuracy(
              labels=label_ids, predictions=predictions,
              weights=is_real_example)
          loss = tf.metrics.mean(
              values=per_example_loss, weights=is_real_example)
          return {
              "eval_accuracy": accuracy,
              "eval_loss": loss,
          }
      elif task_name == "sts-b":
        def metric_fn(per_example_loss, label_ids, logits, is_real_example):
          """Compute Pearson correlations for STS-B."""
          # Display labels and predictions
          concat1 = contrib_metrics.streaming_concat(logits)
          concat2 = contrib_metrics.streaming_concat(label_ids)

          # Compute Pearson correlation
          pearson = contrib_metrics.streaming_pearson_correlation(
              logits, label_ids, weights=is_real_example)

          # Compute MSE
          # mse = tf.metrics.mean(per_example_loss)
          mse = tf.metrics.mean_squared_error(
              label_ids, logits, weights=is_real_example)

          loss = tf.metrics.mean(
              values=per_example_loss,
              weights=is_real_example)
          return {"pred": concat1, "label_ids": concat2, "pearson": pearson,
                  "MSE": mse, "eval_loss": loss,}
      elif task_name == "cola":
        def metric_fn(per_example_loss, label_ids, logits, is_real_example):
          """Compute Matthew's correlations for COLA."""
          predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
          # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
          tp, tp_op = tf.metrics.true_positives(
              labels=label_ids, predictions=predictions,
              weights=is_real_example)
          tn, tn_op = tf.metrics.true_negatives(
              labels=label_ids, predictions=predictions,
              weights=is_real_example)
          fp, fp_op = tf.metrics.false_positives(
              labels=label_ids, predictions=predictions,
              weights=is_real_example)
          fn, fn_op = tf.metrics.false_negatives(
              labels=label_ids, predictions=predictions,
              weights=is_real_example)

          # Compute Matthew's correlation
          mcc = tf.div_no_nan(
              tp * tn - fp * fn,
              tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5))

          # Compute accuracy
          accuracy = tf.metrics.accuracy(
              labels=label_ids, predictions=predictions,
              weights=is_real_example)

          loss = tf.metrics.mean(
              values=per_example_loss,
              weights=is_real_example)
          return {"matthew_corr": (mcc, tf.group(tp_op, tn_op, fp_op, fn_op)),
                  "eval_accuracy": accuracy, "eval_loss": loss,}

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      # PREDICT mode: emit probabilities and hard predictions only.
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={
              "probabilities": probabilities,
              "predictions": predictions
          },
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Materializes all `features` as in-memory constant tensors; suitable only
  for small demo datasets (see the NOTE inside `input_fn`).
  """

  # Column-wise copies of the feature fields, built once at closure-creation
  # time so `input_fn` can wrap them in tf.constant.
  all_input_ids = []
  all_input_mask = []
  all_segment_ids = []
  all_label_ids = []

  for feature in features:
    all_input_ids.append(feature.input_ids)
    all_input_mask.append(feature.input_mask)
    all_segment_ids.append(feature.segment_ids)
    all_label_ids.append(feature.label_id)

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })

    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
    return d

  return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, task_name):
  """Convert a set of `InputExample`s to a list of `InputFeatures`.

  Thin batching wrapper over `convert_single_example`, with progress
  logging every 10000 examples.
  """

  features = []
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer, task_name)

    features.append(feature)
  return features
| 34.169866 | 82 | 0.65047 |
adbc88fd0492c65730bd1296847d0f4dbbeafc52 | 7,162 | py | Python | src/data/run_on_files.py | ChesterHuynh/Wavenet-CPC-Music-Translation | 60632b0330a61a10bac1a129826c55372f685427 | [
"MIT"
] | null | null | null | src/data/run_on_files.py | ChesterHuynh/Wavenet-CPC-Music-Translation | 60632b0330a61a10bac1a129826c55372f685427 | [
"MIT"
] | null | null | null | src/data/run_on_files.py | ChesterHuynh/Wavenet-CPC-Music-Translation | 60632b0330a61a10bac1a129826c55372f685427 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference: https://raw.githubusercontent.com/facebookresearch/music-translation/master/src/run_on_files.py
from pathlib import Path
import librosa
import torch
from argparse import ArgumentParser
import matplotlib
import h5py
import tqdm
import src.data.utils as utils
import src.models.wavenet_models as wavenet_models
from src.data.utils import save_audio
from src.models.wavenet import WaveNet
from src.models.wavenet_generator import WavenetGenerator
from src.models.nv_wavenet_generator import NVWavenetGenerator
from src.models.cpc import CPC, CPCGRU
def extract_id(path):
    """Return the integer decoder id encoded in a checkpoint filename.

    Expects names shaped like ``<prefix>_<id>.pth``: the last four
    characters (the ``.pth`` suffix) are dropped and the text after the
    final underscore is parsed as an int.
    """
    stem = str(path)[:-4]
    return int(stem.rsplit('_', 1)[-1])
def generate(args):
    """Translate input audio files through every requested decoder.

    Pipeline: load encoder + per-domain decoders from checkpoints, read and
    mu-law-encode each input file, encode all inputs to latent codes, then
    autoregressively decode with each decoder and write the resulting wavs.
    Requires a CUDA device (all models and tensors are moved to GPU).
    """
    print('Starting')
    matplotlib.use('agg')

    # Checkpoints are named "<name>_<decoder_id>.pth"; keep only those whose
    # id was requested on the command line.
    checkpoints = args.checkpoint.parent.glob(args.checkpoint.name + '_*.pth')
    checkpoints = [c for c in checkpoints if extract_id(c) in args.decoders]

    assert len(checkpoints) >= 1, "No checkpoints found."

    # Training-time hyperparameters were saved alongside the checkpoints.
    model_args = torch.load(args.checkpoint.parent / 'args.pth')[0]

    if args.model_name == 'umt':
        encoder = wavenet_models.Encoder(model_args)
    elif args.model_name == 'umtcpc-gru':
        encoder = CPCGRU(model_args)
    else:
        encoder = CPC(model_args)

    # All checkpoints share one encoder; load it from the first.
    encoder.load_state_dict(torch.load(checkpoints[0])['encoder_state'])
    encoder.eval()
    encoder = encoder.cuda()

    # NOTE(review): unused helper — no caller in this function; consider
    # removing or confirming it is kept for interactive/debug use.
    def init_hidden(size, use_gpu=True):
        if use_gpu: return torch.zeros(1, size, model_args.latent_d).cuda()
        else: return torch.zeros(1, size, model_args.latent_d)

    decoders = []
    decoder_ids = []
    # Upsampling factor from latent frames to audio samples; the UMT encoder
    # downsamples more aggressively than the CPC variants.
    if args.model_name == 'umt':
        cond_repeat = 800
    else:
        cond_repeat = 160
    for checkpoint in checkpoints:
        decoder = WaveNet(model_args)
        decoder.load_state_dict(torch.load(checkpoint)['decoder_state'])
        decoder.eval()
        decoder = decoder.cuda()
        # Choose pure-Python or fused NVIDIA kernel generation.
        if args.py:
            decoder = WavenetGenerator(decoder, args.batch_size, wav_freq=args.rate, cond_repeat=cond_repeat)
        else:
            decoder = NVWavenetGenerator(decoder, args.rate * (args.split_size // 20), args.batch_size, 3, cond_repeat=cond_repeat)

        decoders += [decoder]
        decoder_ids += [extract_id(checkpoint)]

    xs = []
    # Exactly one output mode must be selected.
    assert args.output_next_to_orig ^ (args.output_generated is not None)

    # A single directory argument means "translate everything under it".
    if len(args.files) == 1 and args.files[0].is_dir():
        top = args.files[0]
        file_paths = list(top.glob('**/*.wav')) + list(top.glob('**/*.h5'))
    else:
        file_paths = args.files

    if not args.skip_filter:
        #file_paths = [f for f in file_paths if not '_' in str(f.name)]
        # skip translated samples that end in "_x.wav"
        file_paths = [f for f in file_paths if '_' not in f.name or len(str(f.name).split('_')[-1]) != 5]

    for file_path in file_paths:
        if file_path.suffix == '.wav':
            data, rate = librosa.load(file_path, sr=16000)
            assert rate == 16000
            data = utils.mu_law(data)
        elif file_path.suffix == '.h5':
            # h5 stores 16-bit PCM; rescale to [-1, 1) before mu-law.
            data = utils.mu_law(h5py.File(file_path, 'r')['wav'][:] / (2 ** 15))
            # Trim so the length is a whole number of 1-second chunks.
            if data.shape[-1] % args.rate != 0:
                data = data[:-(data.shape[-1] % args.rate)]
            assert data.shape[-1] % args.rate == 0
            print(data.shape)
        else:
            raise Exception(f'Unsupported filetype {file_path}')

        if args.sample_len:
            data = data[:args.sample_len]
        else:
            # First file's length becomes the common sample length.
            args.sample_len = len(data)
        xs.append(torch.tensor(data).unsqueeze(0).float().cuda())

    xs = torch.stack(xs).contiguous()
    print(f'xs size: {xs.size()}')

    def save(x, decoder_ix, filepath, model_name):
        """Inverse-mu-law a generated sample and write it to disk."""
        wav = utils.inv_mu_law(x.cpu().numpy())
        print(f'X size: {x.shape}')
        print(f'X min: {x.min()}, max: {x.max()}')

        if args.output_next_to_orig:
            save_audio(wav.squeeze(), filepath.parent / f'{model_name}_{filepath.stem}_{decoder_ix}.wav', rate=args.rate)
        else:
            save_audio(wav.squeeze(), args.output_generated / str(decoder_ix) / filepath.with_suffix('.wav').name, rate=args.rate)

    yy = {}
    with torch.no_grad():
        # Encode every input batch to latent codes first ...
        zz = []
        for xs_batch in torch.split(xs, args.batch_size):
            # CPC encoders return (loss-ish first element, codes); UMT
            # returns the codes directly.
            if args.model_name == 'umt':
                output = encoder(xs_batch)
            else:
                _, output = encoder(xs_batch)
            zz += [output]
        zz = torch.cat(zz, dim=0)

        # ... then decode the codes with each requested decoder.
        with utils.timeit("Generation timer"):
            for i, decoder_id in enumerate(decoder_ids):
                yy[decoder_id] = []
                decoder = decoders[i]
                for zz_batch in torch.split(zz, args.batch_size):
                    print(zz_batch.shape)
                    # Generate in time-slices to bound memory use.
                    splits = torch.split(zz_batch, args.split_size, -1)
                    audio_data = []
                    decoder.reset()
                    for cond in tqdm.tqdm(splits):
                        audio_data += [decoder.generate(cond).cpu()]
                    audio_data = torch.cat(audio_data, -1)
                    yy[decoder_id] += [audio_data]
                yy[decoder_id] = torch.cat(yy[decoder_id], dim=0)
                del decoder

    for decoder_ix, decoder_result in yy.items():
        for sample_result, filepath in zip(decoder_result, file_paths):
            save(sample_result, decoder_ix, filepath, args.model_name)
def main():
    """Parse command-line arguments and run :func:`generate`."""
    parser = ArgumentParser()
    parser.add_argument('--model-name', type=str, required=True, choices=['umt', 'umtcpc-gru', 'umtcpc-wavenet'],
                        help='Type of model architecture')
    parser.add_argument('--files', type=Path, nargs='+', required=False,
                        help='Top level directories of input music files')
    parser.add_argument('-og', '--output-generated', type=Path,
                        help='Output directory for output files')
    parser.add_argument('--checkpoint', type=Path, required=True,
                        help='Checkpoint path')
    parser.add_argument('--decoders', type=int, nargs='*', default=[],
                        help='Only output for the following decoder ID')
    parser.add_argument('--rate', type=int, default=16000,
                        help='Wav sample rate in samples/second')
    parser.add_argument('--batch-size', type=int, default=6,
                        help='Batch size during inference')
    parser.add_argument('--sample-len', type=int,
                        help='If specified, cuts sample lengths')
    parser.add_argument('--split-size', type=int, default=20,
                        help='Size of splits')
    parser.add_argument('--output-next-to-orig', action='store_true')
    parser.add_argument('--skip-filter', action='store_true')
    parser.add_argument('--py', action='store_true', help='Use python generator')
    args = parser.parse_args()

    generate(args)


if __name__ == '__main__':
    # Inference only — disable autograd globally for speed and memory.
    with torch.no_grad():
        main()
| 38.505376 | 131 | 0.611421 |
4a2c57c8150758f722dba2670994b860aec098c9 | 3,489 | py | Python | src/castile/transformer.py | catseye/Castile | 9a6bd7c604aeb9b91311fabcccf9da257df5c511 | [
"BSD-3-Clause"
] | 1 | 2021-02-05T17:00:03.000Z | 2021-02-05T17:00:03.000Z | src/castile/transformer.py | catseye/Castile | 9a6bd7c604aeb9b91311fabcccf9da257df5c511 | [
"BSD-3-Clause"
] | null | null | null | src/castile/transformer.py | catseye/Castile | 9a6bd7c604aeb9b91311fabcccf9da257df5c511 | [
"BSD-3-Clause"
] | null | null | null | """General AST manipulations.
"""
from castile.ast import AST
class FunctionLifter(object):
    """Bring all function definitions up to the toplevel (for target
    languages like C).

    Walks the AST, replaces every nested ``FunLit`` with a ``VarRef`` to a
    freshly named toplevel function, and collects the lifted bodies so the
    ``Program`` node can splice them in as toplevel ``Defn`` nodes.
    """
    def __init__(self):
        # (name, FunLit) pairs accumulated during traversal.
        self.lifted_functions = []
        # Counter used to mint unique toplevel names.
        self.count = 0

    def make_name(self):
        """Return a fresh, unique name for a lifted function."""
        self.count += 1
        return 'lifted_function%d' % self.count

    def lift_functions(self, ast):
        """Recursively lift nested FunLits out of `ast`; returns a new AST."""
        if ast.tag == 'Program':
            # Process children first so all nested FunLits get collected.
            children = []
            for child in ast.children:
                children.append(self.lift_functions(child))
            lifted_defns = []
            for (name, lf) in self.lifted_functions:
                lifted_defns.append(AST('Defn', [lf], value=name))
            # rearrange toplevels so that non-function defns come
            # before function defns
            non_fun_defns = []
            non_lifted_defns = []
            for child in children:
                if child.children:
                    if child.children[0].tag == 'FunLit':
                        non_lifted_defns.append(child)
                    else:
                        non_fun_defns.append(child)
                else:
                    non_fun_defns.append(child)
            children = non_fun_defns + lifted_defns + non_lifted_defns
            return ast.copy(children=children)
        elif ast.tag == 'Defn':
            # skip toplevel funlits; they don't have to be lifted.
            children = []
            for child in ast.children:
                if child.tag == 'FunLit':
                    # Recurse only into the FunLit's body, keeping the
                    # FunLit itself in place at the toplevel.
                    grandchildren = []
                    for grandchild in child.children:
                        grandchildren.append(self.lift_functions(grandchild))
                    children.append(child.copy(children=grandchildren))
                else:
                    children.append(self.lift_functions(child))
            return ast.copy(children=children)
        elif ast.tag == 'FunLit':
            # A nested function literal: lift it and leave a reference.
            children = []
            for child in ast.children:
                children.append(self.lift_functions(child))
            name = self.make_name()
            self.lifted_functions.append((name, ast.copy(children=children)))
            return AST('VarRef', value=name, type=ast.type, aux='toplevel')
        else:
            # Generic node: rebuild with recursively processed children.
            children = []
            for child in ast.children:
                children.append(self.lift_functions(child))
            return ast.copy(children=children)
class VarDeclTypeAssigner(object):
    """Propagate inferred types onto VarDecl nodes.

    Walks the AST and, for each assignment flagged as a defining instance,
    copies the right-hand side's type onto the matching VarDecl in the
    enclosing function's declaration list.
    """
    def __init__(self):
        # FunLit currently being traversed; scopes find_vardecl lookups.
        self.current_funlit = None

    def find_vardecl(self, name):
        """Return the VarDecl node for `name` in the current function.

        Returns None implicitly when no declaration matches.
        """
        body = self.current_funlit.children[1]
        assert body.tag == 'Body'
        vardecls = body.children[0]
        assert vardecls.tag == 'VarDecls'
        for child in vardecls.children:
            if child.value == name:
                return child

    def assign_types(self, ast):
        """Recursively assign types throughout `ast` (mutates VarDecls)."""
        if ast.tag == 'FunLit':
            # Save/restore so nested FunLits don't clobber the outer scope.
            save = self.current_funlit
            self.current_funlit = ast
            for child in ast.children:
                self.assign_types(child)
            self.current_funlit = save
        elif ast.tag == 'Assignment':
            if ast.aux == 'defining instance':
                # First assignment to a variable fixes its declared type.
                vardecl = self.find_vardecl(ast.children[0].value)
                vardecl.type = ast.children[1].type
            for child in ast.children:
                self.assign_types(child)
        else:
            for child in ast.children:
                self.assign_types(child)
| 35.602041 | 77 | 0.547721 |
66cd71e5c2fc6c5194a51333f0ab8e63ed7a5a98 | 2,109 | py | Python | scripts/get_shodan_banners.py | d066y/detectem | 648ddff159e17777e41b1dd266a759e9f0774ea8 | [
"MIT"
] | null | null | null | scripts/get_shodan_banners.py | d066y/detectem | 648ddff159e17777e41b1dd266a759e9f0774ea8 | [
"MIT"
] | 1 | 2021-03-26T00:23:57.000Z | 2021-03-26T00:23:57.000Z | scripts/get_shodan_banners.py | magnusmel/detectem | c40e39ac684f6838373b7108eaf50da61e354acf | [
"MIT"
] | 1 | 2019-07-28T10:11:01.000Z | 2019-07-28T10:11:01.000Z | import re
import sys
import os
import pprint
import click
# Fail fast with a friendly message if the shodan client is missing.
try:
    import shodan
except ImportError:
    print('Install shodan: pip install shodan')
    sys.exit(0)

# The API key must be provided via the environment; exit otherwise.
try:
    SHODAN_API_KEY = os.environ['SHODAN_API_KEY']
except KeyError:
    print('Set SHODAN_API_KEY environment variable with your key')
    sys.exit(0)
def get_headers(text):
    """Parse the header section of a raw service banner into a dict.

    The header section is everything before the first run of 3-4
    consecutive CR/LF characters (i.e. the blank line separating headers
    from the body). Each line containing a colon contributes one
    ``name: value`` pair, both sides stripped; lines without a colon
    (e.g. the HTTP status line) are skipped.

    Returns None when no header/body separator is found.
    """
    head = re.search('^(.*?)(?:[\r\n]{3,4})', text, flags=re.DOTALL | re.I)
    if head is None:
        return None

    headers = {}
    for line in head.group(1).splitlines():
        parsed = re.match('(.*?):(.*)', line)
        if parsed:
            name, value = (part.strip() for part in parsed.groups())
            headers[name] = value
    return headers
@click.command()
@click.option(
    '--filter',
    default=None,
    type=str,
    help='Filter by header'
)
@click.option(
    '--stats',
    default=False,
    is_flag=True,
    help='Include stats'
)
@click.option(
    '--show-names',
    default=False,
    is_flag=True,
    help='Show header names'
)
@click.argument('query')
def main(filter, stats, show_names, query):
    """Search Shodan and print banner headers for each match.

    Modes: default prints each match's full header dict; --show-names
    collects only the distinct header names; --filter HEADER collects
    (server, value) pairs for that header.
    Note: the `filter` parameter name mirrors the CLI option and shadows
    the builtin `filter` inside this function.
    """
    counter = 0
    filtered_header = set()

    api = shodan.Shodan(SHODAN_API_KEY)
    try:
        result = api.search(query)
    except shodan.exception.APIError:
        print('[-] API connection error.')
        sys.exit(0)

    for match in result['matches']:
        server = '{}:{}'.format(match['ip_str'], match['port'])
        hd = get_headers(match['data'])
        # Skip banners with no parseable header section.
        if not hd:
            continue

        if show_names:
            filtered_header.update(set(hd.keys()))
        elif filter:
            value = hd.get(filter)
            if value:
                filtered_header.add((server, value))
        else:
            pprint.pprint(hd, width=160)
            counter += 1

    if filtered_header:
        pprint.pprint(filtered_header, width=160)

    if stats:
        print('\n--- Stats ---')
        print('[+] n_matches: {}'.format(len(result['matches'])))
        # counter counts full-dict prints; falls back to the set size in
        # --show-names / --filter modes.
        print('[+] n_printed: {}'.format(counter or len(filtered_header)))


if __name__ == '__main__':
    main()
| 21.742268 | 74 | 0.577525 |
c53aa2e755e9862fd6cfd36de24e6c8432755e8a | 10,021 | py | Python | scorelib/score.py | CorticoAI/dscore | 003bf2ca2a055ca1493a4149b5513446cf98ea2f | [
"BSD-2-Clause"
] | null | null | null | scorelib/score.py | CorticoAI/dscore | 003bf2ca2a055ca1493a4149b5513446cf98ea2f | [
"BSD-2-Clause"
] | null | null | null | scorelib/score.py | CorticoAI/dscore | 003bf2ca2a055ca1493a4149b5513446cf98ea2f | [
"BSD-2-Clause"
] | null | null | null | """Functions for scoring paired system/reference RTTM files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, namedtuple
import numpy as np
from scipy.linalg import block_diag
from . import metrics
from .six import iteritems, itervalues
from .utils import groupby
__all__ = ['flatten_labels', 'score', 'turns_to_frames', 'Scores']
def turns_to_frames(turns, score_regions, step=0.010):
    """Return frame-level speaker labels for a diarization.

    Parameters
    ----------
    turns : list of Turn
        Speaker turns; must all belong to a single file.
    score_regions : list of tuple
        (onset, offset) scoring regions from the UEM; frames outside these
        regions are dropped from the result.
    step : float, optional
        Frame step size in seconds.
        (Default: 0.01)

    Returns
    -------
    labels : ndarray, (n_frames, n_speakers)
        Binary matrix whose ``i,j``-th entry is 1 iff speaker ``j`` is
        active at frame ``i``. With no turns, the second dimension is 0.
    """
    if len({turn.file_id for turn in turns}) > 1:
        raise ValueError('Turns should be from a single file.')

    # One column per distinct speaker; `column_inds` maps each turn to
    # its speaker's column.
    speaker_classes, column_inds = np.unique(
        [turn.speaker_id for turn in turns], return_inverse=True)

    # Frame grid spans up to the last scoring-region offset.
    dur = max(region[1] for region in score_regions)
    n_frames = int(dur/step)
    times = step*np.arange(n_frames)

    labels = np.zeros((n_frames, speaker_classes.size), dtype='int32')
    onset_frames = np.searchsorted(times, [turn.onset for turn in turns])
    offset_frames = np.searchsorted(times, [turn.offset for turn in turns])
    for start, stop, col in zip(onset_frames, offset_frames, column_inds):
        labels[start:stop, col] = 1

    # Keep only frames that fall inside some scoring region.
    keep = np.zeros(n_frames, dtype=bool)
    for score_onset, score_offset in score_regions:
        start, stop = np.searchsorted(times, (score_onset, score_offset))
        keep[start:stop] = True
    return labels[keep]
# TODO: Consider mapping all speech overlaps to a single class.
def flatten_labels(labels):
    """Collapse multi-speaker frame labels into one integer per frame.

    Each row of ``labels`` is interpreted as a bit vector over speakers
    (bit ``j`` set iff speaker ``j`` is active), giving every speaker,
    every overlap combination, and non-speech (0) a distinct integer in
    ``range(2**n_speakers)``. Needed as pre-processing for the clustering
    metrics.

    Parameters
    ---------
    labels : ndarray, (n_frames, n_speakers)
        Binary frame-level speaker activity matrix.

    Returns
    -------
    flattened_labels : ndarray, (n_frames,)
        Integer label per frame.
    """
    powers_of_two = 2**np.arange(labels.shape[1])
    return labels.dot(powers_of_two)
# Immutable record of the full metric suite for one file (or globally).
class Scores(namedtuple(
        'Scores',
        ['file_id', 'der', 'jer', 'bcubed_precision', 'bcubed_recall',
         'bcubed_f1', 'tau_ref_sys', 'tau_sys_ref', 'ce_ref_sys',
         'ce_sys_ref', 'mi', 'nmi'])):
    """Structure containing metrics.

    Parameters
    ----------
    file_id : str
        File id for file scored.

    der : float
        Diarization error rate in percent.

    jer : float
        Jaccard error rate in percent.

    bcubed_precision : float
        B-cubed precision.

    bcubed_recall : float
        B-cubed recall.

    bcubed_f1 : float
        B-cubed F1.

    tau_ref_sys : float
        Value between 0 and 1 that is high when the reference diarization is
        predictive of the system diarization and low when the reference
        diarization provides essentially no information about the system
        diarization.

    tau_sys_ref : float
        Value between 0 and 1 that is high when the system diarization is
        predictive of the reference diarization and low when the system
        diarization provides essentially no information about the reference
        diarization.

    ce_ref_sys : float
        Conditional entropy of the reference diarization given the system
        diarization.

    ce_sys_ref : float
        Conditional entropy of the system diarization given the reference
        diarization.

    mi : float
        Mutual information.

    nmi : float
        Normalized mutual information.
    """
    # No per-instance __dict__; keeps many score records lightweight.
    __slots__ = ()
# Reduced record holding only the DER decomposition (cf. Scores above).
# NOTE(review): the namedtuple typename is 'Scores', not 'Scores2'; harmless
# at runtime (repr uses the class name) but worth aligning.
class Scores2(namedtuple(
        'Scores',
        ['file_id', 'der', 'missed', 'false_alarm', 'confusion'])):
    """Structure containing metrics.

    Parameters
    ----------
    file_id : str
        File id for file scored.

    der : float
        Diarization error rate in percent.

    missed : float
        Missed speech component of DER (units as reported by
        ``metrics.der``; presumably percent -- confirm).

    false_alarm : float
        False-alarm speech component of DER (same units as ``missed``).

    confusion : float
        Speaker-confusion component of DER (same units as ``missed``).
    """
    __slots__ = ()
def score(ref_turns, sys_turns, uem, step=0.010, nats=False, jer_min_ref_dur=0.0,
          **kwargs):
    """Score diarization.

    Parameters
    ----------
    ref_turns : list of Turn
        Reference speaker turns.

    sys_turns : list of Turn
        System speaker turns.

    uem : UEM
        Un-partitioned evaluation map.

    step : float, optional
        Frame step size in seconds. Not relevant for computation of DER.
        (Default: 0.01)

    nats : bool, optional
        If True, use nats as unit for information theoretic metrics.
        Otherwise, use bits.
        (Default: False)

    jer_min_ref_dur : float, optional
        Minimum reference speaker duration in seconds for JER calculation.
        Reference speakers with durations less than ``min_ref_dur`` will be
        excluded for scoring purposes. Setting this to a small non-zero number
        may stabilize JER when the reference segmentation contains multiple
        extraneous speakers.
        (Default: 0.0)

    kwargs
        Keyword arguments to be passed to ``metrics.der``.

    Returns
    -------
    file_scores : list of Scores
        Scores for all files.

    global_scores : Scores
        Global scores.
    """
    # Convert the duration threshold from seconds to frames.
    if jer_min_ref_dur is not None:
        jer_min_ref_dur = int(jer_min_ref_dur/step)

    # Build contingency matrices.
    file_to_ref_turns = defaultdict(
        list,
        {fid : list(g) for fid, g in groupby(ref_turns, lambda x: x.file_id)})
    file_to_sys_turns = defaultdict(
        list,
        {fid : list(g) for fid, g in groupby(sys_turns, lambda x: x.file_id)})
    file_to_cm = {}  # Map from files to contingency matrices used by
                     # clustering metrics.
    file_to_jer_cm = {}  # Map from files to contingency matrices used by
                         # JER.
    file_to_ref_durs = {}  # Map from files to speaker durations in reference
                           # segmentation.
    file_to_sys_durs = {}  # Map from files to speaker durations in system
                           # segmentation.
    for file_id, score_regions in iteritems(uem):
        ref_labels = turns_to_frames(
            file_to_ref_turns[file_id], score_regions, step=step)
        sys_labels = turns_to_frames(
            file_to_sys_turns[file_id], score_regions, step=step)
        file_to_ref_durs[file_id] = ref_labels.sum(axis=0)
        file_to_sys_durs[file_id] = sys_labels.sum(axis=0)
        file_to_jer_cm[file_id] = metrics.contingency_matrix(
            ref_labels, sys_labels)
        file_to_cm[file_id] = metrics.contingency_matrix(
            flatten_labels(ref_labels), flatten_labels(sys_labels))
    global_cm = block_diag(*list(itervalues(file_to_cm)))
    # Above line has the undesirable property of claiming silence on
    # different files is a different category. However, leave it in for
    # consistency with how the clustering metrics were computed in DIHARD I.

    # Compute DER. This bit is slow as it relies on NIST's perl script.
    (file_to_missed, global_missed,
     file_to_false_alarms, global_false_alarms,
     file_to_confusion, global_confusion,
     file_to_der, global_der) = metrics.der(ref_turns, sys_turns, uem=uem, **kwargs)

    # Compute JER.
    file_to_jer, global_jer = metrics.jer(
        file_to_ref_durs, file_to_sys_durs, file_to_jer_cm, jer_min_ref_dur)

    # NOTE(review): the full clustering-metric path below (producing Scores
    # rather than Scores2) is disabled; `global_cm`, `nats`, and
    # `file_to_jer` are currently only consumed by this dead code.
    # # Compute clustering metrics.
    # def compute_metrics(fid, cm, der, jer):
    #     bcubed_precision, bcubed_recall, bcubed_f1 = metrics.bcubed(
    #         None, None, cm)
    #     tau_ref_sys, tau_sys_ref = metrics.goodman_kruskal_tau(
    #         None, None, cm)
    #     ce_ref_sys = metrics.conditional_entropy(None, None, cm, nats)
    #     ce_sys_ref = metrics.conditional_entropy(None, None, cm.T, nats)
    #     mi, nmi = metrics.mutual_information(None, None, cm, nats)
    #     return Scores(
    #         fid, der, jer, bcubed_precision, bcubed_recall, bcubed_f1,
    #         tau_ref_sys, tau_sys_ref, ce_ref_sys, ce_sys_ref, mi, nmi)
    # file_scores = []
    # for file_id, cm in iteritems(file_to_cm):
    #     file_scores.append(compute_metrics(
    #         file_id, cm, file_to_der[file_id], jer=file_to_jer[file_id]))
    # global_scores = compute_metrics(
    #     '*** OVERALL ***', global_cm, global_der, global_jer)

    def compute_metrics2(fid, der, missed, false_alarm, confusion):
        """Bundle one file's DER decomposition into a Scores2 record."""
        return Scores2(fid, der, missed, false_alarm, confusion)

    file_scores = []
    # Iterate file_to_cm for its keys only; `cm` itself is unused here.
    for file_id, cm in iteritems(file_to_cm):
        file_scores.append(compute_metrics2(
            file_id, file_to_der[file_id], file_to_missed[file_id],
            file_to_false_alarms[file_id], file_to_confusion[file_id]))
    global_scores = compute_metrics2(
        '*** OVERALL ***', global_der, global_missed,
        global_false_alarms, global_confusion)

    return file_scores, global_scores
| 34.555172 | 84 | 0.662409 |
840f8d1c4835fd66298ffb970ee3062e73eaa244 | 2,402 | py | Python | venv/Lib/site-packages/_TFL/ui_display.py | nasir733/airbnb-clone | 9ac746b6f3f3c8fc45f97773266e6f5f182d14b9 | [
"MIT"
] | 6 | 2016-12-10T17:51:10.000Z | 2021-10-11T07:51:48.000Z | venv/Lib/site-packages/_TFL/ui_display.py | nasir733/airbnb-clone | 9ac746b6f3f3c8fc45f97773266e6f5f182d14b9 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/_TFL/ui_display.py | nasir733/airbnb-clone | 9ac746b6f3f3c8fc45f97773266e6f5f182d14b9 | [
"MIT"
] | 3 | 2020-03-29T07:37:03.000Z | 2021-01-21T16:08:40.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# #*** <License> ************************************************************#
# This module is part of the package TFL.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# TFL.ui_display
#
# Purpose
# Generic function returning a string usable for display in user interface
#
# Revision Dates
# 6-Feb-2015 (CT) Creation
# ««revision-date»»···
#--
from _TFL import TFL
from _TFL._Meta.Single_Dispatch import Single_Dispatch
from _TFL.portable_repr import portable_repr
from _TFL.pyk import pyk
import decimal
### Generic function: the base implementation falls back to portable_repr;
### type-specific implementations are registered via `add_type` below.
@Single_Dispatch
def ui_display (obj) :
    return portable_repr (obj)
# end def ui_display

### Integers and Decimals display via plain str().
@ui_display.add_type (decimal.Decimal, * pyk.int_types)
def _ui_display_int (obj) :
    return str (obj)
# end def _ui_display_int

### Floats are shown with two decimal places.
@ui_display.add_type (float)
def _ui_display_float (obj) :
    return "%.2f" % obj
# end def _ui_display_float

### Strings are decoded (bytes -> text) and shown without quotes.
@ui_display.add_type (* pyk.string_types)
def _ui_display_string (obj) :
    return pyk.decoded (obj)
# end def _ui_display_string
__doc__ = """
``ui_display`` returns a string representation of `obj` usable for display
in an user interface.
Examples::
>>> print (ui_display (1))
1
>>> print (ui_display (1.))
1.00
>>> print (ui_display (1.2))
1.20
>>> print (ui_display (1.23))
1.23
>>> print (ui_display (1.234))
1.23
>>> print (ui_display (1.235))
1.24
>>> print (ui_display ("1"))
1
>>> print (ui_display ("a"))
a
>>> print (ui_display (u"a"))
a
For types with no specific implementation, ``ui_display`` returns the
``portable_repr``:
>>> import datetime
>>> d = datetime.date (2014, 2, 6)
>>> print (ui_display (d))
datetime.date(2014, 2, 6)
Adding an implementation for a specific type is easy enough, though::
>>> @ui_display.add_type (datetime.date)
... def _ui_display_date (obj) :
... return str (obj)
>>> print (ui_display (d))
2014-02-06
"""
if __name__ != "__main__" :
TFL._Export ("ui_display")
### __END__ TFL.ui_display
| 22.448598 | 78 | 0.605745 |
bb1d7673a14e3d4af50fda2113a108c57677fb99 | 4,406 | py | Python | src/game/gl_map.py | btdevel/bt | 23abdf0860484a4adcfbe2bcbe94eebca7f820fd | [
"MIT"
] | 1 | 2017-06-30T00:35:05.000Z | 2017-06-30T00:35:05.000Z | src/game/gl_map.py | btdevel/bt | 23abdf0860484a4adcfbe2bcbe94eebca7f820fd | [
"MIT"
] | null | null | null | src/game/gl_map.py | btdevel/bt | 23abdf0860484a4adcfbe2bcbe94eebca7f820fd | [
"MIT"
] | null | null | null | level = __import__("level00")
print level.dungeon_name
print level.map
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import sys
# Some api in the chain is translating the keystrokes to this octal string
# so instead of saying: ESCAPE = 27, we use the following.
ESCAPE = '\033'
# Number of the glut window.
window = 0
cammov = []
def DrawGLScene():
    """Redraw the dungeon: apply queued camera moves, then emit one quad per
    wall segment of the 22x22 cell map.

    The level map stores a 3x3 character block per cell; a non-space
    character on a cell edge means there is a wall on that side.
    """
    def make_square(x1, x2, y1, y2, z1, z2):
        # Emit one wall quad; callers keep two of the coordinates constant.
        glVertex3f(x1, y1, z1)
        glVertex3f(x1, y2, z1)
        glVertex3f(x2, y2, z2)
        glVertex3f(x2, y1, z2)

    # Camera moves are queued by keyPressed() and applied to the projection
    # matrix here, on the GL thread.
    glMatrixMode(GL_PROJECTION)
    while cammov:
        cammov.pop()()

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()

    glBegin(GL_QUADS)
    glColor3f(0.0, 0.7, 0.7)
    grid = level.map   # renamed from ``map`` to avoid shadowing the builtin
    delta = 0.1        # small inset so adjacent walls don't z-fight
    y1, y2 = 0, 10     # wall height range (loop invariant, hoisted)
    for i in range(22):
        for j in range(22):
            east = grid[3*i+1][3*j+0]
            west = grid[3*i+1][3*j+2]
            north = grid[3*i+0][3*j+1]
            south = grid[3*i+2][3*j+1]
            x1 = (j+0)*10 + delta
            x2 = (j+1)*10 - delta
            # Two earlier z1/z2 assignments (based on 22-i / 21-i) were dead
            # code -- immediately overwritten -- and have been removed.
            z1 = (i+0)*10 + delta
            z2 = (i+1)*10 - delta
            if east != " ":
                glNormal(1.0, 0.0, 0.0)
                make_square(x1, x1, y1, y2, z1, z2)
            if west != " ":
                glNormal(-1.0, 0.0, 0.0)
                make_square(x2, x2, y1, y2, z1, z2)
            if north != " ":
                glNormal(0.0, 0.0, 1.0)
                make_square(x1, x2, y1, y2, z1, z1)
            if south != " ":
                glNormal(0.0, 0.0, -1.0)
                make_square(x1, x2, y1, y2, z2, z2)
    glEnd()
    glutSwapBuffers()
def keyPressed(key, x, y):
    """GLUT keyboard callback: quit on ESC, otherwise queue camera moves."""
    if key == ESCAPE:
        glutDestroyWindow(window)
        sys.exit()
    # Numeric-keypad bindings -> deferred camera transforms.  They are only
    # queued here; DrawGLScene() executes them on the GL thread.
    moves = {
        '5': lambda: glTranslate(0, 1, 0),
        '4': lambda: glRotate(-1, 0, 1, 0),
        '6': lambda: glRotate(1, 0, 1, 0),
        '8': lambda: glRotate(-1, 1, 0, 0),
        '2': lambda: glRotate(1, 1, 0, 0),
    }
    if key in moves:
        cammov.append(moves[key])
        glutPostRedisplay()
def InitGL(Width, Height):
    """One-time GL state setup: depth test, perspective and one point light."""
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glClearDepth(1.0)
    glDepthFunc(GL_LESS)
    glEnable(GL_DEPTH_TEST)
    glShadeModel(GL_SMOOTH)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glEnable(GL_COLOR_MATERIAL)
    glEnable(GL_LIGHTING)
    amb = 0.05
    glLightModelfv(GL_LIGHT_MODEL_AMBIENT, (amb, amb, amb, 1))
    glEnable(GL_LIGHT0)
    # NOTE(review): only the LAST GL_POSITION call below takes effect; the
    # earlier ones look like leftover experiments and could be deleted.
    glLightfv(GL_LIGHT0, GL_POSITION, [300.0, 5.0, 300.0, 1.0])
    #glLightfv(GL_LIGHT0, GL_POSITION, [00.0, 5.0, 00.0, 1.0])
    glLightfv(GL_LIGHT0, GL_POSITION, [125.0, 5.0, 125.0, 1.0])
    glLightfv(GL_LIGHT0, GL_POSITION, [5.0, 5.0, 215.0, 1.0])
    glLight(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.03)
    glMatrixMode(GL_MODELVIEW)
def ReSizeGLScene(Width, Height):
    """GLUT reshape callback: reset viewport, perspective and camera."""
    if Height == 0:   # avoid division by zero when the window is minimised
        Height = 1

    glViewport(0, 0, Width, Height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 600.0)
    #gluLookAt(150., 300., 280.,
    #          100., 0., 100.,
    #          0., 1., 0.)
    # Eye placed inside the maze, looking along a corridor.
    gluLookAt(15., 5., 215.,
              5., 5., 210.,
              0., 1., 0.)
    glMatrixMode(GL_MODELVIEW)
def main():
    """Create the GLUT window, register callbacks and initialise GL state."""
    global window
    glutInit(sys.argv)
    # Double-buffered RGBA with alpha and a depth buffer.
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
    glutInitWindowSize(640, 480)
    glutInitWindowPosition(0, 0)
    window = glutCreateWindow("Jeff Molofee's GL Code Tutorial ... NeHe '99")
    glutDisplayFunc(DrawGLScene)
    # glutFullScreen()
    #glutIdleFunc(DrawGLScene)
    glutReshapeFunc(ReSizeGLScene)
    glutKeyboardFunc(keyPressed)
    InitGL(640, 480)
# Print message to console, and kick off the main to get it rolling.
print "Hit ESC key to quit."
if __name__ == '__main__':
try:
GLU_VERSION_1_2
except:
print "Need GLU 1.2 to run this demo"
sys.exit(1)
main()
glutMainLoop()
| 25.034091 | 77 | 0.573082 |
1b97be99bc37670c4ab14d887218ab0f813a37f9 | 29,452 | py | Python | vnpy/appDesktop/widget/ctaStrategy/tools/ctaLineBar.py | chenzj810/vnpy-stock | ca30eb309e38f9f916e9877538b98096303e0b60 | [
"MIT"
] | 2 | 2021-01-03T05:28:14.000Z | 2021-01-03T05:28:19.000Z | vnpy/appDesktop/widget/ctaStrategy/tools/ctaLineBar.py | chenzj810/vnpy | ca30eb309e38f9f916e9877538b98096303e0b60 | [
"MIT"
] | null | null | null | vnpy/appDesktop/widget/ctaStrategy/tools/ctaLineBar.py | chenzj810/vnpy | ca30eb309e38f9f916e9877538b98096303e0b60 | [
"MIT"
] | 1 | 2021-04-26T14:08:23.000Z | 2021-04-26T14:08:23.000Z | # encoding: UTF-8
# AUTHOR:李来佳
# WeChat/QQ: 28888502
from vtConstant import *
from vnpy.trader.main.vtObject import VtBarData
from datetime import datetime
import talib as ta
import numpy
import copy,csv
DEBUGCTALOG = True
class CtaLineBar(object):
    """CTA K-line (bar) series builder.

    Usage:

    1. Create the line in the strategy's ``__init__`` with a settings dict::

           lineMSetting = {}
           lineMSetting['name'] = u'M1'
           lineMSetting['barTimeInterval'] = 60   # 60 seconds per bar
           lineMSetting['inputEma1Len'] = 7       # EMA line 1 period
           lineMSetting['inputEma2Len'] = 21      # EMA line 2 period
           lineMSetting['inputBollLen'] = 20      # Bollinger band period
           lineMSetting['inputBollStdRate'] = 2   # Bollinger std-dev rate
           lineMSetting['minDiff'] = self.minDiff # minimum price tick
           lineMSetting['shortSymbol'] = self.shortSymbol  # short code
           self.lineM = CtaLineBar(self, self.onBar, lineMSetting)

    2. Feed every tick in ``onTick()``::

           self.lineM.onTick(tick)

    3. Consume closed bars in the ``onBar`` callback; intrabar, read the
       series directly, e.g. ``self.lineM.lineBar[-1].close``.
    """

    # Names recognised by setParam(); additional parameter names are
    # appended in __init__.
    paramList = ['vtSymbol']
def __init__(self, strategy, onBarFunc, setting=None,):
# OnBar事件回调函数
self.onBarFunc = onBarFunc
# 参数列表
self.paramList.append('barTimeInterval')
self.paramList.append('inputPreLen')
self.paramList.append('inputEma1Len')
self.paramList.append('inputEma2Len')
self.paramList.append('inputDmiLen')
self.paramList.append('inputDmiMax')
self.paramList.append('inputAtr1Len')
self.paramList.append('inputAtr2Len')
self.paramList.append('inputAtr3Len')
self.paramList.append('inputVolLen')
self.paramList.append('inputRsiLen')
self.paramList.append('inputCmiLen')
self.paramList.append('inputBollLen')
self.paramList.append('inputBollStdRate')
self.paramList.append('minDiff')
self.paramList.append('shortSymbol')
self.paramList.append('activeDayJump')
self.paramList.append('name')
# 输入参数
self.name = 'LineBar'
self.barTimeInterval = 300
self.inputPreLen = EMPTY_INT #1
self.inputEma1Len = EMPTY_INT # 13
self.inputEma2Len = EMPTY_INT # 21
self.inputDmiLen = EMPTY_INT # 14 # DMI的计算周期
self.inputDmiMax = EMPTY_FLOAT # 30 # Dpi和Mdi的突破阈值
self.inputAtr1Len = EMPTY_INT # 10 # ATR波动率的计算周期(近端)
self.inputAtr2Len = EMPTY_INT # 26 # ATR波动率的计算周期(常用)
self.inputAtr3Len = EMPTY_INT # 50 # ATR波动率的计算周期(远端)
self.inputVolLen = EMPTY_INT # 14 # 平均交易量的计算周期
self.inputRsiLen = EMPTY_INT # 7 # RSI 相对强弱指数
self.shortSymbol = EMPTY_STRING # 商品的短代码
self.minDiff = 1 # 商品的最小价格单位
self.activeDayJump = False # 隔夜跳空
# 当前的Tick
self.curTick = None
# K 线服务的策略
self.strategy = strategy
# K线保存数据
self.bar = None # K线数据对象
self.lineBar = [] # K线缓存数据队列
self.barFirstTick =False # K线的第一条Tick数据
# K 线的相关计算结果数据
self.preHigh = [] # K线的前inputPreLen的的最高
self.preLow = [] # K线的前inputPreLen的的最低
self.lineEma1 = [] # K线的EMA1均线,周期是InputEmaLen1,包含当前bar
self.lineEma1MtmRate = [] # K线的EMA1均线 的momentum(3) 动能
self.lineEma2 = [] # K线的EMA2均线,周期是InputEmaLen2,包含当前bar
self.lineEma2MtmRate = [] # K线的EMA2均线 的momentum(3) 动能
# K线的DMI( Pdi,Mdi,ADX,Adxr) 计算数据
self.barPdi = EMPTY_FLOAT # bar内的升动向指标,即做多的比率
self.barMdi = EMPTY_FLOAT # bar内的下降动向指标,即做空的比率
self.linePdi = [] # 升动向指标,即做多的比率
self.lineMdi = [] # 下降动向指标,即做空的比率
self.lineDx = [] # 趋向指标列表,最大长度为inputM*2
self.barAdx = EMPTY_FLOAT # Bar内计算的平均趋向指标
self.lineAdx = [] # 平均趋向指标
self.barAdxr = EMPTY_FLOAT # 趋向平均值,为当日ADX值与M日前的ADX值的均值
self.lineAdxr = [] # 平均趋向变化指标
# K线的基于DMI、ADX计算的结果
self.barAdxTrend = EMPTY_FLOAT # ADX值持续高于前一周期时,市场行情将维持原趋势
self.barAdxrTrend = EMPTY_FLOAT # ADXR值持续高于前一周期时,波动率比上一周期高
self.buyFilterCond = False # 多过滤器条件,做多趋势的判断,ADX高于前一天,上升动向> inputMM
self.sellFilterCond = False # 空过滤器条件,做空趋势的判断,ADXR高于前一天,下降动向> inputMM
# K线的ATR技术数据
self.lineAtr1 = [] # K线的ATR1,周期为inputAtr1Len
self.lineAtr2 = [] # K线的ATR2,周期为inputAtr2Len
self.lineAtr3 = [] # K线的ATR3,周期为inputAtr3Len
self.barAtr1 = EMPTY_FLOAT
self.barAtr2 = EMPTY_FLOAT
self.barAtr3 = EMPTY_FLOAT
# K线的交易量平均
self.lineAvgVol = [] # K 线的交易量平均
# K线的RSI计算数据
self.lineRsi = [] # 记录K线对应的RSI数值,只保留inputRsiLen*8
self.lowRsi = 30 # RSI的最低线
self.highRsi = 70 # RSI的最高线
self.lineRsiTop = [] # 记录RSI的最高峰,只保留 inputRsiLen个
self.lineRsiButtom = [] # 记录RSI的最低谷,只保留 inputRsiLen个
self.lastRsiTopButtom = None # 最近的一个波峰/波谷
# K线的CMI计算数据
self.inputCmiLen = EMPTY_INT
self.lineCmi = [] # 记录K线对应的Cmi数值,只保留inputCmiLen*8
# K线的布林特计算数据
self.inputBollLen = EMPTY_INT # K线周期
self.inputBollStdRate = 1.5 # 两倍标准差
self.lineUpperBand = [] # 上轨
self.lineMiddleBand = [] # 中线
self.lineLowerBand = [] # 下轨
if setting:
self.setParam(setting)
def setParam(self, setting):
"""设置参数"""
d = self.__dict__
for key in self.paramList:
if key in setting:
d[key] = setting[key]
    def onTick(self, tick):
        """Feed one market tick into the bar builder.

        Ticks from the 08:xx / 20:xx pre-open call auction are discarded;
        all others update the current bar (possibly closing it and firing
        the onBar callback via __drawLineBar).
        """
        # Tick validity check (disabled): reject ticks more than 10s old.
        #if (tick.datetime- datetime.now()).seconds > 10:
        #    self.writeCtaLog(u'无效的tick时间:{0}'.format(tick.datetime))
        #    return
        # Drop call-auction ticks (08:xx day session / 20:xx night session).
        if tick.datetime.hour == 8 or tick.datetime.hour == 20:
            self.writeCtaLog('竞价排名tick时间:{0}'.format(tick.datetime))
            return
        self.curTick = tick
        # Build/extend the K-line; fires the OnBar event when a bar closes.
        self.__drawLineBar(tick)
    def addBar(self,bar):
        """Append a pre-built bar (used by external warm-up/initialisation).

        If *bar* falls outside the current bar's period, it is stored as a
        new bar and onBar fires; otherwise it is merged into the last bar.
        """
        l1 = len(self.lineBar)
        if l1 == 0:
            self.lineBar.append(bar)
            self.onBar(bar)
            return

        # Compare against the newest bar to detect a period boundary.
        # NOTE(review): ``.seconds`` ignores the ``days`` component of the
        # timedelta; use total_seconds() if gaps can exceed a day -- confirm.
        lastBar = self.lineBar[-1]
        if (bar.datetime - lastBar.datetime).seconds >= self.barTimeInterval:
            self.lineBar.append(bar)
            self.onBar(bar)
            return

        # Same period: merge into the last bar.
        lastBar.close = bar.close
        lastBar.high = max(lastBar.high, bar.high)
        lastBar.low = min(lastBar.low, bar.low)
        lastBar.volume = lastBar.volume + bar.volume
    def onBar(self, bar):
        """Bar-close event: recompute all configured indicators, then
        forward the finished bar to the strategy callback."""
        # Each recount method is a no-op unless its input length was set.
        self.__recountPreHighLow()
        self.__recountEma()
        self.__recountDmi()
        self.__recountAtr()
        self.__recoundAvgVol()
        self.__recountRsi()
        self.__recountCmi()
        self.__recountBoll()
        # Notify the owning strategy.
        self.onBarFunc(bar)
    def __firstTick(self,tick):
        """Open a new bar seeded from the first tick of the period."""
        self.bar = VtBarData()  # fresh K-line record
        self.bar.vtSymbol = tick.vtSymbol
        self.bar.symbol = tick.symbol
        self.bar.exchange = tick.exchange

        # O/H/L/C all start at the first traded price.
        self.bar.open = tick.lastPrice
        self.bar.high = tick.lastPrice
        self.bar.low = tick.lastPrice
        self.bar.close = tick.lastPrice

        # The bar's timestamp is taken from this first tick.
        self.bar.date = tick.date
        self.bar.time = tick.time
        self.bar.datetime = tick.datetime
        self.bar.volume = tick.volume
        self.bar.openInterest = tick.openInterest

        self.barFirstTick = True  # mark: this tick opened the bar

        self.lineBar.append(self.bar)  # push onto the bar queue
    # ----------------------------------------------------------------------
    def __drawLineBar(self, tick):
        """Core bar builder.

        Merges *tick* into the current bar, opens a new bar when the period
        elapses, and (when ``activeDayJump`` is set) bridges overnight gaps
        with synthetic bars so EMA/ADX are not corrupted at the open.
        """
        l1 = len(self.lineBar)

        # First tick ever: open the first bar and publish it.
        if l1 == 0:
            self.__firstTick(tick)
            self.onBar(self.bar)
            return

        # Keep at most 8 trading hours of bars.
        if l1 > 60 * 8:
            del self.lineBar[0]

        # Compare against the newest bar to detect a period boundary.
        lastBar = self.lineBar[-1]

        # Gap size is measured in ATR2 units (5 ticks until ATR2 exists).
        if len(self.lineAtr2) < 1:
            priceInBar = 5 * self.minDiff
        else:
            priceInBar = self.lineAtr2[-1]

        jumpBars = int(abs(tick.lastPrice - lastBar.close)/priceInBar)

        # Session open (09:00 day / 21:00 night) with a gap to bridge.
        if (tick.datetime.hour == 9 or tick.datetime.hour == 21) \
                and tick.datetime.minute == 0 and tick.datetime.second == 0 \
                and lastBar.datetime.hour != tick.datetime.hour \
                and jumpBars > 0 and self.activeDayJump:
            priceInYesterday = lastBar.close
            self.writeCtaLog('line Bar jumpbars:{0}'.format(jumpBars))

            if tick.lastPrice > priceInYesterday:  # gap up
                # Emit stair-step rising bars to soften the ATR change.
                for i in range(0, jumpBars, 1):
                    upbar = copy.deepcopy(lastBar)
                    upbar.open = priceInYesterday + float(i * priceInBar)
                    upbar.low = upbar.open
                    upbar.close = priceInYesterday + float((i+1) * priceInBar)
                    upbar.high = upbar.close
                    upbar.volume = 0
                    self.lineBar.append(upbar)
                    self.onBar(upbar)
            else:  # gap down
                # Emit stair-step falling bars to soften the ATR change.
                for i in range(0, jumpBars, 1):
                    downbar = copy.deepcopy(lastBar)
                    downbar.open = priceInYesterday - float(i * priceInBar)
                    downbar.high = downbar.open
                    downbar.close = priceInYesterday - float((i+1) * priceInBar)
                    downbar.low = downbar.close
                    downbar.volume = 0
                    self.lineBar.append(downbar)
                    self.onBar(downbar)

            # Emit flat bars to dampen the Pdi/Mdi/ADX jump.
            for i in range(0, jumpBars*2, 1):
                equalbar = copy.deepcopy(self.lineBar[-1])
                equalbar.volume = 0
                self.lineBar.append(equalbar)
                self.onBar(equalbar)

            # Re-point at the (new) last bar.
            lastBar = self.lineBar[-1]

        # Detect the last tick of an intraday break:
        # 10:15, 11:30, 15:00 and 02:30.
        endtick = False
        if (tick.datetime.hour == 10 and tick.datetime.minute == 15) \
                or (tick.datetime.hour == 11 and tick.datetime.minute == 30) \
                or (tick.datetime.hour == 15 and tick.datetime.minute == 00) \
                or (tick.datetime.hour == 2 and tick.datetime.minute == 30):
            endtick = True

        # Per-exchange night-session close times.
        if self.shortSymbol in NIGHT_MARKET_SQ2 and tick.datetime.hour == 1 and tick.datetime.minute == 00:
            endtick = True
        if self.shortSymbol in NIGHT_MARKET_SQ3 and tick.datetime.hour == 23 and tick.datetime.minute == 00:
            endtick = True
        if self.shortSymbol in NIGHT_MARKET_ZZ or self.shortSymbol in NIGHT_MARKET_DL:
            if tick.datetime.hour == 23 and tick.datetime.minute == 30:
                endtick = True

        # Period elapsed (and not a session-end tick): open a new bar.
        # NOTE(review): ``.seconds`` ignores the day component -- confirm
        # gaps larger than a day cannot reach this code path.
        if (tick.datetime-lastBar.datetime).seconds >= self.barTimeInterval and not endtick:
            # Create and push the new bar...
            self.__firstTick(tick)
            # ...and publish the bar that just closed.
            self.onBar(lastBar)
        else:
            # Still inside the current bar: merge the tick in.
            self.barFirstTick = False

            # Refresh high, low, close and volume.
            lastBar.high = max(lastBar.high, tick.lastPrice)
            lastBar.low = min(lastBar.low, tick.lastPrice)
            lastBar.close = tick.lastPrice
            lastBar.volume = lastBar.volume + tick.volume

            # Refresh the bar colour.
            if lastBar.close > lastBar.open:
                lastBar.color = COLOR_RED
            elif lastBar.close < lastBar.open:
                lastBar.color = COLOR_BLUE
            else:
                lastBar.color = COLOR_EQUAL
    # ----------------------------------------------------------------------
    def __recountPreHighLow(self):
        """Track the highest high / lowest low of the previous inputPreLen
        closed bars (current bar excluded)."""
        if self.inputPreLen <= 0:  # feature disabled
            return

        # Need at least inputPreLen bars of history.
        if len(self.lineBar) < self.inputPreLen:
            self.writeCtaLog('数据未充分,当前Bar数据数量:{0},计算High、Low需要:{1}'.
                             format(len(self.lineBar), self.inputPreLen))
            return

        # Scan the inputPreLen bars preceding the current one.
        preHigh = EMPTY_FLOAT
        preLow = EMPTY_FLOAT

        for i in range(len(self.lineBar)-2, len(self.lineBar)-2-self.inputPreLen, -1):
            if self.lineBar[i].high > preHigh or preHigh == EMPTY_FLOAT:
                preHigh = self.lineBar[i].high  # window high
            if self.lineBar[i].low < preLow or preLow == EMPTY_FLOAT:
                preLow = self.lineBar[i].low    # window low

        # Append, keeping the series bounded.
        if len(self.preHigh) > self.inputPreLen * 8:
            del self.preHigh[0]
        self.preHigh.append(preHigh)

        if len(self.preLow) > self.inputPreLen * 8:
            del self.preLow[0]
        self.preLow.append(preLow)
    #----------------------------------------------------------------------
    def __recountEma(self):
        """Compute the two EMA lines (inputEma1Len / inputEma2Len) from the
        closes of the bars before the current one."""
        l = len(self.lineBar)

        # Need enough closed bars for the longest EMA (talib warm-up).
        if len(self.lineBar) < max(7, self.inputEma1Len, self.inputEma2Len)+2:
            self.debugCtaLog('数据未充分,当前Bar数据数量:{0},计算EMA需要:{1}'.
                             format(len(self.lineBar), max(7, self.inputEma1Len, self.inputEma2Len)+2))
            return

        # First EMA line.
        if self.inputEma1Len > 0:
            if self.inputEma1Len > l:
                ema1Len = l
            else:
                ema1Len = self.inputEma1Len

            # Closes of the previous ema1Len bars (current bar excluded).
            listClose = [x.close for x in self.lineBar[-ema1Len - 1:-1]]
            barEma1 = ta.EMA(numpy.array(listClose, dtype=float), ema1Len)[-1]
            barEma1 = round(float(barEma1), 3)

            if len(self.lineEma1) > self.inputEma1Len*8:
                del self.lineEma1[0]
            self.lineEma1.append(barEma1)

        # Second EMA line.
        if self.inputEma2Len > 0:
            if self.inputEma2Len > l:
                ema2Len = l
            else:
                ema2Len = self.inputEma2Len

            # Closes of the previous ema2Len bars (current bar excluded).
            listClose = [x.close for x in self.lineBar[-ema2Len - 1:-1]]
            barEma2 = ta.EMA(numpy.array(listClose, dtype=float), ema2Len)[-1]
            barEma2 = round(float(barEma2), 3)

            # NOTE(review): bound uses inputEma1Len here -- looks like a
            # copy/paste slip (probably intended inputEma2Len); confirm.
            if len(self.lineEma2) > self.inputEma1Len*8:
                del self.lineEma2[0]
            self.lineEma2.append(barEma2)
    def __recountDmi(self):
        """Compute the DMI family (PDI, MDI, DX, ADX, ADXR) over the
        previous inputDmiLen closed bars, then derive the long/short trend
        filter flags."""
        if self.inputDmiLen <= 0:  # feature disabled
            return

        # Need inputDmiLen+1 bars of history.
        if len(self.lineBar) < self.inputDmiLen+1:
            self.debugCtaLog('数据未充分,当前Bar数据数量:{0},计算DMI需要:{1}'.format(len(self.lineBar), self.inputDmiLen+1))
            return

        # Accumulate TR, +DM and -DM over the window (current bar excluded).
        barTr1 = EMPTY_FLOAT   # sum of true ranges
        barPdm = EMPTY_FLOAT   # sum of positive directional moves
        barMdm = EMPTY_FLOAT   # sum of negative directional moves

        for i in range(len(self.lineBar)-2, len(self.lineBar)-2-self.inputDmiLen, -1):
            # True range: max of (high-low, |high-prevClose|, |low-prevClose|).
            high_low_spread = self.lineBar[i].high - self.lineBar[i].low
            high_preclose_spread = abs(self.lineBar[i].high - self.lineBar[i - 1].close)
            low_preclose_spread = abs(self.lineBar[i].low - self.lineBar[i - 1].close)
            max_spread = max(high_low_spread, high_preclose_spread, low_preclose_spread)
            barTr1 = barTr1 + float(max_spread)

            # Directional moves versus the previous bar.
            high_prehigh_spread = self.lineBar[i].high - self.lineBar[i - 1].high
            low_prelow_spread = self.lineBar[i - 1].low - self.lineBar[i].low

            # +DM: today's up-move dominates.
            if high_prehigh_spread > 0 and high_prehigh_spread > low_prelow_spread:
                barPdm = barPdm + high_prehigh_spread

            # -DM: today's down-move dominates.
            if low_prelow_spread > 0 and low_prelow_spread > high_prehigh_spread:
                barMdm = barMdm + low_prelow_spread

        # PDI: share of upward movement.
        if barTr1 == 0:
            self.barPdi = 0
        else:
            self.barPdi = barPdm * 100 / barTr1

        if len(self.linePdi) > self.inputDmiLen+1:
            del self.linePdi[0]
        self.linePdi.append(self.barPdi)

        # MDI: share of downward movement.
        if barTr1 == 0:
            self.barMdi = 0
        else:
            self.barMdi = barMdm * 100 / barTr1

        # DX, then ADX / ADXR.
        if self.barMdi + self.barPdi == 0:
            dx = 0
        else:
            dx = 100 * abs(self.barMdi - self.barPdi) / (self.barMdi + self.barPdi)

        if len(self.lineMdi) > self.inputDmiLen+1:
            del self.lineMdi[0]
        self.lineMdi.append(self.barMdi)

        if len(self.lineDx) > self.inputDmiLen+1:
            del self.lineDx[0]
        self.lineDx.append(dx)

        # ADX: smoothed DX (EMA once enough DX history exists).
        if len(self.lineDx) < self.inputDmiLen+1:
            self.barAdx = dx
        else:
            self.barAdx = ta.EMA(numpy.array(self.lineDx, dtype=float), self.inputDmiLen)[-1]

        # Store the ADX value.
        if len(self.lineAdx) > self.inputDmiLen+1:
            del self.lineAdx[0]
        self.lineAdx.append(self.barAdx)

        # ADXR: mean of the current ADX and the previous one.
        if len(self.lineAdx) == 1:
            self.barAdxr = self.lineAdx[-1]
        else:
            self.barAdxr = (self.lineAdx[-1] + self.lineAdx[-2]) / 2

        # Store the ADXR value.
        if len(self.lineAdxr) > self.inputDmiLen+1:
            del self.lineAdxr[0]
        self.lineAdxr.append(self.barAdxr)

        # Rising ADX => the current trend is expected to continue.
        if len(self.lineAdx) < 2:
            self.barAdxTrend = False
        elif self.lineAdx[-1] > self.lineAdx[-2]:
            self.barAdxTrend = True
        else:
            self.barAdxTrend = False

        # Rising ADXR => volatility higher than the previous period.
        if len(self.lineAdxr) < 2:
            self.barAdxrTrend = False
        elif self.lineAdxr[-1] > self.lineAdxr[-2]:
            self.barAdxrTrend = True
        else:
            self.barAdxrTrend = False

        # Long filter: PDI dominates, ADX/ADXR rising, PDI above threshold.
        if self.barPdi > self.barMdi and self.barAdxTrend and self.barAdxrTrend and self.barPdi >= self.inputDmiMax:
            self.buyFilterCond = True

            self.writeCtaLog('{0}[DEBUG]Buy Signal On Bar,Pdi:{1}>Mdi:{2},adx[-1]:{3}>Adx[-2]:{4}'
                             .format(self.curTick.datetime, self.barPdi, self.barMdi, self.lineAdx[-1], self.lineAdx[-2]))
        else:
            self.buyFilterCond = False

        # Short filter: MDI dominates, ADX/ADXR rising, MDI above threshold.
        if self.barPdi < self.barMdi and self.barAdxTrend and self.barAdxrTrend and self.barMdi >= self.inputDmiMax:
            self.sellFilterCond = True

            self.writeCtaLog('{0}[DEBUG]Short Signal On Bar,Pdi:{1}<Mdi:{2},adx[-1]:{3}>Adx[-2]:{4}'
                             .format(self.curTick.datetime, self.barPdi, self.barMdi, self.lineAdx[-1], self.lineAdx[-2]))
        else:
            self.sellFilterCond = False
    def __recountAtr(self):
        """Compute the three ATR series (inputAtr1/2/3Len).

        The first call seeds each ATR from a full true-range sum over its
        window; later calls use Wilder-style incremental smoothing with the
        latest closed bar's true range only.
        """
        maxAtrLen = max(self.inputAtr1Len, self.inputAtr2Len, self.inputAtr3Len)
        if maxAtrLen <= 0:  # feature disabled
            return

        if len(self.lineBar) < maxAtrLen+1:
            self.debugCtaLog('数据未充分,当前Bar数据数量:{0},计算ATR需要:{1}'.
                             format(len(self.lineBar), maxAtrLen+1))
            return

        # First computation: accumulate true ranges over each window.
        if (self.inputAtr1Len > 0 and len(self.lineAtr1) < 1) \
                or (self.inputAtr2Len > 0 and len(self.lineAtr2) < 1) \
                or (self.inputAtr3Len > 0 and len(self.lineAtr3) < 1):

            barTr1 = EMPTY_FLOAT   # TR sum over inputAtr1Len bars
            barTr2 = EMPTY_FLOAT   # TR sum over inputAtr2Len bars
            barTr3 = EMPTY_FLOAT   # TR sum over inputAtr3Len bars

            j = 0

            for i in range(len(self.lineBar)-2, len(self.lineBar)-2-maxAtrLen, -1):
                # True range of bar i.
                high_low_spread = self.lineBar[i].high - self.lineBar[i].low
                high_preclose_spread = abs(self.lineBar[i].high - self.lineBar[i - 1].close)
                low_preclose_spread = abs(self.lineBar[i].low - self.lineBar[i - 1].close)
                max_spread = max(high_low_spread, high_preclose_spread, low_preclose_spread)

                if j < self.inputAtr1Len:
                    barTr1 = barTr1 + float(max_spread)
                if j < self.inputAtr2Len:
                    barTr2 = barTr2 + float(max_spread)
                if j < self.inputAtr3Len:
                    barTr3 = barTr3 + float(max_spread)
                j = j + 1

        else:
            # Subsequent calls: only the latest closed bar's true range.
            high_low_spread = self.lineBar[-2].high - self.lineBar[-2].low
            high_preclose_spread = abs(self.lineBar[-2].high - self.lineBar[-3].close)
            low_preclose_spread = abs(self.lineBar[-2].low - self.lineBar[-3].close)
            barTr1 = max(high_low_spread, high_preclose_spread, low_preclose_spread)
            barTr2 = barTr1
            barTr3 = barTr1

        # ATR1 (Wilder smoothing after the seed value).
        if self.inputAtr1Len > 0:
            if len(self.lineAtr1) < 1:
                self.barAtr1 = round(barTr1 / self.inputAtr1Len, 3)
            else:
                self.barAtr1 = round((self.lineAtr1[-1]*(self.inputAtr1Len -1) + barTr1) / self.inputAtr1Len, 3)

            if len(self.lineAtr1) > self.inputAtr1Len+1:
                del self.lineAtr1[0]
            self.lineAtr1.append(self.barAtr1)

        # ATR2.
        if self.inputAtr2Len > 0:
            if len(self.lineAtr2) < 1:
                self.barAtr2 = round(barTr2 / self.inputAtr2Len, 3)
            else:
                self.barAtr2 = round((self.lineAtr2[-1]*(self.inputAtr2Len -1) + barTr2) / self.inputAtr2Len, 3)
            if len(self.lineAtr2) > self.inputAtr2Len+1:
                del self.lineAtr2[0]
            self.lineAtr2.append(self.barAtr2)

        # ATR3.
        if self.inputAtr3Len > 0:
            if len(self.lineAtr3) < 1:
                self.barAtr3 = round(barTr3 / self.inputAtr3Len, 3)
            else:
                self.barAtr3 = round((self.lineAtr3[-1]*(self.inputAtr3Len -1) + barTr3) / self.inputAtr3Len, 3)
            if len(self.lineAtr3) > self.inputAtr3Len+1:
                del self.lineAtr3[0]
            self.lineAtr3.append(self.barAtr3)
#----------------------------------------------------------------------
def __recoundAvgVol(self):
"""计算平均成交量"""
# 1、lineBar满足长度才执行计算
if self.inputVolLen <= 0: # 不计算
return
if len(self.lineBar) < self.inputVolLen+1:
self.debugCtaLog('数据未充分,当前Bar数据数量:{0},计算Avg Vol需要:{1}'.
format(len(self.lineBar), self.inputVolLen+1))
return
listVol = [x.volume for x in self.lineBar[-self.inputVolLen-1: -1]]
sumVol = ta.SUM(numpy.array(listVol, dtype=float), timeperiod=self.inputVolLen)[-1]
avgVol = round(sumVol/self.inputVolLen, 0)
self.lineAvgVol.append(avgVol)
    # ----------------------------------------------------------------------
    def __recountRsi(self):
        """Compute RSI over the last inputRsiLen bars (current bar included)
        and record local RSI peaks/troughs for divergence checks."""
        if self.inputRsiLen <= 0: return

        # Need inputRsiLen+2 bars for talib's RSI warm-up.
        if len(self.lineBar) < self.inputRsiLen+2:
            self.debugCtaLog('数据未充分,当前Bar数据数量:{0},计算RSI需要:{1}'.
                             format(len(self.lineBar), self.inputRsiLen+2))
            return

        # RSI of the trailing window, current bar included.
        listClose = [x.close for x in self.lineBar[-self.inputRsiLen - 2:]]
        barRsi = ta.RSI(numpy.array(listClose, dtype=float), self.inputRsiLen)[-1]
        barRsi = round(float(barRsi), 3)

        l = len(self.lineRsi)
        if l > self.inputRsiLen*8:
            del self.lineRsi[0]

        self.lineRsi.append(barRsi)

        if l > 3:
            # Local peak: lineRsi[-2] higher than both neighbours.
            if self.lineRsi[-1] < self.lineRsi[-2] and self.lineRsi[-3] < self.lineRsi[-2]:

                t = {}
                t["Type"] = 'T'
                t["RSI"] = self.lineRsi[-2]
                t["Close"] = self.lineBar[-2].close

                if len(self.lineRsiTop) > self.inputRsiLen:
                    del self.lineRsiTop[0]

                self.lineRsiTop.append( t )
                self.lastRsiTopButtom = self.lineRsiTop[-1]

            # Local trough: lineRsi[-2] lower than both neighbours.
            elif self.lineRsi[-1] > self.lineRsi[-2] and self.lineRsi[-3] > self.lineRsi[-2]:

                b = {}
                b["Type"] = 'B'
                b["RSI"] = self.lineRsi[-2]
                b["Close"] = self.lineBar[-2].close

                if len(self.lineRsiButtom) > self.inputRsiLen:
                    del self.lineRsiButtom[0]

                self.lineRsiButtom.append(b)
                self.lastRsiTopButtom = self.lineRsiButtom[-1]
    def __recountCmi(self):
        """Choppy Market Index.

        CMI = abs(close - close[n-1]) * 100 / (HHV(n) - LLV(n)), n =
        inputCmiLen.  Readings below ~20 suggest a ranging market, above 20
        a trending one; long stretches near 0 or 100 often precede a regime
        change.

        NOTE(review): the textbook formula uses highest HIGH / lowest LOW,
        but the code below takes both extremes from CLOSES -- confirm this
        is intentional.
        """
        if self.inputCmiLen <= EMPTY_INT: return

        if len(self.lineBar) < self.inputCmiLen:
            self.debugCtaLog('数据未充分,当前Bar数据数量:{0},计算CMI需要:{1}'.
                             format(len(self.lineBar), self.inputCmiLen))
            return

        listClose = [x.close for x in self.lineBar[-self.inputCmiLen:]]
        hhv = max(listClose)
        llv = min(listClose)

        if hhv == llv:
            cmi = 100  # flat window: treat as maximally trending
        else:
            cmi = abs(self.lineBar[-1].close-self.lineBar[-2].close)*100/(hhv-llv)

        cmi = round(cmi, 2)

        if len(self.lineCmi) > self.inputCmiLen:
            del self.lineCmi[0]

        self.lineCmi.append(cmi)
def __recountBoll(self):
"""布林特线"""
if self.inputBollLen < EMPTY_INT: return
l = len(self.lineBar)
if l < min(7, self.inputBollLen)+1:
self.debugCtaLog('数据未充分,当前Bar数据数量:{0},计算Boll需要:{1}'.
format(len(self.lineBar), min(7, self.inputBollLen)+1))
return
if l < self.inputBollLen+2:
bollLen = l-1
else:
bollLen = self.inputBollLen
# 不包含当前最新的Bar
listClose=[x.close for x in self.lineBar[-bollLen - 1:-1]]
#
upper, middle, lower = ta.BBANDS(numpy.array(listClose, dtype=float),
timeperiod=bollLen, nbdevup=self.inputBollStdRate,
nbdevdn=self.inputBollStdRate, matype=0)
self.lineUpperBand.append(upper[-1])
self.lineMiddleBand.append(middle[-1])
self.lineLowerBand.append(lower[-1])
# ----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录CTA日志"""
self.strategy.writeCtaLog('['+self.name+']'+content)
    def debugCtaLog(self,content):
        """Write a CTA debug log line; suppressed unless the module-level
        DEBUGCTALOG flag is set."""
        if DEBUGCTALOG:
self.strategy.writeCtaLog('['+self.name+'-DEBUG]'+content) | 34.978622 | 127 | 0.529811 |
b6dbbd58a4452f8b70067a0c9361e81b2397849b | 3,255 | py | Python | napari/_vispy/visuals/filters/material.py | donovanr/napari | 580b5eab8cc40af53aef780a65adb9216d968a32 | [
"BSD-3-Clause"
] | null | null | null | napari/_vispy/visuals/filters/material.py | donovanr/napari | 580b5eab8cc40af53aef780a65adb9216d968a32 | [
"BSD-3-Clause"
] | 1 | 2019-05-24T17:01:51.000Z | 2019-05-24T18:06:22.000Z | napari/_vispy/visuals/filters/material.py | AllenCellModeling/napari | 3566383e6310d02e8673b564b6f63411fa176708 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Material components are modular shader components used for modifying fragment
colors to change the visual's appearance.
These generally create a function in the fragment shader that accepts a vec4
color as its only argument and returns a modified vec4 color.
"""
from __future__ import division
from .component import VisualComponent
from ..shaders import Varying
class GridContourComponent(VisualComponent):
    """
    Draw grid lines across a surface.

    Lightens fragments that lie close to a multiple of ``spacing`` along
    any axis of the surface's local position.
    """
    SHADERS = dict(
        frag_color="""
            vec4 grid_contour(vec4 color) {
                if ( mod($pos.x, $spacing.x) < 0.005 ||
                    mod($pos.y, $spacing.y) < 0.005 ||
                    mod($pos.z, $spacing.z) < 0.005 ) {
                    return color + 0.7 * (vec4(1,1,1,1) - color);
                }
                else {
                    return color;
                }
            }
        """,
        vert_post_hook="""
            void grid_contour_support() {
                $output_pos = local_position();
            }
        """)

    def __init__(self, spacing):
        super(GridContourComponent, self).__init__()
        self.spacing = spacing
        # Bug fix: backing attribute for the ``color`` property was never
        # initialised, so reading ``color`` before assigning it raised
        # AttributeError.
        self._color = None
        # Create Varying to connect vertex / fragment shaders
        var = Varying('pos', dtype='vec4')
        self._funcs['frag_color']['pos'] = var
        self._funcs['vert_post_hook']['output_pos'] = var

    @property
    def color(self):
        # NOTE(review): not referenced by the shader above -- presumably
        # kept for API symmetry with other components; confirm.
        return self._color

    @color.setter
    def color(self, c):
        self._color = c

    def activate(self, program, mode):
        """Bind per-draw template variables; ``spacing`` maps to a vec3 uniform."""
        ff = self._funcs['frag_color']
        ff['spacing'] = self.spacing  # uniform vec3
class ShadingComponent(VisualComponent):
    """
    Phong reflection and shading material.

    Combines an ambient term, a diffuse term from one directional light and
    a hard-coded specular highlight (exponent 100, gain 5) in the fragment
    shader.
    """
    SHADERS = dict(
        frag_color="""
            vec4 shading(vec4 color) {
                vec3 norm = normalize($normal().xyz);
                vec3 light = normalize($light_direction.xyz);
                float p = dot(light, norm);
                p = (p < 0. ? 0. : p);
                vec4 diffuse = $light_color * p;
                diffuse.a = 1.0;
                p = dot(reflect(light, norm), vec3(0,0,1));
                if (p < 0.0) {
                    p = 0.0;
                }
                vec4 specular = $light_color * 5.0 * pow(p, 100.);
                return color * ($ambient + diffuse) + specular;
            }
        """)

    def __init__(self, normal_comp, lights, ambient=0.2):
        # normal_comp supplies the surface-normal shader function and is
        # registered as a dependency so its hooks are installed too.
        super(ShadingComponent, self).__init__()
        self.normal_comp = normal_comp
        self._deps = [normal_comp]
        # lights: sequence of (direction, color) pairs; only lights[0] is
        # used below.
        self.lights = lights
        self.ambient = ambient

    def activate(self, program, mode):
        """Wire the shader template variables for this draw call."""
        # Normals are generated by output of another component
        ff = self._funcs['frag_color']
        ff['normal'] = self.normal_comp.normal_shader()
        # TODO: add support for multiple lights
        ff['light_direction'] = tuple(self.lights[0][0][:3]) + (1,)  # u vec4
        ff['light_color'] = tuple(self.lights[0][1][:3]) + (1,)  # u vec4
        ff['ambient'] = self.ambient  # u float
| 31.298077 | 77 | 0.55023 |
1a66cad367a6a3d705ae58dab4ac824fca586dad | 176 | py | Python | docs/conf.py | federicober/funk-lines | 04f31b78379c57bcf4e38485a6b2e15bd2b85b07 | [
"MIT"
] | 1 | 2021-03-14T17:57:42.000Z | 2021-03-14T17:57:42.000Z | docs/conf.py | federicober/funk-lines | 04f31b78379c57bcf4e38485a6b2e15bd2b85b07 | [
"MIT"
] | 138 | 2020-09-28T06:55:14.000Z | 2022-02-06T17:47:00.000Z | docs/conf.py | federicober/funk-lines | 04f31b78379c57bcf4e38485a6b2e15bd2b85b07 | [
"MIT"
] | null | null | null | """Sphinx configuration."""
# Project metadata shown in the rendered documentation.
project = "Funk Lines"
author = "Federico Oberndorfer"
# autodoc pulls API docs from docstrings; napoleon parses Google/NumPy style.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
# Render type hints in the description body instead of the signature.
autodoc_typehints = "description"
| 29.333333 | 58 | 0.744318 |
d44ec814e03494ab006ee564b9c9b47b08ffcedd | 10,403 | py | Python | tests/test_templating.py | jetbrains-infra/k8s-handle | 5b4a30a719a439dd39ba8cecfd87df6d59e1531a | [
"Apache-2.0"
] | 152 | 2018-08-23T12:41:16.000Z | 2022-02-02T15:16:15.000Z | tests/test_templating.py | jetbrains-infra/k8s-handle | 5b4a30a719a439dd39ba8cecfd87df6d59e1531a | [
"Apache-2.0"
] | 124 | 2018-08-20T03:55:18.000Z | 2021-09-28T09:01:15.000Z | tests/test_templating.py | jetbrains-infra/k8s-handle | 5b4a30a719a439dd39ba8cecfd87df6d59e1531a | [
"Apache-2.0"
] | 32 | 2018-10-06T00:48:26.000Z | 2022-03-24T14:39:44.000Z | import os
import yaml
import shutil
import unittest
from k8s_handle import settings
from k8s_handle import config
from k8s_handle import templating
from k8s_handle.templating import TemplateRenderingError
class TestTemplating(unittest.TestCase):
    """Tests for k8s_handle.templating.Renderer template generation."""

    def setUp(self):
        # Point the app at the test fixtures and provide the environment
        # variables the templates reference.
        settings.CONFIG_FILE = 'tests/fixtures/config.yaml'
        settings.TEMPLATES_DIR = 'templates/tests'
        os.environ['CUSTOM_ENV'] = 'My value'
        os.environ['K8S_CONFIG_DIR'] = '/tmp/kube/'

    def tearDown(self):
        # Remove generated output and the environment variables from setUp.
        if os.path.exists(settings.TEMP_DIR):
            shutil.rmtree(settings.TEMP_DIR)
        os.environ.pop('CUSTOM_ENV')
        os.environ.pop('K8S_CONFIG_DIR')
def test_renderer_init(self):
r = templating.Renderer('/tmp/test')
self.assertEqual(r._templates_dir, '/tmp/test')
def test_none_context(self):
r = templating.Renderer('templates')
with self.assertRaises(RuntimeError) as context:
r.generate_by_context(None)
self.assertTrue('Can\'t generate templates from None context' in str(context.exception), str(context.exception))
    def test_generate_templates(self):
        """Rendering the 'test_dirs' context writes every expected file with
        the expected content (env lookup, b64 filter, includes, directory
        recursion and file listing)."""
        r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
        context = config.load_context_section('test_dirs')
        r.generate_by_context(context)

        # Expected output paths under the temp dir.
        file_path_1 = '{}/template1.yaml'.format(settings.TEMP_DIR)
        file_path_2 = '{}/template2.yaml'.format(settings.TEMP_DIR)
        file_path_3 = '{}/template3.yaml'.format(settings.TEMP_DIR)
        file_path_4 = '{}/innerdir/template1.yaml'.format(settings.TEMP_DIR)
        file_path_5 = '{}/template_include_file.yaml'.format(settings.TEMP_DIR)
        file_path_6 = '{}/template_list_files.yaml'.format(settings.TEMP_DIR)
        self.assertTrue(os.path.exists(file_path_1))
        self.assertTrue(os.path.exists(file_path_2))
        self.assertTrue(os.path.exists(file_path_3))

        with open(file_path_1, 'r') as f:
            content = f.read()
        self.assertEqual(content, "{'ha_ha': 'included_var'}")

        # b64-encoded CUSTOM_ENV value "My value".
        with open(file_path_2, 'r') as f:
            content = f.read()
        self.assertEqual(content, 'TXkgdmFsdWU=')

        with open(file_path_3, 'r') as f:
            content = f.read()
        self.assertEqual(content, 'My value')

        # Same template rendered inside a nested output directory.
        with open(file_path_4, 'r') as f:
            content = f.read()
        self.assertEqual(content, "test: |\n  {{ hello world }}\n  new\n  line\n  {{ hello world1 }}\n") if False else None
        self.assertEqual(content, "{'ha_ha': 'included_var'}")

        with open(file_path_5, 'r') as f:
            content = f.read()
        self.assertEqual(content, "test: |\n  {{ hello world }}\n  new\n  line\n  {{ hello world1 }}\n")

        with open(file_path_6, 'r') as f:
            content = f.read()
        self.assertEqual(content, "test: |\n  template1.yaml.j2:\n  my_file.txt:\n  my_file1.txt:\n  ")
def test_no_templates_in_kubectl(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
with self.assertRaises(RuntimeError) as context:
r.generate_by_context(config.load_context_section('no_templates'))
self.assertTrue('Templates section doesn\'t have any template items' in str(context.exception))
def test_render_not_existent_template(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
with self.assertRaises(TemplateRenderingError) as context:
r.generate_by_context(config.load_context_section('not_existent_template'))
self.assertTrue('doesnotexist.yaml.j2' in str(context.exception), context.exception)
def test_generate_templates_with_kubectl_section(self):
    """A context using the legacy 'kubectl' section renders like 'templates'."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    renderer.generate_by_context(config.load_context_section('section_with_kubectl'))
    file_path_1 = f'{settings.TEMP_DIR}/template1.yaml'
    file_path_2 = f'{settings.TEMP_DIR}/template2.yaml'
    file_path_3 = f'{settings.TEMP_DIR}/template3.yaml'
    file_path_4 = f'{settings.TEMP_DIR}/innerdir/template1.yaml'
    self.assertTrue(os.path.exists(file_path_1))
    self.assertTrue(os.path.exists(file_path_2))
    self.assertTrue(os.path.exists(file_path_3))
    with open(file_path_1, 'r') as f:
        content = f.read()
    self.assertEqual(content, "{'ha_ha': 'included_var'}")
    with open(file_path_2, 'r') as f:
        content = f.read()
    self.assertEqual(content, 'TXkgdmFsdWU=')
    with open(file_path_3, 'r') as f:
        content = f.read()
    self.assertEqual(content, 'My value')
    with open(file_path_4, 'r') as f:
        content = f.read()
    self.assertEqual(content, "{'ha_ha': 'included_var'}")

def test_io_2709(self):
    """An undefined template variable must surface as TemplateRenderingError."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    with self.assertRaises(TemplateRenderingError) as context:
        renderer.generate_by_context(config.load_context_section('io_2709'))
    self.assertTrue('due to: \'undefined_variable\' is undefined' in str(context.exception))

def test_evaluate_tags(self):
    """_evaluate_tags honours only/skip filters, including the empty-tags case."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    tags = {'tag1', 'tag2', 'tag3'}
    self.assertTrue(renderer._evaluate_tags(tags, only_tags=['tag1'], skip_tags=None))
    self.assertFalse(renderer._evaluate_tags(tags, only_tags=['tag4'], skip_tags=None))
    self.assertFalse(renderer._evaluate_tags(tags, only_tags=['tag1'], skip_tags=['tag1']))
    self.assertFalse(renderer._evaluate_tags(tags, only_tags=None, skip_tags=['tag1']))
    self.assertTrue(renderer._evaluate_tags(tags, only_tags=None, skip_tags=['tag4']))
    tags = set()
    self.assertFalse(renderer._evaluate_tags(tags, only_tags=['tag4'], skip_tags=None))
    self.assertTrue(renderer._evaluate_tags(tags, only_tags=None, skip_tags=['tag4']))
def test_get_template_tags(self):
    """Tags may be given either as a list or as a comma-separated string."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    cases = [
        ({'template': 'template.yaml.j2', 'tags': ['tag1', 'tag2', 'tag3']}, {'tag1', 'tag2', 'tag3'}),
        ({'template': 'template.yaml.j2', 'tags': 'tag1,tag2,tag3'}, {'tag1', 'tag2', 'tag3'}),
        ({'template': 'template.yaml.j2', 'tags': ['tag1']}, {'tag1'}),
        ({'template': 'template.yaml.j2', 'tags': 'tag1'}, {'tag1'}),
    ]
    for template, expected in cases:
        self.assertEqual(renderer._get_template_tags(template), expected)

def test_get_template_tags_unexpected_type(self):
    """Anything other than list/str as tags must raise TypeError."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    template = {'template': 'template.yaml.j2', 'tags': {'tag': 'unexpected'}}
    with self.assertRaises(TypeError) as context:
        renderer._get_template_tags(template)
    self.assertTrue('unexpected type' in str(context.exception))

def test_generate_group_templates(self):
    """Template groups render all of their member templates."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    renderer.generate_by_context(config.load_context_section('test_groups'))
    for name in ('template1.yaml', 'template2.yaml', 'template3.yaml'):
        self.assertTrue(os.path.exists(f'{settings.TEMP_DIR}/{name}'))
def test_templates_regex(self):
    """Only templates whose names match the context's regex are rendered."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    renderer.generate_by_context(config.load_context_section('templates_regex'))
    self.assertTrue(os.path.exists(f'{settings.TEMP_DIR}/template1.yaml'))
    self.assertFalse(os.path.exists(f'{settings.TEMP_DIR}/template2.yaml'))
    self.assertFalse(os.path.exists(f'{settings.TEMP_DIR}/template3.yaml'))
    self.assertFalse(os.path.exists(f'{settings.TEMP_DIR}/template4.yaml'))
    self.assertTrue(os.path.exists(f'{settings.TEMP_DIR}/innerdir/template1.yaml'))
    self.assertFalse(os.path.exists(f'{settings.TEMP_DIR}/template_include_file.yaml'))

def test_templates_regex_parse_failed(self):
    """An invalid regex must surface as TemplateRenderingError."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    section = config.load_context_section('templates_regex_invalid')
    with self.assertRaises(TemplateRenderingError) as context:
        renderer.generate_by_context(section)
    self.assertTrue('Processing [: template [ hasn\'t been found' in str(context.exception))

def test_filters(self):
    """Custom Jinja filters (b64encode/b64decode/sha256/affinity) work."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    renderer.generate_by_context(config.load_context_section('test_filters'))
    with open(f'{settings.TEMP_DIR}/filters.yaml', 'r') as f:
        actual = yaml.safe_load(f)
    self.assertEqual('aGVsbG8gd29ybGQ=', actual.get('b64encode'))
    self.assertEqual('k8s-handle', actual.get('b64decode'))
    self.assertEqual('8fae6dd899aace000fd494fd6d795e26e2c85bf8e59d4262ef56b03dc91e924c', actual.get('sha256'))
    affinity = [
        {'effect': 'NoSchedule', 'key': 'dedicated', 'operator': 'Equal', 'value': 'monitoring'},
        {'effect': 'NoSchedule', 'key': 'dedicated', 'operator': 'Equal', 'value': {'hello': 'world'}}
    ]
    self.assertEqual(affinity, actual.get('affinity'))

def test_dashes(self):
    """Template file names containing dashes render fine."""
    renderer = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
    renderer.generate_by_context(config.load_context_section('test_dashes'))
    with open(f'{settings.TEMP_DIR}/template-dashes.yaml', 'r') as f:
        actual = yaml.safe_load(f)
    self.assertEqual('do this', actual)
| 52.276382 | 120 | 0.669326 |
3ec21abf0bbdd1b7d5ebdeb472a1d5de6552e289 | 1,923 | py | Python | userbot/modules/nhentai.py | ratumelda/ratumelda | 73cc41062ccd10cdae3b964eaa41ff3068fcb4c3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 28 | 2020-09-03T08:53:36.000Z | 2022-03-31T02:51:05.000Z | userbot/modules/nhentai.py | PerU-MoNsteR/ProjectAlf | c846dfddcb0d4266badd2b0ccfff398fceb121e9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 13 | 2020-08-27T07:46:08.000Z | 2021-12-19T22:48:56.000Z | userbot/modules/nhentai.py | PerU-MoNsteR/ProjectAlf | c846dfddcb0d4266badd2b0ccfff398fceb121e9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 259 | 2020-08-06T13:59:30.000Z | 2022-01-02T06:16:30.000Z | # Copyright (C) 2020 KeselekPermen69
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
from asyncio.exceptions import TimeoutError
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from userbot import CMD_HELP, bot
from userbot.events import register
@register(outgoing=True, pattern=r"^\.nhentai(?: |$)(.*)")
async def _(event):
    """Forward a link/code to @nHentaiBot and relay its telegra.ph answer."""
    if event.fwd_from:
        return
    link = event.pattern_match.group(1)
    if not link:
        return await event.edit("`I can't search nothing`")
    chat = "@nHentaiBot"
    await event.edit("```Processing```")
    try:
        async with bot.conversation(chat) as conv:
            try:
                # Register for the bot's reply BEFORE sending the query,
                # so the response cannot be missed.
                response = conv.wait_event(
                    events.NewMessage(incoming=True, from_users=424466890)
                )
                msg = await bot.send_message(chat, link)
                response = await response
                await bot.send_read_acknowledge(conv.chat_id)
            except YouBlockedUserError:
                await event.reply("```Please unblock @nHentaiBot and try again```")
                return
            if response.text.startswith("**Sorry I couldn't get manga from**"):
                await event.edit("```I think this is not the right link```")
            else:
                # Replace the command message with the bot's answer.
                await event.delete()
                await bot.send_message(event.chat_id, response.message)
                await bot.send_read_acknowledge(event.chat_id)
                await event.client.delete_messages(conv.chat_id, [msg.id, response.id])
    except TimeoutError:
        await event.edit("`@nHentaiBot isnt responding..`")
        await event.client.delete_messages(conv.chat_id, [msg.id])

CMD_HELP.update(
    {"nhentai": "`.nhentai` <link / code>\nUsage: view nhentai in telegra.ph :v"}
)
| 36.980769 | 88 | 0.633385 |
58a875d422e7b0c1812ac5d1756d7ee200f79a3d | 986 | py | Python | nesc/whip6/platforms/tools/debugger/ti-checkjtag/checkjtag.py | wojtex/whip6-pub | 7aca863e45199f4f1354f24b1c88afd8cb34c2ba | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"Intel",
"BSD-3-Clause"
] | 1 | 2017-02-21T16:44:56.000Z | 2017-02-21T16:44:56.000Z | nesc/whip6/platforms/tools/debugger/ti-checkjtag/checkjtag.py | wojtex/whip6-pub | 7aca863e45199f4f1354f24b1c88afd8cb34c2ba | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"Intel",
"BSD-3-Clause"
] | 9 | 2017-02-21T16:43:31.000Z | 2021-06-10T19:28:41.000Z | nesc/whip6/platforms/tools/debugger/ti-checkjtag/checkjtag.py | wojtex/whip6-pub | 7aca863e45199f4f1354f24b1c88afd8cb34c2ba | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"Intel",
"BSD-3-Clause"
] | 12 | 2016-12-19T12:04:17.000Z | 2020-09-17T14:44:39.000Z | #
# whip6: Warsaw High-performance IPv6.
#
# Copyright (c) 2012-2017 Szymon Acedanski
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE
# files.
#
import os
import os.path
import sys
from build_step import BuildStep
# Key under which the platform's build.spec declares the TI JTAG config file.
CONFIG = 'ti jtag config file'


class CheckJTAG(BuildStep):
    """Build step that verifies the JTAG link using TI's `dbgjtag` tool."""

    def __init__(self, project_root, configs, flags):
        BuildStep.__init__(self, project_root, configs, flags)

    def run_step(self):
        """Run dbgjtag path-length and integrity checks on the configured probe."""
        config_file = self.find_config_value(CONFIG)
        if not config_file:
            raise RuntimeError("Platform did not define the mandatory "
                               "setting '%s' in build.spec" % (CONFIG,))
        # build.spec paths are relative to the project root; normalize once.
        config_file = os.path.abspath(os.path.join(self.project_root, config_file))
        self.call('dbgjtag', '-f', config_file, '-rv', '-o',
                  '-F', 'inform,logfile=yes', '-S', 'pathlength', '-S', 'integrity')


# Exports the BuildStep to make it visible for smake
BuildStepImpl = CheckJTAG
| 29 | 78 | 0.693712 |
f0e86a719ff3e7c6dfa0365f2b9f1eea16c698b3 | 12,918 | py | Python | src/m1r_accumulator_examples.py | meisnehb/04-TheAccumulatorPattern | fa6c4f69ee11a6c49fc72a4c3597a6b16d847ac7 | [
"MIT"
] | null | null | null | src/m1r_accumulator_examples.py | meisnehb/04-TheAccumulatorPattern | fa6c4f69ee11a6c49fc72a4c3597a6b16d847ac7 | [
"MIT"
] | null | null | null | src/m1r_accumulator_examples.py | meisnehb/04-TheAccumulatorPattern | fa6c4f69ee11a6c49fc72a4c3597a6b16d847ac7 | [
"MIT"
] | null | null | null | """
This module demonstrates the ACCUMULATOR pattern in three classic forms:
SUMMING: total = total + number
COUNTING: count = count + 1
IN GRAPHICS: x = x + pixels
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher, Mark Hays,
Aaron Wilkin, their colleagues, and Hannah Meisner.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
#
# DONE: 2.
# RUN this program, then READ its code.
# Then answer the following, GETTING HELP AS NEED! (Ask questions!!!)
# Write your answers in any reasonable way (your choice).
#
# For the first several questions, some students find the following
# picture helpful. (Your instructor may explain it in whole-group.)
#
# 0 1 2 3 4 ... r-1 r r+1 r+2 r+3 ... s
# |..... r numbers .....|
# |................ s+1 numbers ..................|
# Hence: |... (s+1)-r numbers ...|
#
# a. If you want a loop that runs r times,
# which of the following three choices would you use?
#
# for k in range(r - 1):
# for k in range(r): X
# for k in range(r + 1):
#
# b. If you want a loop that runs from 0 to s, inclusive,
# what expression would you use in the _____ below?
#
# for k in range(s + 1):
#
# c. If you want a loop that runs from r to s, inclusive, assuming s >= r,
# what expression would you use in the _____ below?
#
# for k in range(s + 1 - r):
#
# d. If you want a loop that runs from (r + 4) to (s - 10),
# including the (r + 4) but not including the (s - 10),
# what expression would you use in the _____ below?
#
# for k in range((s-10)-(r+4)):
#
# e. The following code snippet attempts to return the number
# of integers from r to s, inclusive, whose cosines are positive.
# It has at least 5 distinct errors (one per line). What are they?
#
# for k in range(r-s): count = 0
# count = 0 for k in range((s + 1) - r):
# if math.cos(r) > 0: if math.cos(k+r) > 0:
# count = 1 count = count + 1
# return count return count
#
# f. The code in the "graphics accumulation" example below includes:
# for _ in range(n):
# What does the _ (underscore) mean?
# you can ignore this variable, like 'k'
# g. The code in the "graphics accumulation" example below includes:
#
# x = starting_point.x
# for _ in range(n):
# center = rg.Point(x, y)
# circle = rg.Circle(point, radius)
# circle.attach_to(window)
# x = x + diameter
#
# If you want the row-of-circles that the above creates,
# one of the following two attempts is a CORRECT attempt
# (i.e., is equivalent in its functionality to the above)
# and one is WRONG. Which is the WRONG one?
#
# first one is correct, second one is incorrect
#
# x = starting_point.x
# for k in range(n):
# center = rg.Point(x + (k * diameter), y)
# circle = rg.Circle(point, radius)
# circle.attach_to(window)
#
# x = starting_point.x
# for k in range(n):
# center = rg.Point(x + (k * diameter), y)
# circle = rg.Circle(point, radius)
# circle.attach_to(window)
# x = x + (2 * radius)
#
###############################################################################
# *** MAKE SURE YOU UNDERSTAND THE 3 ACCUMULATOR PATTERNS ***
# *** shown in this module: SUMMING, COUNTING, and IN GRAPHICS ***
###############################################################################
#
# When you are confident that you understand the 3 accumulator patterns
# and have correct answers to the above questions (ASK QUESTIONS AS NEEDED!),
# check your work by asking a student assistant to look at your answers.
#
# After checking your work (making corrections as needed),
# change the above _TODO_ to DONE.
#
###############################################################################
import rosegraphics as rg
import math
def main():
    """Run every self-test function in this module, in order."""
    for run_test in (run_test_summing_example,
                     run_test_counting_example,
                     run_test_draw_row_of_circles):
        run_test()
def run_test_summing_example():
    """ Tests the summing_example function. """
    print()
    print('--------------------------------------------------')
    print('Testing the summing_example function:')
    print('--------------------------------------------------')

    # (input, expected) pairs for the three tests below.
    cases = [(4, 100), (20, 44100), (0, 0)]
    for test_number, (argument, expected) in enumerate(cases, start=1):
        answer = summing_example(argument)
        print('Test', test_number, 'expected:', expected)
        print('         actual: ', answer)


def summing_example(n):
    """
    What comes in: The sole argument is a non-negative integer n.
    What goes out: Returns the sum
       (1 cubed) + (2 cubed) + (3 cubed) + ... + (n cubed).
    Side effects: None.
    Examples:
      -- summing_example(4) returns (1 + 8 + 27 + 64), which is 100.
      -- summing_example(20) returns 44,100.
    """
    total = 0                    # SUMMING accumulator: initialize BEFORE the loop
    for k in range(1, n + 1):    # k runs over 1, 2, ..., n
        total = total + k ** 3   # Accumulate INSIDE the loop
    return total                 # Return the result AFTER the loop
def run_test_counting_example():
    """ Tests the counting_example function. """
    print()
    print('--------------------------------------------------')
    print('Testing the counting_example function:')
    print('--------------------------------------------------')

    # (input, expected) pairs for the three tests below.
    cases = [(2, 2), (20, 12), (0, 1)]
    for test_number, (argument, expected) in enumerate(cases, start=1):
        answer = counting_example(argument)
        print('Test', test_number, 'expected:', expected)
        print('         actual: ', answer)


def counting_example(n):
    """
    What comes in: The sole argument is a non-negative integer n.
    What goes out: Returns the number of integers from 0 to n,
      inclusive, whose cosine is positive.
    Side effects: None.
    Examples:
      -- counting_example(2) returns 2 (cos 0 and cos 1 are positive,
         cos 2 is negative).
      -- counting_example(20) returns 12.
      -- counting_example(0) returns 1.
    """
    count = 0                 # COUNTING accumulator: initialize BEFORE the loop
    for k in range(n + 1):    # k runs over 0, 1, ..., n
        if math.cos(k) > 0:   # Count only when the condition holds
            count += 1        # Increment INSIDE the loop
    return count              # Return the result AFTER the loop
def run_test_draw_row_of_circles():
    """ Tests the draw_row_of_circles function. """
    print()
    print('--------------------------------------------------')
    print('Testing the draw_row_of_circles function:')
    print('  See the graphics windows that pop up.')
    print('--------------------------------------------------')

    # -------------------------------------------------------------------------
    # TWO tests on ONE window.
    # -------------------------------------------------------------------------
    title = 'Tests 1 and 2 of DRAW_ROW_OF_CIRCLES:'
    title = title + ' 7 GREEN circles, 4 BLUE circles!'
    window1 = rg.RoseWindow(500, 250, title)

    # Test 1:
    draw_row_of_circles(7, rg.Point(50, 50), 'green', window1)
    # Test 2:
    draw_row_of_circles(4, rg.Point(100, 150), 'blue', window1)
    window1.close_on_mouse_click()

    # -------------------------------------------------------------------------
    # A third test on ANOTHER window.
    # -------------------------------------------------------------------------
    title = 'Test 3 of DRAW_ROW_OF_CIRCLES: Row of 12 RED circles!'
    window2 = rg.RoseWindow(600, 150, title)

    # Test 3:
    draw_row_of_circles(12, rg.Point(50, 50), 'red', window2)
    window2.close_on_mouse_click()


def draw_row_of_circles(n, starting_point, color, window):
    """
    What comes in: The four arguments are:
      -- A positive integer n.
      -- An rg.Point.
      -- A color appropriate for rosegraphics (e.g. 'red').
      -- An rg.RoseWindow.
    What goes out: Nothing (i.e., None).
    Side effects: Draws n rg.Circle objects in a row on the given
      rg.RoseWindow, such that:
      -- The first rg.Circle is centered at the given starting_point.
      -- Each rg.Circle just touches the previous one (to its left).
      -- Each rg.Circle has radius 20 and is filled with the given color.
      Renders, but does NOT close, the rg.RoseWindow.
    Type hints:
      :type n: int
      :type starting_point: rg.Point
      :type color: str
      :type window: rg.RoseWindow
    """
    radius = 20
    diameter = 2 * radius

    # HELPER variables: (x, y) is the center of the NEXT circle to draw.
    # Initialize them BEFORE the loop so the FIRST circle lands on
    # starting_point; march x rightward one diameter per iteration
    # (the accumulator pattern, IN GRAPHICS).
    x = starting_point.x
    y = starting_point.y
    for _ in range(n):  # The loop index itself is not needed
        circle = rg.Circle(rg.Point(x, y), radius)
        circle.fill_color = color
        circle.attach_to(window)
        x = x + diameter

    window.render()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# (Note: this runs on import as well as on direct execution, since this
# exercise file deliberately has no `if __name__ == "__main__"` guard.)
# -----------------------------------------------------------------------------
main()
| 38.332344 | 80 | 0.515482 |
cad86a7243e528c06f55303f3ff41fefe2d757f3 | 4,241 | py | Python | devito/passes/clusters/cse.py | dabiged/devito | 3cea137538b641bb1788fde65176ffcf82ab0990 | [
"MIT"
] | null | null | null | devito/passes/clusters/cse.py | dabiged/devito | 3cea137538b641bb1788fde65176ffcf82ab0990 | [
"MIT"
] | 3 | 2020-11-30T05:38:22.000Z | 2022-03-07T14:02:05.000Z | devito/passes/clusters/cse.py | maelso/devito | d45932b08733ea7437e6a83fff816ee20d3fbada | [
"MIT"
] | null | null | null | from collections import OrderedDict
from devito.ir import DummyEq, Cluster, Scope
from devito.passes.clusters.utils import cluster_pass, makeit_ssa
from devito.symbolics import count, estimate_cost, q_xop, q_leaf, uxreplace
from devito.types import Scalar
__all__ = ['cse']
@cluster_pass
def cse(cluster, template, *args):
    """
    Common sub-expressions elimination (CSE) pass over a Cluster.
    """
    def make():
        # Build a fresh scalar temporary matching the Cluster's dtype.
        return Scalar(name=template(), dtype=cluster.dtype).indexify()
    return cluster.rebuild(_cse(cluster.exprs, make))
def _cse(maybe_exprs, make, mode='default'):
    """
    Main common sub-expressions elimination routine.

    Note: the output is guaranteed to be topologically sorted.

    Parameters
    ----------
    maybe_exprs : expr-like or list of expr-like or Cluster
        One or more expressions to which CSE is applied.
    make : callable
        Build symbols to store temporary, redundant values.
    mode : str, optional
        The CSE algorithm applied. Accepted: ['default'].
    """
    # Note: not defaulting to SymPy's CSE() function for three reasons:
    # - it also captures array index access functions (eg, i+1 in A[i+1] and B[i+1]);
    # - it sometimes "captures too much", losing factorization opportunities;
    # - very slow
    # TODO: a second "sympy" mode will be provided, relying on SymPy's CSE() but
    # also ensuring some form of post-processing
    assert mode == 'default'  # Only supported mode ATM

    # Just for flexibility, accept either Clusters or exprs
    if isinstance(maybe_exprs, Cluster):
        cluster = maybe_exprs
        processed = list(cluster.exprs)
        scope = cluster.scope
    else:
        processed = list(maybe_exprs)
        scope = Scope(maybe_exprs)

    # Some sub-expressions aren't really "common" -- that's the case of Dimension-
    # independent data dependences. For example:
    #
    # ... = ... a[i] + 1 ...
    # a[i] = ...
    # ... = ... a[i] + 1 ...
    #
    # `a[i] + 1` will be excluded, as there's a flow Dimension-independent data
    # dependence involving `a`
    exclude = {i.source.indexed for i in scope.d_flow.independent()}

    mapped = []
    while True:
        # Detect redundancies
        counted = count(mapped + processed, q_xop).items()
        targets = OrderedDict([(k, estimate_cost(k, True)) for k, v in counted if v > 1])

        # Rule out Dimension-independent data dependencies
        targets = OrderedDict([(k, v) for k, v in targets.items()
                               if not k.free_symbols & exclude])
        if not targets:
            break

        # Create temporaries for the most expensive redundant sub-expressions
        hit = max(targets.values())
        picked = [k for k, v in targets.items() if v == hit]
        mapper = OrderedDict([(e, make()) for e in picked])

        # Apply replacements
        processed = [uxreplace(e, mapper) for e in processed]
        mapped = [uxreplace(e, mapper) for e in mapped]
        mapped = [DummyEq(v, k) for k, v in reversed(list(mapper.items()))] + mapped

        # Update `exclude` for the same reasons as above -- to rule out CSE across
        # Dimension-independent data dependences
        exclude.update(mapper.values())
        # No per-iteration cleanup of `targets` is needed: it is recomputed
        # from scratch at the top of the loop (the original trailing
        # `targets.pop(...)` loop was dead code and has been removed).

    processed = mapped + processed

    # At this point we may have useless temporaries (e.g., r0=r1). Let's drop them
    processed = _compact_temporaries(processed)

    return processed
def _compact_temporaries(exprs):
    """
    Drop temporaries consisting of isolated symbols.
    """
    # First of all, convert to SSA
    exprs = makeit_ssa(exprs)

    # Temporaries whose RHS is a bare leaf (or Function) get inlined away
    mapper = {e.lhs: e.rhs for e in exprs
              if e.lhs.is_Symbol and (q_leaf(e.rhs) or e.rhs.is_Function)}

    processed = []
    for e in exprs:
        if e.lhs in mapper:
            # This temporary is dropped entirely
            continue
        # Substitute to a fixed point, since replacements may cascade
        current = e
        while True:
            substituted = uxreplace(current, mapper)
            if substituted == current:
                break
            current = substituted
        processed.append(current)
    return processed
| 32.875969 | 89 | 0.621552 |
cdfe2fe56719800b955b1069a20dbd45eb6587ae | 2,690 | py | Python | homeassistant/components/light/rfxtrx.py | don66/home-assistant | a277470363c0758bb305410aad49c257ff8bac40 | [
"Apache-2.0"
] | 37 | 2018-05-22T07:17:26.000Z | 2022-03-03T13:14:46.000Z | homeassistant/components/light/rfxtrx.py | don66/home-assistant | a277470363c0758bb305410aad49c257ff8bac40 | [
"Apache-2.0"
] | 125 | 2018-12-11T07:31:20.000Z | 2021-07-27T08:20:03.000Z | homeassistant/components/light/rfxtrx.py | don66/home-assistant | a277470363c0758bb305410aad49c257ff8bac40 | [
"Apache-2.0"
] | 8 | 2018-05-30T20:05:26.000Z | 2021-02-19T14:17:05.000Z | """
Support for RFXtrx lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.rfxtrx/
"""
import logging
import voluptuous as vol
import homeassistant.components.rfxtrx as rfxtrx
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light, PLATFORM_SCHEMA)
from homeassistant.const import CONF_NAME
from homeassistant.components.rfxtrx import (
CONF_AUTOMATIC_ADD, CONF_FIRE_EVENT, DEFAULT_SIGNAL_REPETITIONS,
CONF_SIGNAL_REPETITIONS, CONF_DEVICES)
from homeassistant.helpers import config_validation as cv
# This platform requires the shared rfxtrx component to be set up first.
DEPENDENCIES = ['rfxtrx']

_LOGGER = logging.getLogger(__name__)

# Per-device config: a name is mandatory; firing HA events is opt-in.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean
})
},
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS):
vol.Coerce(int),
})

# RFXtrx dimmable lights support brightness only (no color/temperature).
SUPPORT_RFXTRX = SUPPORT_BRIGHTNESS
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the RFXtrx platform."""
    import RFXtrx as rfxtrxmod

    # Register all statically configured lights.
    add_devices(rfxtrx.get_devices_from_config(config, RfxtrxLight))

    def light_update(event):
        """Handle light updates from the RFXtrx gateway."""
        # Only dimmable lighting devices are relevant to this platform.
        if (not isinstance(event.device, rfxtrxmod.LightingDevice)
                or not event.device.known_to_be_dimmable):
            return

        new_device = rfxtrx.get_new_device(event, config, RfxtrxLight)
        if new_device:
            add_devices([new_device])

        rfxtrx.apply_received_command(event)

    # Subscribe to main RFXtrx events
    if light_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS:
        rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(light_update)
class RfxtrxLight(rfxtrx.RfxtrxDevice, Light):
    """Representation of a RFXtrx light."""

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_RFXTRX

    def turn_on(self, **kwargs):
        """Turn the light on, optionally at a requested brightness."""
        brightness = kwargs.get(ATTR_BRIGHTNESS)
        if brightness is None:
            # No brightness requested: go to full power.
            self._brightness = 255
            self._send_command('turn_on')
        else:
            self._brightness = brightness
            # The device expects a 0..100 dim level, HA supplies 0..255.
            self._send_command('dim', brightness * 100 // 255)
| 31.27907 | 78 | 0.701115 |
7887e7c1e0e4d8d0fcd7c025d784eceb8b34bddd | 358 | py | Python | Algorithms/Solve Me First.py | lin826/Hackerrank-Codes | 6b2b83dd1f6a7b92b575d8f8d4d1dbe4cb845403 | [
"MIT"
] | 70 | 2020-10-04T09:23:15.000Z | 2022-02-01T09:44:39.000Z | Algorithms/Solve Me First.py | lin826/Hackerrank-Codes | 6b2b83dd1f6a7b92b575d8f8d4d1dbe4cb845403 | [
"MIT"
] | 148 | 2020-06-05T15:32:12.000Z | 2020-11-01T08:29:01.000Z | Algorithms/Solve Me First.py | lin826/Hackerrank-Codes | 6b2b83dd1f6a7b92b575d8f8d4d1dbe4cb845403 | [
"MIT"
] | 298 | 2020-10-04T04:27:01.000Z | 2022-03-07T04:02:59.000Z | '''
Complete the function solveMeFirst to compute the sum of two integers.
Function prototype:
int solveMeFirst(int a, int b);
where,
a is the first integer input.
b is the second integer input
Return values
sum of the above two integers
Sample Input
a = 2
b = 3
Sample Output
5
'''
# Read the two integers (one per line) and print their sum.
num1 = int(input())
num2 = int(input())
print(num1 + num2)
09fd45bcd48e64b00bc7a749222280ea5e9052bb | 1,682 | py | Python | setup.py | flowersteam/TeachMyAgent | a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e | [
"MIT"
] | 45 | 2021-03-19T00:16:57.000Z | 2022-03-20T14:02:18.000Z | setup.py | flowersteam/TeachMyAgent | a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e | [
"MIT"
] | 5 | 2021-04-26T06:21:10.000Z | 2021-12-24T02:57:02.000Z | setup.py | flowersteam/TeachMyAgent | a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e | [
"MIT"
] | 5 | 2021-03-23T20:21:14.000Z | 2022-03-22T14:55:11.000Z | from setuptools import setup
import sys
# Guard early: the pinned dependency set below only resolves on 3.6/3.7.
# (Fix: the two message fragments previously concatenated without a space.)
assert sys.version_info.major == 3 and sys.version_info.minor in [6, 7], \
    "This repo is designed to work with Python 3.6 or 3.7. " \
    "Please install it before proceeding."

setup(
    name='TeachMyAgent',
    py_modules=['TeachMyAgent'],
    version="1.0",
    install_requires=[
        'cloudpickle==1.2.0',
        'gym[atari,box2d,classic_control]>=0.10.8',
        'ipython',
        'joblib',
        'matplotlib',
        'numpy',
        'pandas',
        'pytest',
        'psutil',
        'scipy',
        'sklearn',
        'imageio',
        'seaborn==0.8.1',
        'dm-sonnet<2',
        'tensorflow-probability==0.7.0',
        'torch==1.4.0',
        'setuptools',
        'setuptools_scm',
        'pep517',
        'treelib',
        'gizeh',
        'tqdm',
        'emcee',
        'notebook'
    ],
    description="TeachMyAgent: A benchmark to study and compare ACL algorithms for DeepRL in continuous procedural environments.",
    author="Clément Romac",
)
# Ensure there is some TensorFlow build installed, with a version above 1.4.
import pkg_resources
import re

tf_pkg = None
for tf_pkg_name in ['tensorflow', 'tensorflow-gpu', 'tf-nightly', 'tf-nightly-gpu']:
    try:
        tf_pkg = pkg_resources.get_distribution(tf_pkg_name)
    except pkg_resources.DistributionNotFound:
        pass
assert tf_pkg is not None, 'TensorFlow needed, of version above 1.4'

from distutils.version import LooseVersion

# Strip any release-candidate suffix (e.g. "1.15.0rc2") before comparing.
tf_version = LooseVersion(re.sub(r'-?rc\d+$', '', tf_pkg.version))
assert LooseVersion('1.4.0') <= tf_version <= LooseVersion('1.15.5'), \
    'TensorFlow version between 1.4 and 1.15.5 required'
| 30.035714 | 130 | 0.622473 |
71b28dc2464d25510a6231d91649162bf32c9cbd | 1,430 | py | Python | pointcnn_cls/jbb_b128_s0512_e0512_v500.py | hft-ba/PointCNN | d99086ba4eac2357c6330d23891a4a81f1b4162f | [
"MIT"
] | null | null | null | pointcnn_cls/jbb_b128_s0512_e0512_v500.py | hft-ba/PointCNN | d99086ba4eac2357c6330d23891a4a81f1b4162f | [
"MIT"
] | null | null | null | pointcnn_cls/jbb_b128_s0512_e0512_v500.py | hft-ba/PointCNN | d99086ba4eac2357c6330d23891a4a81f1b4162f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
import sys
# NOTE(review): math appears unused in this visible span -- possibly kept for
# parity with sibling config files; confirm before removing.
import math
# Make the repository root (the parent of this config file's directory)
# importable so the shared data_utils module resolves.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Project-local data helpers (lives in the repository root added above).
import data_utils
# Loader hook consumed by the training driver; presumably loads the
# classification train/val split -- see data_utils.load_cls_train_val.
load_fn = data_utils.load_cls_train_val
# ----- data pipeline hooks --------------------------------------------------
balance_fn = None      # no class re-balancing
map_fn = None          # no per-sample transform
keep_remainder = True  # keep the final, partially-filled batch
save_ply_fn = None     # no point-cloud dumps during training

num_class = 40         # number of output classes

# ----- optimization schedule ------------------------------------------------
batch_size = 128
sample_num = 512       # points sampled per cloud
num_epochs = 512
step_val = 500         # run validation every 500 steps

learning_rate_base = 0.01
decay_steps = 8000
decay_rate = 0.5
learning_rate_min = 1e-6
weight_decay = 1e-5

# ----- augmentation (all disabled in this variant) --------------------------
jitter = 0.0
jitter_val = 0.0
jitter_test = 0.0

rotation_range = [0, 0, 0, 'u']
rotation_range_val = [0, 0, 0, 'u']
rotation_range_test = [0, 0, 0, 'u']
rotation_order = 'rxyz'

scaling_range = [0, 0, 0, 'g']
scaling_range_val = [0, 0, 0, 'u']
scaling_range_test = [0, 0, 0, 'u']

sample_num_variance = 1 // 8  # integer division: evaluates to 0 (fixed count)
sample_num_clip = 1 // 4      # integer division: evaluates to 0

# ----- network architecture -------------------------------------------------
x = 3  # channel-width multiplier applied to every layer

# X-Conv layers: K neighbors, dilation D, P representative points (-1 = all),
# C output channels, plus optional skip links.
xconv_param_name = ('K', 'D', 'P', 'C', 'links')
xconv_params = [
    {'K': 8,  'D': 1, 'P': -1,  'C': 16 * x,  'links': []},
    {'K': 12, 'D': 2, 'P': 384, 'C': 32 * x,  'links': []},
    {'K': 16, 'D': 2, 'P': 128, 'C': 64 * x,  'links': []},
    {'K': 16, 'D': 3, 'P': 128, 'C': 128 * x, 'links': []},
]

with_global = True

# Fully-connected head: width C and dropout rate per layer.
fc_param_name = ('C', 'dropout_rate')
fc_params = [
    {'C': 128 * x, 'dropout_rate': 0.0},
    {'C': 64 * x,  'dropout_rate': 0.8},
]

sampling = 'random'

optimizer = 'adam'
epsilon = 1e-2

data_dim = 6  # 6 input channels (presumably xyz + 3 extras -- confirm upstream)
use_extra_features = False
with_X_transformation = True
sorting_method = None
513e1a2cb3bb576c8a46555559c480487a598679 | 2,898 | py | Python | prepare/gamma_decode_gen_py.py | nick-lifx/hsbk_rgb | d48d4b18c9725c7654b032234523a6b87f6f0e9b | [
"MIT"
] | 2 | 2020-07-25T07:16:02.000Z | 2021-01-12T11:38:25.000Z | prepare/gamma_decode_gen_py.py | nick-lifx/hsbk_rgb | d48d4b18c9725c7654b032234523a6b87f6f0e9b | [
"MIT"
] | null | null | null | prepare/gamma_decode_gen_py.py | nick-lifx/hsbk_rgb | d48d4b18c9725c7654b032234523a6b87f6f0e9b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2020 Nick Downing
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# put utils into path
# temporary until we have proper Python packaging
import os.path
import sys
dirname = os.path.dirname(__file__)
sys.path.append(os.path.join(dirname, '..'))
import mpmath
import numpy
import utils.yaml_io
# Conventional process exit statuses for this command-line generator.
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
# High working precision for mpmath arithmetic (106 bits, well beyond IEEE
# double's 53) so downstream constant formatting is not precision-limited.
mpmath.mp.prec = 106
#numpy.set_printoptions(threshold = numpy.inf)
# Two positional arguments are required: the fit YAML and a device name.
if len(sys.argv) < 3:
  print(f'usage: {sys.argv[0]:s} gamma_decode_fit_in.yml device')
  sys.exit(EXIT_FAILURE)
# Positional arguments: the YAML produced by the gamma-decode fitting step,
# and the device name substituted into the generated module.
gamma_decode_fit_in = sys.argv[1]
device = sys.argv[2]
# Load the fit results; utils.yaml_io._import converts the parsed YAML back
# into Python values (project helper -- see the utils package for details).
gamma_decode_fit = utils.yaml_io._import(
  utils.yaml_io.read_file(gamma_decode_fit_in)
)
# Unpack the fitted parameters. gamma_a..gamma_e are presumably the piecewise
# gamma-curve constants, err the fit error, and exp0/exp1 an exponent range
# with post_factor a per-exponent scale table -- confirm against the fitter.
gamma_a = gamma_decode_fit['gamma_a']
gamma_b = gamma_decode_fit['gamma_b']
gamma_c = gamma_decode_fit['gamma_c']
gamma_d = gamma_decode_fit['gamma_d']
gamma_e = gamma_decode_fit['gamma_e']
p = gamma_decode_fit['p']
err = gamma_decode_fit['err']
exp0 = gamma_decode_fit['exp0']
exp1 = gamma_decode_fit['exp1']
post_factor = gamma_decode_fit['post_factor']
# Polynomial coefficients as float64 for formatting below; the emitted code
# evaluates them Horner-style with p[-1] as the leading coefficient.
p = numpy.array(p, numpy.double)
sys.stdout.write(
sys.stdin.read().format(
post_factor = ','.join(
[
f'\n {post_factor[i]:.16e}'
for i in range(post_factor.shape[0])
]
),
str_gamma_a = str(gamma_a),
str_gamma_b = str(gamma_b),
str_gamma_c = str(gamma_c),
str_gamma_d = str(gamma_d),
str_gamma_e = str(gamma_e),
two_minus_gamma_c = str(2. - gamma_c),
err = err,
device = device,
gamma_a_gamma_b = gamma_a * gamma_b,
gamma_a_recip = 1. / gamma_a,
gamma_c = gamma_c,
exp1_plus_one = exp1 + 1,
p_last = p[-1],
p = ''.join(
[
' y = y * x {0:s} {1:.16e}\n'.format(
'-' if p[i] < 0. else '+',
abs(p[i])
)
for i in range(p.shape[0] - 2, -1, -1)
]
),
minus_exp0 = -exp0
)
)
| 30.1875 | 79 | 0.697378 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.