code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
based in part on the algorithm of Ouding (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different Easter
calculation methods:
1. Original calculation in Julian calendar, valid in
dates after 326 AD
2. Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3. Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
* ``EASTER_JULIAN = 1``
* ``EASTER_ORTHODOX = 2``
* ``EASTER_WESTERN = 3``
The default method is method 3.
More about the algorithm may be found at:
`GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_
and
`The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_
"""
if not (1 <= method <= 3):
raise ValueError("invalid method")
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g + 15) % 30
j = (y + y//4 + i) % 7
if method == 2:
# Extra dates to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e + y//100 - 16 - (y//100 - 16)//4
else:
# New method
c = y//100
h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
j = (y + y//4 + i + 2 - c + c//4) % 7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i - j + e
d = 1 + (p + 27 + (p + 6)//40) % 31
m = 3 + (p + 26)//30
return datetime.date(int(y), int(m), int(d)) | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robocop_lib/dateutil/easter.py | 0.599133 | 0.386156 | easter.py | pypi |
import datetime
import calendar
import operator
from math import copysign
from six import integer_types
from warnings import warn
from ._common import weekday
# Build the seven weekday singletons (MO..SU) once; ``weekdays`` keeps them in
# a tuple so an integer weekday argument (0=MO) can be used as an index in
# relativedelta.__init__.
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class relativedelta(object):
    """
    The relativedelta type is designed to be applied to an existing datetime and
    can replace specific components of that datetime, or represents an interval
    of time.

    It is based on the specification of the excellent work done by M.-A. Lemburg
    in his
    `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
    However, notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There are two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes::

        relativedelta(datetime1, datetime2)

    The second one is passing it any number of the following keyword arguments::

        relativedelta(arg1=x,arg2=y,arg3=z...)

        year, month, day, hour, minute, second, microsecond:
            Absolute information (argument is singular); adding or subtracting a
            relativedelta with absolute information does not perform an arithmetic
            operation, but rather REPLACES the corresponding value in the
            original datetime with the value(s) in relativedelta.

        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative (argument is plural); adding
            or subtracting a relativedelta with relative information performs
            the corresponding arithmetic operation on the original datetime value
            with the information in the relativedelta.

        weekday:
            One of the weekday instances (MO, TU, etc) available in the
            relativedelta module. These instances may receive a parameter N,
            specifying the Nth weekday, which could be positive or negative
            (like MO(+1) or MO(-2)). Not specifying it is the same as specifying
            +1. You can also use an integer, where 0=MO. This argument is always
            relative e.g. if the calculated date is already Monday, using MO(1)
            or MO(-1) won't change the day. To effectively make it absolute, use
            it in combination with the day argument (e.g. day=1, MO(1) for first
            Monday of the month).

        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of february.

        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.

    There are relative and absolute forms of the keyword
    arguments. The plural is relative, and the singular is
    absolute. For each argument in the order below, the absolute form
    is applied first (by setting each attribute to that value) and
    then the relative form (by adding the value to the attribute).

    The order of attributes considered when this relativedelta is
    added to a datetime is:

    1. Year
    2. Month
    3. Day
    4. Hours
    5. Minutes
    6. Seconds
    7. Microseconds

    Finally, weekday is applied, using the rule described above.

    For example

    >>> from datetime import datetime
    >>> from dateutil.relativedelta import relativedelta, MO
    >>> dt = datetime(2018, 4, 9, 13, 37, 0)
    >>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
    >>> dt + delta
    datetime.datetime(2018, 4, 2, 14, 37)

    First, the day is set to 1 (the first of the month), then 25 hours
    are added, to get to the 2nd day and 14th hour, finally the
    weekday is applied, but since the 2nd is already a Monday there is
    no effect.
    """
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        # Two mutually exclusive construction modes: the difference between
        # two dates (dt1 - dt2), or explicit keyword arguments.
        if dt1 and dt2:
            # datetime is a subclass of date. So both must be date
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")
            # We allow two dates, or two datetimes, so we coerce them to be
            # of the same type
            if (isinstance(dt1, datetime.datetime) !=
                    isinstance(dt2, datetime.datetime)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            # Relative fields (plural names) start at zero...
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            # ...and absolute fields (singular names) are unused in diff mode.
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0
            # Get year / month delta between the two
            months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
            self._set_months(months)
            # Remove the year/month delta so the timedelta is just well-defined
            # time units (seconds, days and microseconds)
            dtm = self.__radd__(dt2)
            # If we've overshot our target, make an adjustment
            if dt1 < dt2:
                compare = operator.gt
                increment = 1
            else:
                compare = operator.lt
                increment = -1
            while compare(dt1, dtm):
                months += increment
                self._set_months(months)
                dtm = self.__radd__(dt2)
            # Get the timedelta between the "months-adjusted" date and dt1
            delta = dt1 - dtm
            self.seconds = delta.seconds + delta.days * 86400
            self.microseconds = delta.microseconds
        else:
            # Check for non-integer values in integer-only quantities
            if any(x is not None and x != int(x) for x in (years, months)):
                raise ValueError("Non-integer years and months are "
                                 "ambiguous and not currently supported.")
            # Relative information
            self.years = int(years)
            self.months = int(months)
            self.days = days + weeks * 7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            # Absolute information
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond
            if any(x is not None and int(x) != x
                   for x in (year, month, day, hour,
                             minute, second, microsecond)):
                # For now we'll deprecate floats - later it'll be an error.
                warn("Non-integer value passed as absolute information. " +
                     "This is not a well-defined condition and will raise " +
                     "errors in future versions.", DeprecationWarning)
            # An integer weekday (0=MO) selects the corresponding singleton.
            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday
            # Convert yearday/nlyearday into month/day (+leapdays) form.
            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                # Cumulative day counts at each month's end (non-leap year,
                # except the 366 terminator).
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)
        self._fix()
    def _fix(self):
        """Normalize relative fields in place by carrying overflow upward
        (microseconds -> seconds -> minutes -> hours -> days; months -> years)
        and recompute the ``_has_time`` flag."""
        if abs(self.microseconds) > 999999:
            s = _sign(self.microseconds)
            div, mod = divmod(self.microseconds * s, 1000000)
            self.microseconds = mod * s
            self.seconds += div * s
        if abs(self.seconds) > 59:
            s = _sign(self.seconds)
            div, mod = divmod(self.seconds * s, 60)
            self.seconds = mod * s
            self.minutes += div * s
        if abs(self.minutes) > 59:
            s = _sign(self.minutes)
            div, mod = divmod(self.minutes * s, 60)
            self.minutes = mod * s
            self.hours += div * s
        if abs(self.hours) > 23:
            s = _sign(self.hours)
            div, mod = divmod(self.hours * s, 24)
            self.hours = mod * s
            self.days += div * s
        if abs(self.months) > 11:
            s = _sign(self.months)
            div, mod = divmod(self.months * s, 12)
            self.months = mod * s
            self.years += div * s
        # _has_time records whether any time-of-day component is present;
        # __add__ uses it to promote a date operand to a datetime.
        if (self.hours or self.minutes or self.seconds or self.microseconds
                or self.hour is not None or self.minute is not None or
                self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0
    @property
    def weeks(self):
        # Derived view: number of whole weeks contained in ``days``
        # (truncated toward zero).
        return int(self.days / 7.0)
    @weeks.setter
    def weeks(self, value):
        # Replace only the whole-week portion of ``days``, preserving the
        # leftover days.
        self.days = self.days - (self.weeks * 7) + value * 7
    def _set_months(self, months):
        """Set ``months`` and immediately carry whole years into ``years``
        (overwriting ``years``, unlike :meth:`_fix` which accumulates)."""
        self.months = months
        if abs(self.months) > 11:
            s = _sign(self.months)
            div, mod = divmod(self.months * s, 12)
            self.months = mod * s
            self.years = div * s
        else:
            self.years = 0
    def normalized(self):
        """
        Return a version of this object represented entirely using integer
        values for the relative attributes.

        >>> relativedelta(days=1.5, hours=2).normalized()
        relativedelta(days=+1, hours=+14)

        :return:
            Returns a :class:`dateutil.relativedelta.relativedelta` object.
        """
        # Cascade remainders down (rounding each to roughly nearest microsecond)
        days = int(self.days)
        hours_f = round(self.hours + 24 * (self.days - days), 11)
        hours = int(hours_f)
        minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
        minutes = int(minutes_f)
        seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
        seconds = int(seconds_f)
        microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
        # Constructor carries overflow back up with call to _fix()
        return self.__class__(years=self.years, months=self.months,
                              days=days, hours=hours, minutes=minutes,
                              seconds=seconds, microseconds=microseconds,
                              leapdays=self.leapdays, year=self.year,
                              month=self.month, day=self.day,
                              weekday=self.weekday, hour=self.hour,
                              minute=self.minute, second=self.second,
                              microsecond=self.microsecond)
    def __add__(self, other):
        """Add to another relativedelta, a timedelta, or a date/datetime.
        Relative fields add; for absolute fields the left operand's value
        wins only when ``other`` does not define one."""
        if isinstance(other, relativedelta):
            return self.__class__(years=other.years + self.years,
                                  months=other.months + self.months,
                                  days=other.days + self.days,
                                  hours=other.hours + self.hours,
                                  minutes=other.minutes + self.minutes,
                                  seconds=other.seconds + self.seconds,
                                  microseconds=(other.microseconds +
                                                self.microseconds),
                                  leapdays=other.leapdays or self.leapdays,
                                  year=(other.year if other.year is not None
                                        else self.year),
                                  month=(other.month if other.month is not None
                                         else self.month),
                                  day=(other.day if other.day is not None
                                       else self.day),
                                  weekday=(other.weekday if other.weekday is not None
                                           else self.weekday),
                                  hour=(other.hour if other.hour is not None
                                        else self.hour),
                                  minute=(other.minute if other.minute is not None
                                          else self.minute),
                                  second=(other.second if other.second is not None
                                          else self.second),
                                  microsecond=(other.microsecond if other.microsecond
                                               is not None else
                                               self.microsecond))
        if isinstance(other, datetime.timedelta):
            return self.__class__(years=self.years,
                                  months=self.months,
                                  days=self.days + other.days,
                                  hours=self.hours,
                                  minutes=self.minutes,
                                  seconds=self.seconds + other.seconds,
                                  microseconds=self.microseconds + other.microseconds,
                                  leapdays=self.leapdays,
                                  year=self.year,
                                  month=self.month,
                                  day=self.day,
                                  weekday=self.weekday,
                                  hour=self.hour,
                                  minute=self.minute,
                                  second=self.second,
                                  microsecond=self.microsecond)
        if not isinstance(other, datetime.date):
            return NotImplemented
        elif self._has_time and not isinstance(other, datetime.datetime):
            # A date operand must become a datetime when time is involved.
            other = datetime.datetime.fromordinal(other.toordinal())
        # Apply absolute replacements first, then relative arithmetic
        # (year -> month -> day -> time), per the class docstring.
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day to the length of the target month (e.g. Jan 31 + 1
        # month -> Feb 28/29).
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        # Finally jump to the requested Nth weekday, if any.
        if self.weekday:
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth) - 1) * 7
            if nth > 0:
                jumpdays += (7 - ret.weekday() + weekday) % 7
            else:
                jumpdays += (ret.weekday() - weekday) % 7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret
    def __radd__(self, other):
        return self.__add__(other)
    def __rsub__(self, other):
        # date/datetime - relativedelta == date/datetime + (-relativedelta)
        return self.__neg__().__radd__(other)
    def __sub__(self, other):
        """Subtract another relativedelta; absolute fields keep the left
        operand's value when it defines one."""
        if not isinstance(other, relativedelta):
            return NotImplemented   # In case the other object defines __rsub__
        return self.__class__(years=self.years - other.years,
                              months=self.months - other.months,
                              days=self.days - other.days,
                              hours=self.hours - other.hours,
                              minutes=self.minutes - other.minutes,
                              seconds=self.seconds - other.seconds,
                              microseconds=self.microseconds - other.microseconds,
                              leapdays=self.leapdays or other.leapdays,
                              year=(self.year if self.year is not None
                                    else other.year),
                              month=(self.month if self.month is not None else
                                     other.month),
                              day=(self.day if self.day is not None else
                                   other.day),
                              weekday=(self.weekday if self.weekday is not None else
                                       other.weekday),
                              hour=(self.hour if self.hour is not None else
                                    other.hour),
                              minute=(self.minute if self.minute is not None else
                                      other.minute),
                              second=(self.second if self.second is not None else
                                      other.second),
                              microsecond=(self.microsecond if self.microsecond
                                           is not None else
                                           other.microsecond))
    def __abs__(self):
        # Only relative fields are negated/absoluted; absolute fields are
        # replacements and carry no sign.
        return self.__class__(years=abs(self.years),
                              months=abs(self.months),
                              days=abs(self.days),
                              hours=abs(self.hours),
                              minutes=abs(self.minutes),
                              seconds=abs(self.seconds),
                              microseconds=abs(self.microseconds),
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)
    def __neg__(self):
        return self.__class__(years=-self.years,
                              months=-self.months,
                              days=-self.days,
                              hours=-self.hours,
                              minutes=-self.minutes,
                              seconds=-self.seconds,
                              microseconds=-self.microseconds,
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)
    def __bool__(self):
        # Falsy only when every relative field is zero and every absolute
        # field is unset.
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)
    # Compatibility with Python 2.x
    __nonzero__ = __bool__
    def __mul__(self, other):
        """Scale the relative fields by a number (truncated to int);
        absolute fields are carried through unchanged."""
        try:
            f = float(other)
        except TypeError:
            return NotImplemented
        return self.__class__(years=int(self.years * f),
                              months=int(self.months * f),
                              days=int(self.days * f),
                              hours=int(self.hours * f),
                              minutes=int(self.minutes * f),
                              seconds=int(self.seconds * f),
                              microseconds=int(self.microseconds * f),
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)
    __rmul__ = __mul__
    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return NotImplemented
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            # MO and MO(1) are equivalent (n absent defaults to 1).
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                self.microseconds == other.microseconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)
    def __hash__(self):
        return hash((
            self.weekday,
            self.years,
            self.months,
            self.days,
            self.hours,
            self.minutes,
            self.seconds,
            self.microseconds,
            self.leapdays,
            self.year,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
            self.microsecond,
        ))
    def __ne__(self, other):
        return not self.__eq__(other)
    def __div__(self, other):
        # Division delegates to __mul__ via the reciprocal.
        try:
            reciprocal = 1 / float(other)
        except TypeError:
            return NotImplemented
        return self.__mul__(reciprocal)
    __truediv__ = __div__
    def __repr__(self):
        l = []
        # Relative fields shown with an explicit sign; absolute fields with
        # repr() of their value.
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("{attr}={value:+g}".format(attr=attr, value=value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                l.append("{attr}={value}".format(attr=attr, value=repr(value)))
        return "{classname}({attrs})".format(classname=self.__class__.__name__,
                                             attrs=", ".join(l))
def _sign(x):
return int(copysign(1, x))
# vim:ts=4:sw=4:et
from datetime import datetime, timedelta, time, date
import calendar
from dateutil import tz
from functools import wraps
import re
import six
__all__ = ["isoparse", "isoparser"]
def _takes_ascii(f):
    """Decorator: normalize a method's first positional argument to ASCII bytes.

    Accepts a ``bytes`` object, a text string, or a file-like stream (anything
    with a ``read`` method). Text is encoded as ASCII; a non-ASCII string
    raises :exc:`ValueError` chained from the original encoding error.
    """
    @wraps(f)
    def wrapper(self, value, *args, **kwargs):
        # Drain a stream argument up front.
        if hasattr(value, 'read'):
            value = value.read()
        # ISO-8601 only covers ASCII, and ASCII bytes are identical in UTF-8.
        if isinstance(value, six.text_type):
            try:
                value = value.encode('ascii')
            except UnicodeEncodeError as e:
                msg = 'ISO-8601 strings should contain only ASCII characters'
                six.raise_from(ValueError(msg), e)
        return f(self, value, *args, **kwargs)
    return wrapper
class isoparser(object):
    """Parser for ISO-8601 date, time and datetime strings.

    All parsing works on ASCII bytes internally (see ``_takes_ascii``);
    public entry points accept str, bytes or a readable stream.
    """
    def __init__(self, sep=None):
        """
        :param sep:
            A single character that separates date and time portions. If
            ``None``, the parser will accept any single character.
            For strict ISO-8601 adherence, pass ``'T'``.
        """
        if sep is not None:
            # Must be one ASCII, non-digit character (a digit would be
            # ambiguous with the date/time fields themselves).
            if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
                raise ValueError('Separator must be a single, non-numeric ' +
                                 'ASCII character')
            sep = sep.encode('ascii')
        # Stored as bytes (or None for "any single character").
        self._sep = sep
    @_takes_ascii
    def isoparse(self, dt_str):
        """
        Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.

        An ISO-8601 datetime string consists of a date portion, followed
        optionally by a time portion - the date and time portions are separated
        by a single character separator, which is ``T`` in the official
        standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
        combined with a time portion.

        Supported date formats are:

        Common:

        - ``YYYY``
        - ``YYYY-MM`` or ``YYYYMM``
        - ``YYYY-MM-DD`` or ``YYYYMMDD``

        Uncommon:

        - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0)
        - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day

        The ISO week and day numbering follows the same logic as
        :func:`datetime.date.isocalendar`.

        Supported time formats are:

        - ``hh``
        - ``hh:mm`` or ``hhmm``
        - ``hh:mm:ss`` or ``hhmmss``
        - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits)

        Midnight is a special case for `hh`, as the standard supports both
        00:00 and 24:00 as a representation. The decimal separator can be
        either a dot or a comma.

        .. caution::

            Support for fractional components other than seconds is part of the
            ISO-8601 standard, but is not currently implemented in this parser.

        Supported time zone offset formats are:

        - `Z` (UTC)
        - `±HH:MM`
        - `±HHMM`
        - `±HH`

        Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
        with the exception of UTC, which will be represented as
        :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
        as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.

        :param dt_str:
            A string or stream containing only an ISO-8601 datetime string

        :return:
            Returns a :class:`datetime.datetime` representing the string.
            Unspecified components default to their lowest value.

        .. warning::

            As of version 2.7.0, the strictness of the parser should not be
            considered a stable part of the contract. Any valid ISO-8601 string
            that parses correctly with the default settings will continue to
            parse correctly in future versions, but invalid strings that
            currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
            guaranteed to continue failing in future versions if they encode
            a valid date.

        .. versionadded:: 2.7.0
        """
        components, pos = self._parse_isodate(dt_str)
        if len(dt_str) > pos:
            if self._sep is None or dt_str[pos:pos + 1] == self._sep:
                components += self._parse_isotime(dt_str[pos + 1:])
            else:
                raise ValueError('String contains unknown ISO components')
        # 24:00:00 means midnight at the start of the *next* day.
        if len(components) > 3 and components[3] == 24:
            components[3] = 0
            return datetime(*components) + timedelta(days=1)
        return datetime(*components)
    @_takes_ascii
    def parse_isodate(self, datestr):
        """
        Parse the date portion of an ISO string.

        :param datestr:
            The string portion of an ISO string, without a separator

        :return:
            Returns a :class:`datetime.date` object
        """
        components, pos = self._parse_isodate(datestr)
        if pos < len(datestr):
            raise ValueError('String contains unknown ISO ' +
                             'components: {!r}'.format(datestr.decode('ascii')))
        return date(*components)
    @_takes_ascii
    def parse_isotime(self, timestr):
        """
        Parse the time portion of an ISO string.

        :param timestr:
            The time portion of an ISO string, without a separator

        :return:
            Returns a :class:`datetime.time` object
        """
        components = self._parse_isotime(timestr)
        # 24:00 normalizes to 00:00 (no date here to roll over).
        if components[0] == 24:
            components[0] = 0
        return time(*components)
    @_takes_ascii
    def parse_tzstr(self, tzstr, zero_as_utc=True):
        """
        Parse a valid ISO time zone string.

        See :func:`isoparser.isoparse` for details on supported formats.

        :param tzstr:
            A string representing an ISO time zone offset

        :param zero_as_utc:
            Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones

        :return:
            Returns :class:`dateutil.tz.tzoffset` for offsets and
            :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
            specified) offsets equivalent to UTC.
        """
        return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
    # Constants
    _DATE_SEP = b'-'
    _TIME_SEP = b':'
    _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)')
    def _parse_isodate(self, dt_str):
        """Parse a date; returns ([year, month, day], chars consumed).
        Tries the common calendar forms first, then week/ordinal forms."""
        try:
            return self._parse_isodate_common(dt_str)
        except ValueError:
            return self._parse_isodate_uncommon(dt_str)
    def _parse_isodate_common(self, dt_str):
        """Parse YYYY, YYYY-MM/YYYYMM or YYYY-MM-DD/YYYYMMDD."""
        len_str = len(dt_str)
        # Missing components default to 1 (January / first of month).
        components = [1, 1, 1]
        if len_str < 4:
            raise ValueError('ISO string too short')
        # Year
        components[0] = int(dt_str[0:4])
        pos = 4
        if pos >= len_str:
            return components, pos
        has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
        if has_sep:
            pos += 1
        # Month
        if len_str - pos < 2:
            raise ValueError('Invalid common month')
        components[1] = int(dt_str[pos:pos + 2])
        pos += 2
        if pos >= len_str:
            if has_sep:
                return components, pos
            else:
                # Bare YYYYMM is ambiguous with YYYYMMDD and is rejected.
                raise ValueError('Invalid ISO format')
        if has_sep:
            # Separator use must be consistent between month and day.
            if dt_str[pos:pos + 1] != self._DATE_SEP:
                raise ValueError('Invalid separator in ISO string')
            pos += 1
        # Day
        if len_str - pos < 2:
            raise ValueError('Invalid common day')
        components[2] = int(dt_str[pos:pos + 2])
        return components, pos + 2
    def _parse_isodate_uncommon(self, dt_str):
        """Parse ISO week dates (YYYY-Www[-D]) and ordinal dates (YYYY-DDD)."""
        if len(dt_str) < 4:
            raise ValueError('ISO string too short')
        # All ISO formats start with the year
        year = int(dt_str[0:4])
        has_sep = dt_str[4:5] == self._DATE_SEP
        pos = 4 + has_sep       # Skip '-' if it's there
        if dt_str[pos:pos + 1] == b'W':
            # YYYY-?Www-?D?
            pos += 1
            weekno = int(dt_str[pos:pos + 2])
            pos += 2
            dayno = 1
            if len(dt_str) > pos:
                if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
                    raise ValueError('Inconsistent use of dash separator')
                pos += has_sep
                dayno = int(dt_str[pos:pos + 1])
                pos += 1
            base_date = self._calculate_weekdate(year, weekno, dayno)
        else:
            # YYYYDDD or YYYY-DDD
            if len(dt_str) - pos < 3:
                raise ValueError('Invalid ordinal day')
            ordinal_day = int(dt_str[pos:pos + 3])
            pos += 3
            if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
                raise ValueError('Invalid ordinal day' +
                                 ' {} for year {}'.format(ordinal_day, year))
            base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)
        components = [base_date.year, base_date.month, base_date.day]
        return components, pos
    def _calculate_weekdate(self, year, week, day):
        """
        Calculate the day of corresponding to the ISO year-week-day calendar.

        This function is effectively the inverse of
        :func:`datetime.date.isocalendar`.

        :param year:
            The year in the ISO calendar

        :param week:
            The week in the ISO calendar - range is [1, 53]

        :param day:
            The day in the ISO calendar - range is [1 (MON), 7 (SUN)]

        :return:
            Returns a :class:`datetime.date`
        """
        if not 0 < week < 54:
            raise ValueError('Invalid week: {}'.format(week))
        if not 0 < day < 8:     # Range is 1-7
            raise ValueError('Invalid weekday: {}'.format(day))
        # Get week 1 for the specific year:
        jan_4 = date(year, 1, 4)   # Week 1 always has January 4th in it
        week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)
        # Now add the specific number of weeks and days to get what we want
        week_offset = (week - 1) * 7 + (day - 1)
        return week_1 + timedelta(days=week_offset)
    def _parse_isotime(self, timestr):
        """Parse hh[:mm[:ss[.ffffff]]][offset]; returns
        [hour, minute, second, microsecond, tzinfo-or-None]."""
        len_str = len(timestr)
        components = [0, 0, 0, 0, None]
        pos = 0
        comp = -1
        if len_str < 2:
            raise ValueError('ISO time too short')
        has_sep = False
        # comp walks hour(0) -> minute(1) -> second(2) -> fraction(3) -> tz(4).
        while pos < len_str and comp < 5:
            comp += 1
            if timestr[pos:pos + 1] in b'-+Zz':
                # Detect time zone boundary
                components[-1] = self._parse_tzstr(timestr[pos:])
                pos = len_str
                break
            if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP:
                has_sep = True
                pos += 1
            elif comp == 2 and has_sep:
                if timestr[pos:pos+1] != self._TIME_SEP:
                    raise ValueError('Inconsistent use of colon separator')
                pos += 1
            if comp < 3:
                # Hour, minute, second
                components[comp] = int(timestr[pos:pos + 2])
                pos += 2
            if comp == 3:
                # Fraction of a second
                frac = self._FRACTION_REGEX.match(timestr[pos:])
                if not frac:
                    continue
                us_str = frac.group(1)[:6]  # Truncate to microseconds
                # Right-scale so e.g. ".5" becomes 500000 microseconds.
                components[comp] = int(us_str) * 10**(6 - len(us_str))
                pos += len(frac.group())
        if pos < len_str:
            raise ValueError('Unused components in ISO string')
        if components[0] == 24:
            # Standard supports 00:00 and 24:00 as representations of midnight
            if any(component != 0 for component in components[1:4]):
                raise ValueError('Hour may only be 24 at 24:00:00.000')
        return components
    def _parse_tzstr(self, tzstr, zero_as_utc=True):
        """Parse ``Z``/``z`` or ``±HH[[:]MM]`` into a tzinfo object."""
        if tzstr == b'Z' or tzstr == b'z':
            return tz.UTC
        # 3 = ±HH, 5 = ±HHMM, 6 = ±HH:MM (plus the 1-char 'Z' handled above).
        if len(tzstr) not in {3, 5, 6}:
            raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')
        if tzstr[0:1] == b'-':
            mult = -1
        elif tzstr[0:1] == b'+':
            mult = 1
        else:
            raise ValueError('Time zone offset requires sign')
        hours = int(tzstr[1:3])
        if len(tzstr) == 3:
            minutes = 0
        else:
            # Skip the optional ':' between hours and minutes.
            minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])
        if zero_as_utc and hours == 0 and minutes == 0:
            return tz.UTC
        else:
            if minutes > 59:
                raise ValueError('Invalid minutes in time zone offset')
            if hours > 23:
                raise ValueError('Invalid hours in time zone offset')
            return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)
# Module-level singleton parser so the common case is simply ``isoparse(s)``.
DEFAULT_ISOPARSER = isoparser()
isoparse = DEFAULT_ISOPARSER.isoparse
from __future__ import annotations
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import lru_cache
import re
from typing import Any
from ._types import ParseFloat
# E.g.
# - 00:32:00.999999
# - 00:32:00
_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
RE_NUMBER = re.compile(
r"""
0
(?:
x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
|
b[01](?:_?[01])* # bin
|
o[0-7](?:_?[0-7])* # oct
)
|
[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
(?P<floatpart>
(?:\.[0-9](?:_?[0-9])*)? # optional fractional part
(?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
)
""",
flags=re.VERBOSE,
)
RE_LOCALTIME = re.compile(_TIME_RE_STR)
RE_DATETIME = re.compile(
rf"""
([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
(?:
[Tt ]
{_TIME_RE_STR}
(?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
)?
""",
flags=re.VERBOSE,
)
def match_to_datetime(match: re.Match) -> datetime | date:
    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.

    A match without a time portion yields a `date`; otherwise a `datetime`,
    timezone-aware when the string carried a ``Z`` or ``±HH:MM`` offset.

    Raises ValueError if the match does not correspond to a valid date
    or datetime.
    """
    groups = match.groups()
    year, month, day = (int(g) for g in groups[:3])
    hour_str, minute_str, sec_str, micros_str = groups[3:7]
    zulu_time, offset_sign_str, offset_hour_str, offset_minute_str = groups[7:]
    if hour_str is None:
        # Date-only match.
        return date(year, month, day)
    # Right-pad the fraction so e.g. ".45" becomes 450000 microseconds.
    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
    if offset_sign_str:
        tz: tzinfo | None = cached_tz(
            offset_hour_str, offset_minute_str, offset_sign_str
        )
    elif zulu_time:
        tz = timezone.utc
    else:  # local date-time
        tz = None
    return datetime(year, month, day, int(hour_str), int(minute_str),
                    int(sec_str), micros, tzinfo=tz)
@lru_cache(maxsize=None)
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
    """Build (and memoize) a fixed-offset timezone from string components.

    Documents usually repeat the same few offsets, so caching avoids
    constructing duplicate `timezone` objects.
    """
    offset = timedelta(hours=int(hour_str), minutes=int(minute_str))
    if sign_str == "-":
        offset = -offset
    return timezone(offset)
def match_to_localtime(match: re.Match) -> time:
    """Convert a `RE_LOCALTIME` match to a naive `datetime.time`."""
    hour, minute, sec, frac = match.groups()
    # Right-pad the fraction so e.g. ".5" becomes 500000 microseconds.
    microseconds = int(frac.ljust(6, "0")) if frac else 0
    return time(int(hour), int(minute), int(sec), microseconds)
def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
    """Convert a `RE_NUMBER` match to a number.

    If the ``floatpart`` named group matched (fractional and/or exponent
    part present), the whole lexeme is handed to ``parse_float``; otherwise
    it is parsed as an integer (base 0, so ``0x``/``0o``/``0b`` prefixes
    are honored).
    """
    if match.group("floatpart"):
        return parse_float(match.group())
    return int(match.group(), 0)
__all__ = ['BaseResolver', 'Resolver']
from .error import *
from .nodes import *
import re
class ResolverError(YAMLError):
    # Raised for invalid path-resolver registrations (see add_path_resolver).
    pass
class BaseResolver:
    """Resolve the tag of a node.

    Two mechanisms are supported:

    * implicit resolvers: ``(tag, regexp)`` pairs bucketed by the first
      character of a plain scalar's value;
    * path resolvers (experimental): resolve a tag from the node's
      position (path) within the document.
    """

    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'

    # Class-level registries; copy-on-write when a subclass registers its
    # own resolvers (see add_implicit_resolver / add_path_resolver).
    yaml_implicit_resolvers = {}
    yaml_path_resolvers = {}

    def __init__(self):
        # Per-instance stacks tracking path-resolver state while the
        # composer descends into / ascends out of collection nodes.
        self.resolver_exact_paths = []
        self.resolver_prefix_paths = []

    @classmethod
    def add_implicit_resolver(cls, tag, regexp, first):
        """Register ``regexp`` -> ``tag`` for plain scalars whose first
        character is one of ``first`` (``None`` adds a wildcard bucket)."""
        if not 'yaml_implicit_resolvers' in cls.__dict__:
            # Copy-on-write: give this class its own table so the change
            # does not leak into the superclass.
            implicit_resolvers = {}
            for key in cls.yaml_implicit_resolvers:
                implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
            cls.yaml_implicit_resolvers = implicit_resolvers
        if first is None:
            first = [None]
        for ch in first:
            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))

    @classmethod
    def add_path_resolver(cls, tag, path, kind=None):
        # Note: `add_path_resolver` is experimental.  The API could be changed.
        # `new_path` is a pattern that is matched against the path from the
        # root to the node that is being considered.  `node_path` elements are
        # tuples `(node_check, index_check)`.  `node_check` is a node class:
        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
        # matches any kind of a node.  `index_check` could be `None`, a boolean
        # value, a string value, or a number.  `None` and `False` match against
        # any _value_ of sequence and mapping nodes.  `True` matches against
        # any _key_ of a mapping node.  A string `index_check` matches against
        # a mapping value that corresponds to a scalar key which content is
        # equal to the `index_check` value.  An integer `index_check` matches
        # against a sequence value with the index equal to `index_check`.
        if not 'yaml_path_resolvers' in cls.__dict__:
            # Copy-on-write, mirroring add_implicit_resolver above.
            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
        new_path = []
        for element in path:
            if isinstance(element, (list, tuple)):
                if len(element) == 2:
                    node_check, index_check = element
                elif len(element) == 1:
                    node_check = element[0]
                    index_check = True
                else:
                    raise ResolverError("Invalid path element: %s" % element)
            else:
                node_check = None
                index_check = element
            # Accept Python builtin types as shorthands for node classes.
            if node_check is str:
                node_check = ScalarNode
            elif node_check is list:
                node_check = SequenceNode
            elif node_check is dict:
                node_check = MappingNode
            elif node_check not in [ScalarNode, SequenceNode, MappingNode]  \
                    and not isinstance(node_check, str) \
                    and node_check is not None:
                raise ResolverError("Invalid node checker: %s" % node_check)
            if not isinstance(index_check, (str, int))  \
                    and index_check is not None:
                raise ResolverError("Invalid index checker: %s" % index_check)
            new_path.append((node_check, index_check))
        if kind is str:
            kind = ScalarNode
        elif kind is list:
            kind = SequenceNode
        elif kind is dict:
            kind = MappingNode
        elif kind not in [ScalarNode, SequenceNode, MappingNode]    \
                and kind is not None:
            raise ResolverError("Invalid node kind: %s" % kind)
        cls.yaml_path_resolvers[tuple(new_path), kind] = tag

    def descend_resolver(self, current_node, current_index):
        """Push path-resolver state when entering a child of
        ``current_node`` at ``current_index`` (``None`` for the root)."""
        if not self.yaml_path_resolvers:
            return
        exact_paths = {}
        prefix_paths = []
        if current_node:
            depth = len(self.resolver_prefix_paths)
            for path, kind in self.resolver_prefix_paths[-1]:
                if self.check_resolver_prefix(depth, path, kind,
                        current_node, current_index):
                    # Fully matched paths become exact; longer ones stay
                    # candidates for deeper levels.
                    if len(path) > depth:
                        prefix_paths.append((path, kind))
                    else:
                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
        else:
            # At the root: every registered path is a fresh candidate.
            for path, kind in self.yaml_path_resolvers:
                if not path:
                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
                else:
                    prefix_paths.append((path, kind))
        self.resolver_exact_paths.append(exact_paths)
        self.resolver_prefix_paths.append(prefix_paths)

    def ascend_resolver(self):
        """Pop the path-resolver state pushed by descend_resolver."""
        if not self.yaml_path_resolvers:
            return
        self.resolver_exact_paths.pop()
        self.resolver_prefix_paths.pop()

    def check_resolver_prefix(self, depth, path, kind,
            current_node, current_index):
        """Return True if path element ``depth-1`` matches the current
        (node, index) position; used while descending the document."""
        node_check, index_check = path[depth-1]
        if isinstance(node_check, str):
            if current_node.tag != node_check:
                return
        elif node_check is not None:
            if not isinstance(current_node, node_check):
                return
        # `True` matches mapping keys only (index is None for keys).
        if index_check is True and current_index is not None:
            return
        if (index_check is False or index_check is None)    \
                and current_index is None:
            return
        if isinstance(index_check, str):
            if not (isinstance(current_index, ScalarNode)
                    and index_check == current_index.value):
                return
        elif isinstance(index_check, int) and not isinstance(index_check, bool):
            if index_check != current_index:
                return
        return True

    def resolve(self, kind, value, implicit):
        """Return the tag for a node of class ``kind`` with content
        ``value``; ``implicit`` is a (plain, quoted) pair of flags."""
        if kind is ScalarNode and implicit[0]:
            if value == '':
                resolvers = self.yaml_implicit_resolvers.get('', [])
            else:
                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
            wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
            for tag, regexp in resolvers + wildcard_resolvers:
                if regexp.match(value):
                    return tag
            # NOTE(review): kept from upstream PyYAML; the reassigned value
            # is not used below.
            implicit = implicit[1]
        if self.yaml_path_resolvers:
            exact_paths = self.resolver_exact_paths[-1]
            if kind in exact_paths:
                return exact_paths[kind]
            if None in exact_paths:
                return exact_paths[None]
        # Fall back to the default tag for the node kind.
        if kind is ScalarNode:
            return self.DEFAULT_SCALAR_TAG
        elif kind is SequenceNode:
            return self.DEFAULT_SEQUENCE_TAG
        elif kind is MappingNode:
            return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
    # Concrete resolver; the implicit resolvers registered below attach to
    # this class so BaseResolver stays clean for custom subclasses.
    pass
# Standard YAML 1.1 implicit scalar resolvers.  The third argument lists the
# possible first characters of a matching value (the lookup bucket).
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:bool',
        re.compile(r'''^(?:yes|Yes|YES|no|No|NO
                    |true|True|TRUE|false|False|FALSE
                    |on|On|ON|off|Off|OFF)$''', re.X),
        list('yYnNtTfFoO'))

# NOTE(review): the exponent requires an explicit sign ([eE][-+]), so a
# value like "1e5" resolves as str -- a long-standing YAML 1.1 quirk.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:float',
        re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
                    |\.[0-9][0-9_]*(?:[eE][-+][0-9]+)?
                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
                    |[-+]?\.(?:inf|Inf|INF)
                    |\.(?:nan|NaN|NAN))$''', re.X),
        list('-+0123456789.'))

# Binary, octal, decimal, hex and base-60 (sexagesimal) integers.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:int',
        re.compile(r'''^(?:[-+]?0b[0-1_]+
                    |[-+]?0[0-7_]+
                    |[-+]?(?:0|[1-9][0-9_]*)
                    |[-+]?0x[0-9a-fA-F_]+
                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
        list('-+0123456789'))

# The "<<" merge key for mappings.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:merge',
        re.compile(r'^(?:<<)$'),
        ['<'])

# Null: "~", the word null, or an empty value.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:null',
        re.compile(r'''^(?: ~
                    |null|Null|NULL
                    | )$''', re.X),
        ['~', 'n', 'N', ''])

Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:timestamp',
        re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
                     (?:[Tt]|[ \t]+)[0-9][0-9]?
                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
        list('0123456789'))

# The "=" default-value key.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:value',
        re.compile(r'^(?:=)$'),
        ['='])

# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:yaml',
        re.compile(r'^(?:!|&|\*)$'),
        list('!&*'))
import errno
import logging
import os
import os.path
import sys
import time
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from io import StringIO
from textwrap import dedent
from watchdog.utils import WatchdogShutdown, load_class
from watchdog.version import VERSION_STRING
# Default to INFO so watchmedo's own status messages are visible.
logging.basicConfig(level=logging.INFO)
# Keys recognized in a tricks YAML configuration file.
CONFIG_KEY_TRICKS = 'tricks'
CONFIG_KEY_PYTHON_PATH = 'python-path'
class HelpFormatter(RawDescriptionHelpFormatter):
    """A nicer help formatter.

    Help for arguments can be indented and contain new lines.
    It will be de-dented and arguments in the help
    will be separated by a blank line for better readability.

    Source: https://github.com/httpie/httpie/blob/2423f89/httpie/cli/argparser.py#L31
    """

    def __init__(self, *args, max_help_position=6, **kwargs):
        # Narrow the indent used for argument help text.
        kwargs['max_help_position'] = max_help_position
        super().__init__(*args, **kwargs)

    def _split_lines(self, text, width):
        # Keep the author's own line breaks; the trailing blank line
        # separates one argument's help from the next.
        cleaned = dedent(text).strip()
        return (cleaned + '\n\n').splitlines()
epilog = '''\
Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>.
Copyright 2012 Google, Inc & contributors.
Licensed under the terms of the Apache license, version 2.0. Please see
LICENSE in the source code for more information.'''
# Top-level CLI parser plus the sub-command registry populated by @command.
cli = ArgumentParser(epilog=epilog, formatter_class=HelpFormatter)
cli.add_argument('--version', action='version', version=VERSION_STRING)
subparsers = cli.add_subparsers(dest='top_command')
command_parsers = {}
def argument(*name_or_flags, **kwargs):
    """Convenience function to properly format arguments to pass to the
    command decorator.

    Returns a ``(flags, kwargs)`` pair consumable by ``parser.add_argument``.
    """
    return [*name_or_flags], kwargs
def command(args=(), parent=subparsers, cmd_aliases=()):
    """Decorator to define a new command in a sanity-preserving way.

    The function will be stored in the ``func`` variable when the parser
    parses arguments so that it can be called directly like so::

        >>> args = cli.parse_args()
        >>> args.func(args)

    :param args:
        Iterable of ``(name_or_flags, kwargs)`` pairs as produced by
        :func:`argument`, describing the sub-command's arguments.
        (Immutable default replaces the former mutable ``[]`` default.)
    :param parent:
        The subparsers object the new command is registered on.
    :param cmd_aliases:
        Alternative names for the command.
    """
    def decorator(func):
        name = func.__name__.replace('_', '-')
        # Guard against a missing docstring so help generation never crashes.
        desc = dedent(func.__doc__ or '')
        parser = parent.add_parser(name,
                                   description=desc,
                                   aliases=cmd_aliases,
                                   formatter_class=HelpFormatter)
        command_parsers[name] = parser
        verbosity_group = parser.add_mutually_exclusive_group()
        verbosity_group.add_argument('-q', '--quiet', dest='verbosity',
                                     action='append_const', const=-1)
        verbosity_group.add_argument('-v', '--verbose', dest='verbosity',
                                     action='append_const', const=1)
        for arg in args:
            parser.add_argument(*arg[0], **arg[1])
        parser.set_defaults(func=func)
        return func
    return decorator
def path_split(pathname_spec, separator=os.pathsep):
    """
    Splits a pathname specification separated by an OS-dependent separator.

    :param pathname_spec:
        The pathname specification.
    :param separator:
        (OS Dependent) `:` on Unix and `;` on Windows or user-specified.
    :returns:
        A list of the individual path components.
    """
    return pathname_spec.split(separator)
def add_to_sys_path(pathnames, index=0):
    """
    Adds specified paths at specified index into the sys.path list.

    :param pathnames:
        A list of paths to add to the sys.path
    :param index:
        (Default 0) The index in the sys.path list where the paths will be
        added.
    """
    # Insert back-to-front so the final sys.path order matches `pathnames`.
    for pathname in reversed(pathnames):
        sys.path.insert(index, pathname)
def load_config(tricks_file_pathname):
    """
    Loads the YAML configuration from the specified file.

    :param tricks_file_pathname:
        The path to the tricks configuration file.
    :returns:
        A dictionary of configuration information.
    """
    # Imported lazily: PyYAML is only needed by the tricks sub-commands.
    import yaml

    with open(tricks_file_pathname, 'rb') as f:
        content = f.read()
    return yaml.safe_load(content)
def parse_patterns(patterns_spec, ignore_patterns_spec, separator=';'):
    """
    Parses pattern argument specs and returns a two-tuple of
    (patterns, ignore_patterns).
    """
    patterns = patterns_spec.split(separator)
    ignore_patterns = ignore_patterns_spec.split(separator)
    # An empty ignore spec splits into [''], which would ignore everything;
    # treat it as "ignore nothing" instead.
    if ignore_patterns == ['']:
        return (patterns, [])
    return (patterns, ignore_patterns)
def observe_with(observer, event_handler, pathnames, recursive):
    """
    Single observer thread with a scheduled path and event handler.

    :param observer:
        The observer thread.
    :param event_handler:
        Event handler which will be called in response to file system events.
    :param pathnames:
        A list of pathnames to monitor.
    :param recursive:
        ``True`` if recursive; ``False`` otherwise.
    """
    # Deduplicate so each path is scheduled exactly once.
    unique_paths = set(pathnames)
    for pathname in unique_paths:
        observer.schedule(event_handler, pathname, recursive)
    observer.start()
    try:
        # Block until a termination signal raises WatchdogShutdown.
        while True:
            time.sleep(1)
    except WatchdogShutdown:
        observer.stop()
    observer.join()
def schedule_tricks(observer, tricks, pathname, recursive):
    """
    Schedules tricks with the specified observer and for the given watch
    path.

    :param observer:
        The observer thread into which to schedule the trick and watch.
    :param tricks:
        A list of tricks.
    :param pathname:
        A path name which should be watched.
    :param recursive:
        ``True`` if recursive; ``False`` otherwise.
    """
    for trick in tricks:
        # Each trick mapping is {dotted.ClassName: kwargs}.
        for trick_name, trick_kwargs in list(trick.items()):
            TrickClass = load_class(trick_name)
            handler = TrickClass(**trick_kwargs)
            # A trick may declare its own directory; fall back to `pathname`.
            watch_path = getattr(handler, 'source_directory', None) or pathname
            observer.schedule(handler, watch_path, recursive)
@command([argument('files',
                   nargs='*',
                   help='perform tricks from given file'),
          argument('--python-path',
                   default='.',
                   help=f'Paths separated by {os.pathsep!r} to add to the Python path.'),
          argument('--interval',
                   '--timeout',
                   dest='timeout',
                   default=1.0,
                   type=float,
                   help='Use this as the polling interval/blocking timeout (in seconds).'),
          argument('--recursive',
                   action='store_true',
                   default=True,
                   help='Recursively monitor paths (defaults to True).'),
          argument('--debug-force-polling',
                   action='store_true',
                   help='[debug] Forces polling.'),
          argument('--debug-force-kqueue',
                   action='store_true',
                   help='[debug] Forces BSD kqueue(2).'),
          argument('--debug-force-winapi',
                   action='store_true',
                   help='[debug] Forces Windows API.'),
          argument('--debug-force-fsevents',
                   action='store_true',
                   help='[debug] Forces macOS FSEvents.'),
          argument('--debug-force-inotify',
                   action='store_true',
                   help='[debug] Forces Linux inotify(7).')],
         cmd_aliases=['tricks'])
def tricks_from(args):
    """
    Command to execute tricks from a tricks configuration file.

    Starts one observer per configuration file, schedules each trick it
    declares, then blocks until a termination signal arrives.
    """
    # Explicit debug flags override the automatic platform selection.
    if args.debug_force_polling:
        from watchdog.observers.polling import PollingObserver as Observer
    elif args.debug_force_kqueue:
        from watchdog.observers.kqueue import KqueueObserver as Observer
    elif args.debug_force_winapi:
        from watchdog.observers.read_directory_changes import\
            WindowsApiObserver as Observer
    elif args.debug_force_inotify:
        from watchdog.observers.inotify import InotifyObserver as Observer
    elif args.debug_force_fsevents:
        from watchdog.observers.fsevents import FSEventsObserver as Observer
    else:
        # Automatically picks the most appropriate observer for the platform
        # on which it is running.
        from watchdog.observers import Observer

    add_to_sys_path(path_split(args.python_path))
    observers = []
    for tricks_file in args.files:
        observer = Observer(timeout=args.timeout)

        if not os.path.exists(tricks_file):
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), tricks_file)

        config = load_config(tricks_file)

        try:
            tricks = config[CONFIG_KEY_TRICKS]
        except KeyError:
            raise KeyError("No %r key specified in %s." % (
                           CONFIG_KEY_TRICKS, tricks_file))

        # A config file may extend the Python path for its own trick classes.
        if CONFIG_KEY_PYTHON_PATH in config:
            add_to_sys_path(config[CONFIG_KEY_PYTHON_PATH])

        # Watch the directory containing the tricks file by default.
        dir_path = os.path.dirname(tricks_file)
        if not dir_path:
            dir_path = os.path.relpath(os.getcwd())
        schedule_tricks(observer, tricks, dir_path, args.recursive)
        observer.start()
        observers.append(observer)

    try:
        # Block the main thread until shutdown is requested.
        while True:
            time.sleep(1)
    except WatchdogShutdown:
        for o in observers:
            o.unschedule_all()
            o.stop()
    for o in observers:
        o.join()
@command([argument('trick_paths',
                   nargs='*',
                   help='Dotted paths for all the tricks you want to generate.'),
          argument('--python-path',
                   default='.',
                   help=f'Paths separated by {os.pathsep!r} to add to the Python path.'),
          argument('--append-to-file',
                   default=None,
                   help='''
                   Appends the generated tricks YAML to a file.
                   If not specified, prints to standard output.'''),
          argument('-a',
                   '--append-only',
                   dest='append_only',
                   action='store_true',
                   help='''
                   If --append-to-file is not specified, produces output for
                   appending instead of a complete tricks YAML file.''')],
         cmd_aliases=['generate-tricks-yaml'])
def tricks_generate_yaml(args):
    """
    Command to generate Yaml configuration for tricks named on the command line.

    Writes the generated YAML to stdout, or appends it to the file given
    with ``--append-to-file`` (prefixing the header when the file is new).
    """
    # Imported lazily: PyYAML is only needed by the tricks sub-commands.
    import yaml

    python_paths = path_split(args.python_path)
    add_to_sys_path(python_paths)
    output = StringIO()

    for trick_path in args.trick_paths:
        TrickClass = load_class(trick_path)
        output.write(TrickClass.generate_yaml())

    content = output.getvalue()
    output.close()

    # Header declares the python-path and opens the "tricks:" section.
    header = yaml.dump({CONFIG_KEY_PYTHON_PATH: python_paths})
    header += "%s:\n" % CONFIG_KEY_TRICKS
    if args.append_to_file is None:
        # Output to standard output.
        if not args.append_only:
            content = header + content
        sys.stdout.write(content)
    else:
        if not os.path.exists(args.append_to_file):
            content = header + content
        # Fix: `content` is text, so the file must be opened in text append
        # mode; the previous binary mode ('ab') raised TypeError when
        # writing a `str` on Python 3.
        with open(args.append_to_file, 'a') as output:
            output.write(content)
@command([argument('directories',
                   nargs='*',
                   default='.',
                   help='Directories to watch. (default: \'.\').'),
          argument('-p',
                   '--pattern',
                   '--patterns',
                   dest='patterns',
                   default='*',
                   help='Matches event paths with these patterns (separated by ;).'),
          argument('-i',
                   '--ignore-pattern',
                   '--ignore-patterns',
                   dest='ignore_patterns',
                   default='',
                   help='Ignores event paths with these patterns (separated by ;).'),
          argument('-D',
                   '--ignore-directories',
                   dest='ignore_directories',
                   action='store_true',
                   help='Ignores events for directories.'),
          argument('-R',
                   '--recursive',
                   dest='recursive',
                   action='store_true',
                   help='Monitors the directories recursively.'),
          argument('--interval',
                   '--timeout',
                   dest='timeout',
                   default=1.0,
                   type=float,
                   help='Use this as the polling interval/blocking timeout.'),
          argument('--trace',
                   action='store_true',
                   help='Dumps complete dispatching trace.'),
          argument('--debug-force-polling',
                   action='store_true',
                   help='[debug] Forces polling.'),
          argument('--debug-force-kqueue',
                   action='store_true',
                   help='[debug] Forces BSD kqueue(2).'),
          argument('--debug-force-winapi',
                   action='store_true',
                   help='[debug] Forces Windows API.'),
          argument('--debug-force-fsevents',
                   action='store_true',
                   help='[debug] Forces macOS FSEvents.'),
          argument('--debug-force-inotify',
                   action='store_true',
                   help='[debug] Forces Linux inotify(7).')])
def log(args):
    """
    Command to log file system events to the console.

    Schedules a ``LoggerTrick`` on the requested directories and blocks
    until a termination signal arrives.
    """
    from watchdog.utils import echo
    from watchdog.tricks import LoggerTrick

    if args.trace:
        # Instrument every LoggerTrick method so each dispatch step is logged.
        class_module_logger = logging.getLogger(LoggerTrick.__module__)
        echo.echo_class(LoggerTrick, write=lambda msg: class_module_logger.info(msg))

    patterns, ignore_patterns =\
        parse_patterns(args.patterns, args.ignore_patterns)
    handler = LoggerTrick(patterns=patterns,
                          ignore_patterns=ignore_patterns,
                          ignore_directories=args.ignore_directories)
    # Explicit debug flags override the automatic platform selection.
    if args.debug_force_polling:
        from watchdog.observers.polling import PollingObserver as Observer
    elif args.debug_force_kqueue:
        from watchdog.observers.kqueue import KqueueObserver as Observer
    elif args.debug_force_winapi:
        from watchdog.observers.read_directory_changes import\
            WindowsApiObserver as Observer
    elif args.debug_force_inotify:
        from watchdog.observers.inotify import InotifyObserver as Observer
    elif args.debug_force_fsevents:
        from watchdog.observers.fsevents import FSEventsObserver as Observer
    else:
        # Automatically picks the most appropriate observer for the platform
        # on which it is running.
        from watchdog.observers import Observer
    observer = Observer(timeout=args.timeout)
    observe_with(observer, handler, args.directories, args.recursive)
@command([argument('directories',
                   nargs='*',
                   default='.',
                   help='Directories to watch.'),
          argument('-c',
                   '--command',
                   dest='command',
                   default=None,
                   help='''
                   Shell command executed in response to matching events.
                   These interpolation variables are available to your command string:

                   ${watch_src_path}    - event source path
                   ${watch_dest_path}   - event destination path (for moved events)
                   ${watch_event_type}  - event type
                   ${watch_object}      - 'file' or 'directory'

                   Note:
                   Please ensure you do not use double quotes (") to quote
                   your command string. That will force your shell to
                   interpolate before the command is processed by this
                   command.

                   Example:

                   --command='echo "${watch_src_path}"'
                   '''),
          argument('-p',
                   '--pattern',
                   '--patterns',
                   dest='patterns',
                   default='*',
                   help='Matches event paths with these patterns (separated by ;).'),
          argument('-i',
                   '--ignore-pattern',
                   '--ignore-patterns',
                   dest='ignore_patterns',
                   default='',
                   help='Ignores event paths with these patterns (separated by ;).'),
          argument('-D',
                   '--ignore-directories',
                   dest='ignore_directories',
                   default=False,
                   action='store_true',
                   help='Ignores events for directories.'),
          argument('-R',
                   '--recursive',
                   dest='recursive',
                   action='store_true',
                   help='Monitors the directories recursively.'),
          argument('--interval',
                   '--timeout',
                   dest='timeout',
                   default=1.0,
                   type=float,
                   help='Use this as the polling interval/blocking timeout.'),
          argument('-w', '--wait',
                   dest='wait_for_process',
                   action='store_true',
                   help='Wait for process to finish to avoid multiple simultaneous instances.'),
          argument('-W', '--drop',
                   dest='drop_during_process',
                   action='store_true',
                   help='Ignore events that occur while command is still being'
                        ' executed to avoid multiple simultaneous instances.'),
          argument('--debug-force-polling',
                   action='store_true',
                   help='[debug] Forces polling.')])
def shell_command(args):
    """
    Command to execute shell commands in response to file system events.

    Wires a ``ShellCommandTrick`` to an observer and blocks until a
    termination signal arrives.
    """
    from watchdog.tricks import ShellCommandTrick

    # Normalize a falsy command string (e.g. '') to None for the trick.
    if not args.command:
        args.command = None

    if args.debug_force_polling:
        from watchdog.observers.polling import PollingObserver as Observer
    else:
        from watchdog.observers import Observer

    patterns, ignore_patterns = parse_patterns(args.patterns,
                                               args.ignore_patterns)
    handler = ShellCommandTrick(shell_command=args.command,
                                patterns=patterns,
                                ignore_patterns=ignore_patterns,
                                ignore_directories=args.ignore_directories,
                                wait_for_process=args.wait_for_process,
                                drop_during_process=args.drop_during_process)
    observer = Observer(timeout=args.timeout)
    observe_with(observer, handler, args.directories, args.recursive)
@command([argument('command',
                   help='Long-running command to run in a subprocess.'),
          argument('command_args',
                   metavar='arg',
                   nargs='*',
                   help='''
                   Command arguments.

                   Note: Use -- before the command arguments, otherwise watchmedo will
                   try to interpret them.
                   '''),
          argument('-d',
                   '--directory',
                   dest='directories',
                   metavar='DIRECTORY',
                   action='append',
                   help='Directory to watch. Use another -d or --directory option '
                        'for each directory.'),
          argument('-p',
                   '--pattern',
                   '--patterns',
                   dest='patterns',
                   default='*',
                   help='Matches event paths with these patterns (separated by ;).'),
          argument('-i',
                   '--ignore-pattern',
                   '--ignore-patterns',
                   dest='ignore_patterns',
                   default='',
                   help='Ignores event paths with these patterns (separated by ;).'),
          argument('-D',
                   '--ignore-directories',
                   dest='ignore_directories',
                   default=False,
                   action='store_true',
                   help='Ignores events for directories.'),
          argument('-R',
                   '--recursive',
                   dest='recursive',
                   action='store_true',
                   help='Monitors the directories recursively.'),
          argument('--interval',
                   '--timeout',
                   dest='timeout',
                   default=1.0,
                   type=float,
                   help='Use this as the polling interval/blocking timeout.'),
          argument('--signal',
                   dest='signal',
                   default='SIGINT',
                   help='Stop the subprocess with this signal (default SIGINT).'),
          argument('--debug-force-polling',
                   action='store_true',
                   help='[debug] Forces polling.'),
          argument('--kill-after',
                   dest='kill_after',
                   default=10.0,
                   type=float,
                   help='When stopping, kill the subprocess after the specified timeout '
                        'in seconds (default 10.0).')])
def auto_restart(args):
    """
    Command to start a long-running subprocess and restart it on matched events.

    Installs termination-signal handlers, starts the subprocess through an
    ``AutoRestartTrick`` and blocks watching the given directories.
    """
    if args.debug_force_polling:
        from watchdog.observers.polling import PollingObserver as Observer
    else:
        from watchdog.observers import Observer

    from watchdog.tricks import AutoRestartTrick
    import signal

    if not args.directories:
        args.directories = ['.']

    # Allow either signal name or number.
    if args.signal.startswith("SIG"):
        stop_signal = getattr(signal, args.signal)
    else:
        stop_signal = int(args.signal)

    # Handle termination signals by raising a semantic exception which will
    # allow us to gracefully unwind and stop the observer
    termination_signals = {signal.SIGTERM, signal.SIGINT}

    def handler_termination_signal(_signum, _frame):
        # Neuter all signals so that we don't attempt a double shutdown
        for signum in termination_signals:
            signal.signal(signum, signal.SIG_IGN)
        raise WatchdogShutdown

    for signum in termination_signals:
        signal.signal(signum, handler_termination_signal)

    patterns, ignore_patterns = parse_patterns(args.patterns,
                                               args.ignore_patterns)
    # The subprocess command is the positional command plus its args.
    command = [args.command]
    command.extend(args.command_args)
    handler = AutoRestartTrick(command=command,
                               patterns=patterns,
                               ignore_patterns=ignore_patterns,
                               ignore_directories=args.ignore_directories,
                               stop_signal=stop_signal,
                               kill_after=args.kill_after)
    handler.start()
    observer = Observer(timeout=args.timeout)
    try:
        observe_with(observer, handler, args.directories, args.recursive)
    except WatchdogShutdown:
        pass
    finally:
        # Always stop the subprocess, even on unexpected errors.
        handler.stop()
class LogLevelException(Exception):
    """Raised when -q/-v flags request an out-of-range verbosity level."""
    pass
def _get_log_level_from_args(args):
verbosity = sum(args.verbosity or [])
if verbosity < -1:
raise LogLevelException("-q/--quiet may be specified only once.")
if verbosity > 2:
raise LogLevelException("-v/--verbose may be specified up to 2 times.")
return ['ERROR', 'WARNING', 'INFO', 'DEBUG'][1 + verbosity]
def main():
    """Entry-point: parse CLI arguments and dispatch to the chosen command."""
    parsed = cli.parse_args()
    if parsed.top_command is None:
        # No sub-command given: show the top-level help and fail.
        cli.print_help()
        return 1
    try:
        level = _get_log_level_from_args(parsed)
    except LogLevelException as exc:
        print("Error: " + exc.args[0], file=sys.stderr)
        command_parsers[parsed.top_command].print_help()
        return 1
    logging.getLogger('watchdog').setLevel(level)
    # @command stored the handler function on the parsed namespace.
    parsed.func(parsed)
    return 0
# Allow running as a script; the exit status mirrors main()'s return value.
if __name__ == '__main__':
    sys.exit(main())
import logging
from watchdog.utils import BaseThread
from watchdog.utils.delayed_queue import DelayedQueue
from watchdog.observers.inotify_c import Inotify
logger = logging.getLogger(__name__)
class InotifyBuffer(BaseThread):
    """A wrapper for `Inotify` that holds events for `delay` seconds. During
    this time, IN_MOVED_FROM and IN_MOVED_TO events are paired.
    """

    # Seconds an IN_MOVED_FROM event is held waiting for its IN_MOVED_TO
    # counterpart before being delivered unpaired.
    delay = 0.5

    def __init__(self, path, recursive=False):
        super().__init__()
        self._queue = DelayedQueue(self.delay)
        self._inotify = Inotify(path, recursive)
        # The buffer is itself a thread; start consuming events immediately.
        self.start()

    def read_event(self):
        """Returns a single event or a tuple of from/to events in case of a
        paired move event. If this buffer has been closed, immediately return
        None.
        """
        return self._queue.get()

    def on_thread_stop(self):
        # Closing the inotify fd unblocks read_events() in run().
        self._inotify.close()
        self._queue.close()

    def close(self):
        """Stop the buffer thread and wait for it to finish."""
        self.stop()
        self.join()

    def _group_events(self, event_list):
        """Group any matching move events"""
        grouped = []
        for inotify_event in event_list:
            logger.debug("in-event %s", inotify_event)

            def matching_from_event(event):
                # An unpaired IN_MOVED_FROM with the same cookie as the
                # current IN_MOVED_TO is its counterpart.
                return (not isinstance(event, tuple) and event.is_moved_from
                        and event.cookie == inotify_event.cookie)

            if inotify_event.is_moved_to:
                # Check if move_from is already in the buffer
                for index, event in enumerate(grouped):
                    if matching_from_event(event):
                        grouped[index] = (event, inotify_event)
                        break
                else:
                    # Check if move_from is in delayqueue already
                    from_event = self._queue.remove(matching_from_event)
                    if from_event is not None:
                        grouped.append((from_event, inotify_event))
                    else:
                        logger.debug("could not find matching move_from event")
                        grouped.append(inotify_event)
            else:
                grouped.append(inotify_event)
        return grouped

    def run(self):
        """Read event from `inotify` and add them to `queue`. When reading a
        IN_MOVE_TO event, remove the previous added matching IN_MOVE_FROM event
        and add them back to the queue as a tuple.
        """
        deleted_self = False
        while self.should_keep_running() and not deleted_self:
            inotify_events = self._inotify.read_events()
            grouped_events = self._group_events(inotify_events)
            for inotify_event in grouped_events:
                if not isinstance(inotify_event, tuple) and inotify_event.is_ignored:
                    if inotify_event.src_path == self._inotify.path:
                        # Watch was removed explicitly (inotify_rm_watch(2)) or automatically (file
                        # was deleted, or filesystem was unmounted), stop watching for events
                        deleted_self = True
                    continue

                # Only add delay for unmatched move_from events
                delay = not isinstance(inotify_event, tuple) and inotify_event.is_moved_from
                self._queue.put(inotify_event, delay)

                if not isinstance(inotify_event, tuple) and inotify_event.is_delete_self and \
                        inotify_event.src_path == self._inotify.path:
                    # Deleted the watched directory, stop watching for events
                    deleted_self = True
import inspect
import sys
def name(item):
    """Return an item's ``__name__``."""
    return getattr(item, "__name__")
def is_classmethod(instancemethod, klass):
    """Determine if an instancemethod is a classmethod.

    A classmethod shows up as a bound method whose ``__self__`` is the
    class itself rather than an instance.
    """
    if not inspect.ismethod(instancemethod):
        return False
    return instancemethod.__self__ is klass
def is_static_method(method, klass):
    """Return True if *method* is declared as a ``staticmethod`` anywhere in
    *klass*'s MRO.

    (The previous docstring claimed "instance method", but the code has
    always tested for staticmethod-ness; the for/else construct is also
    flattened into an explicit fallthrough return for clarity.)
    """
    for c in klass.mro():
        if method.__name__ in c.__dict__:
            # First class in the MRO that defines the name decides.
            return isinstance(c.__dict__[method.__name__], staticmethod)
    return False
def is_class_private_name(name):
    """Determine if a name is a class private name."""
    # System-defined names such as __init__ or __add__ both start AND end
    # with double underscores, so they are excluded.
    if not name.startswith("__"):
        return False
    return not name.endswith("__")
def method_name(method):
    """Return a method's name.

    This function returns the name the method is accessed by from
    outside the class (i.e. it prefixes "private" methods appropriately),
    mirroring Python's class-private name mangling.
    """
    mname = method.__name__
    if mname.startswith("__") and not mname.endswith("__"):
        mname = "_%s%s" % (method.__self__.__class__.__name__, mname)
    return mname
def format_arg_value(arg_val):
    """ Return a string representing a (name, value) pair.

    >>> format_arg_value(('x', (1, 2, 3)))
    'x=(1, 2, 3)'
    """
    arg, val = arg_val
    return f"{arg}={val!r}"
def echo(fn, write=sys.stdout.write):
    """ Echo calls to a function.

    Returns a decorated version of the input function which "echoes" calls
    made to it by writing out the function's name and the arguments it was
    called with.

    :param fn: the function to wrap.
    :param write: sink for the formatted call line (defaults to stdout).
    """
    import functools
    # Unpack function's arg count, arg names, arg defaults
    code = fn.__code__
    argcount = code.co_argcount
    argnames = code.co_varnames[:argcount]
    fn_defaults = fn.__defaults__ or list()
    # Defaults align with the trailing positional parameter names.
    argdefs = dict(list(zip(argnames[-len(fn_defaults):], fn_defaults)))

    @functools.wraps(fn)
    def wrapped(*v, **k):
        # Collect function arguments by chaining together positional,
        # defaulted, extra positional and keyword arguments.
        positional = list(map(format_arg_value, list(zip(argnames, v))))
        defaulted = [format_arg_value((a, argdefs[a]))
                     for a in argnames[len(v):] if a not in k]
        nameless = list(map(repr, v[argcount:]))
        keyword = list(map(format_arg_value, list(k.items())))
        args = positional + defaulted + nameless + keyword
        write("%s(%s)\n" % (name(fn), ", ".join(args)))
        return fn(*v, **k)
    return wrapped
def echo_instancemethod(klass, method, write=sys.stdout.write):
    """ Change an instancemethod so that calls to it are echoed.

    Replacing a classmethod is a little more tricky.
    See: http://www.python.org/doc/current/ref/types.html
    """
    mname = method_name(method)
    # Echoing __str__/__repr__ would recurse while printing the call itself.
    if mname in ("__str__", "__repr__"):
        return
    if is_classmethod(method, klass):
        setattr(klass, mname, classmethod(echo(method.__func__, write)))
    else:
        setattr(klass, mname, echo(method, write))
def echo_class(klass, write=sys.stdout.write):
    """ Echo calls to class methods and static functions
    """
    # In Python 3 only classmethods are returned as "methods" here; plain
    # instance methods are reported as functions below.
    for _, bound_method in inspect.getmembers(klass, inspect.ismethod):
        echo_instancemethod(klass, bound_method, write)
    for _, fn in inspect.getmembers(klass, inspect.isfunction):
        if is_static_method(fn, klass):
            setattr(klass, name(fn), staticmethod(echo(fn, write)))
        else:
            # Not a classmethod or staticmethod, so it is an instance method.
            echo_instancemethod(klass, fn, write)
def echo_module(mod, write=sys.stdout.write):
    """ Echo calls to functions and methods in a module.
    """
    functions = inspect.getmembers(mod, inspect.isfunction)
    for fname, fn in functions:
        setattr(mod, fname, echo(fn, write))
    classes = inspect.getmembers(mod, inspect.isclass)
    for _, klass in classes:
        echo_class(klass, write)
# Run the module's doctest examples when executed directly.
if __name__ == "__main__":
    import doctest
    optionflags = doctest.ELLIPSIS
    doctest.testfile('echoexample.txt', optionflags=optionflags)
    doctest.testmod(optionflags=optionflags)
# Non-pure path objects are only allowed on their respective OS's.
# Thus, these utilities require "pure" path objects that don't access the filesystem.
# Since pathlib doesn't have a `case_sensitive` parameter, we have to approximate it
# by converting input paths to `PureWindowsPath` and `PurePosixPath` where:
# - `PureWindowsPath` is always case-insensitive.
# - `PurePosixPath` is always case-sensitive.
# Reference: https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.match
from pathlib import PureWindowsPath, PurePosixPath
def _match_path(path, included_patterns, excluded_patterns, case_sensitive):
"""Internal function same as :func:`match_path` but does not check arguments."""
if case_sensitive:
path = PurePosixPath(path)
else:
included_patterns = {pattern.lower() for pattern in included_patterns}
excluded_patterns = {pattern.lower() for pattern in excluded_patterns}
path = PureWindowsPath(path)
common_patterns = included_patterns & excluded_patterns
if common_patterns:
raise ValueError('conflicting patterns `{}` included and excluded'.format(common_patterns))
return (any(path.match(p) for p in included_patterns)
and not any(path.match(p) for p in excluded_patterns))
def filter_paths(paths, included_patterns=None, excluded_patterns=None, case_sensitive=True):
    """Generate the paths that match the allowed patterns and are not
    dropped by the ignored patterns.

    :param paths:
        An iterable of path names to be filtered based on matching and
        ignored patterns.
    :param included_patterns:
        Wildcard patterns a path must match to be kept. ``None`` means
        ``["*"]``, which matches all files.
    :param excluded_patterns:
        Wildcard patterns that cause a path to be dropped. ``None`` means
        no files are ignored.
    :param case_sensitive:
        ``True`` if matching should be case-sensitive; ``False`` otherwise.
    :returns:
        A generator of pathnames that matched the allowable patterns and
        passed through the ignored patterns.
    """
    include = ["*"] if included_patterns is None else included_patterns
    exclude = [] if excluded_patterns is None else excluded_patterns
    for candidate in paths:
        if _match_path(candidate, set(include), set(exclude), case_sensitive):
            yield candidate
def match_any_paths(paths, included_patterns=None, excluded_patterns=None, case_sensitive=True):
    """Tell whether at least one of the paths matches the allowed patterns
    without being dropped by the ignored patterns.

    :param paths:
        An iterable of path names to be tested based on matching and
        ignored patterns.
    :param included_patterns:
        Wildcard patterns a path must match. ``None`` means ``["*"]``,
        which matches all files.
    :param excluded_patterns:
        Wildcard patterns that cause a path to be dropped. ``None`` means
        no files are ignored.
    :param case_sensitive:
        ``True`` if matching should be case-sensitive; ``False`` otherwise.
    :returns:
        ``True`` if any of the paths matches; ``False`` otherwise.
    """
    include = ["*"] if included_patterns is None else included_patterns
    exclude = [] if excluded_patterns is None else excluded_patterns
    # any() short-circuits on the first match, just like the original loop.
    return any(
        _match_path(candidate, set(include), set(exclude), case_sensitive)
        for candidate in paths
    )
import dataclasses
import re
import warnings
from typing import (
Any,
AnyStr,
Iterable,
Iterator,
Match as MatchHint,
Optional,
Pattern as PatternHint,
Tuple,
Union)
class Pattern(object):
    """
    Abstract definition of a pattern; concrete implementations must
    override :meth:`match_file`.
    """

    # Dict-less instances: only the single declared attribute exists.
    __slots__ = ('include',)

    def __init__(self, include: Optional[bool]) -> None:
        """
        Initializes the :class:`Pattern` instance.

        *include* (:class:`bool` or :data:`None`) is whether the matched
        files should be included (:data:`True`), excluded (:data:`False`),
        or whether this pattern is a null-operation (:data:`None`).
        """
        self.include = include

    def match(self, files: Iterable[str]) -> Iterator[str]:
        """
        DEPRECATED: replaced by :meth:`.match_file`; call that in a loop
        for similar results.

        *files* (:class:`~collections.abc.Iterable` of :class:`str`)
        contains each file relative to the root directory (e.g.,
        :data:`"relative/path/to/file"`).

        Yields each matched file path (:class:`str`).
        """
        warnings.warn((
            "{0.__module__}.{0.__qualname__}.match() is deprecated. Use "
            "{0.__module__}.{0.__qualname__}.match_file() with a loop for "
            "similar results."
        ).format(self.__class__), DeprecationWarning, stacklevel=2)
        for candidate in files:
            if self.match_file(candidate) is not None:
                yield candidate

    def match_file(self, file: str) -> Optional[Any]:
        """
        Matches this pattern against *file* (:class:`str`), the normalized
        file path.

        Returns the match result if *file* matched; otherwise, :data:`None`.
        """
        raise NotImplementedError((
            "{0.__module__}.{0.__qualname__} must override match_file()."
        ).format(self.__class__))
class RegexPattern(Pattern):
    """
    The :class:`RegexPattern` class is an implementation of a pattern
    using regular expressions.
    """

    # Keep the class dict-less.
    __slots__ = ('regex',)

    def __init__(
        self,
        pattern: Union[AnyStr, PatternHint, None],
        include: Optional[bool] = None,
    ) -> None:
        """
        Initializes the :class:`RegexPattern` instance.

        *pattern* (:class:`str`, :class:`bytes`, :class:`re.Pattern`, or
        :data:`None`) is the pattern to compile into a regular expression.
        :data:`None` denotes a null-operation pattern.

        *include* (:class:`bool` or :data:`None`) must be :data:`None`
        unless *pattern* is a precompiled regular expression (:class:`re.Pattern`)
        in which case it is whether matched files should be included
        (:data:`True`), excluded (:data:`False`), or is a null operation
        (:data:`None`).

        .. NOTE:: Subclasses do not need to support the *include*
           parameter.
        """
        if isinstance(pattern, (str, bytes)):
            assert include is None, (
                "include:{!r} must be null when pattern:{!r} is a string."
            ).format(include, pattern)
            regex, include = self.pattern_to_regex(pattern)
            # NOTE: Make sure to allow a null regular expression to be
            # returned for a null-operation.
            if include is not None:
                regex = re.compile(regex)
        elif pattern is not None and hasattr(pattern, 'match'):
            # Assume pattern is a precompiled regular expression.
            # - NOTE: Use the specified *include*.
            regex = pattern
        elif pattern is None:
            # NOTE: Make sure to allow a null pattern to be passed for a
            # null-operation.
            assert include is None, (
                "include:{!r} must be null when pattern:{!r} is null."
            ).format(include, pattern)
            # BUG FIX: `regex` was previously left unbound on this branch,
            # raising UnboundLocalError below when a null pattern was
            # passed. A null pattern maps to a null regex.
            regex = pattern
        else:
            raise TypeError("pattern:{!r} is not a string, re.Pattern, or None.".format(pattern))
        super(RegexPattern, self).__init__(include)
        self.regex: Optional[PatternHint] = regex
        """
        *regex* (:class:`re.Pattern` or :data:`None`) is the regular
        expression for the pattern, or :data:`None` for a null-operation.
        """

    def __eq__(self, other: object) -> bool:
        """
        Tests the equality of this regex pattern with *other* (:class:`RegexPattern`)
        by comparing their :attr:`~Pattern.include` and :attr:`~RegexPattern.regex`
        attributes.
        """
        # FIX: *other* is annotated as ``object`` (not ``RegexPattern``) to
        # satisfy the ``__eq__`` contract; non-patterns defer via
        # NotImplemented.
        if isinstance(other, RegexPattern):
            return self.include == other.include and self.regex == other.regex
        else:
            return NotImplemented

    def match_file(self, file: str) -> Optional['RegexMatchResult']:
        """
        Matches this pattern against *file* (:class:`str`), each file
        relative to the root directory (e.g., "relative/path/to/file").

        Returns the match result (:class:`RegexMatchResult`) if *file*
        matched; otherwise, :data:`None`.
        """
        # A null-operation pattern (include is None) never matches.
        if self.include is not None:
            match = self.regex.match(file)
            if match is not None:
                return RegexMatchResult(match)
        return None

    @classmethod
    def pattern_to_regex(cls, pattern: str) -> Tuple[str, bool]:
        """
        Convert the pattern into an uncompiled regular expression.

        *pattern* (:class:`str`) is the pattern to convert into a regular
        expression.

        Returns the uncompiled regular expression (:class:`str` or :data:`None`),
        and whether matched files should be included (:data:`True`),
        excluded (:data:`False`), or is a null-operation (:data:`None`).

        .. NOTE:: The default implementation simply returns *pattern* and
           :data:`True`.
        """
        return pattern, True
@dataclasses.dataclass()
class RegexMatchResult(object):
    """
    Carries the details of a successful regular-expression match.
    """

    # Keep the class dict-less.
    __slots__ = (
        'match',
    )

    # The :class:`re.Match` object produced by matching the pattern's regex.
    match: MatchHint
import sys
from collections.abc import (
Collection as CollectionType)
from itertools import (
zip_longest)
from os import (
PathLike)
from typing import (
AnyStr,
Callable,
Collection,
Iterable,
Iterator,
Optional,
Type,
TypeVar,
Union)
from . import util
from .pattern import (
Pattern)
from .util import (
StrPath,
TreeEntry,
_filter_patterns,
_is_iterable,
match_file,
normalize_file)
# Bound TypeVar so classmethods/fluent methods return the subclass type.
Self = TypeVar("Self", bound="PathSpec")
"""
:class:`PathSpec` self type hint to support Python v<3.11 using PEP 673
recommendation.
"""
class PathSpec(object):
    """
    The :class:`PathSpec` class is a wrapper around a list of compiled
    :class:`.Pattern` instances.
    """
    def __init__(self, patterns: Iterable[Pattern]) -> None:
        """
        Initializes the :class:`PathSpec` instance.
        *patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
        yields each compiled pattern (:class:`.Pattern`).
        """
        # Materialize one-shot iterators so the patterns can be matched
        # repeatedly; existing collections are stored as-is.
        self.patterns = patterns if isinstance(patterns, CollectionType) else list(patterns)
        """
        *patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
        contains the compiled patterns.
        """
    def __eq__(self, other: object) -> bool:
        """
        Tests the equality of this path-spec with *other* (:class:`PathSpec`)
        by comparing their :attr:`~PathSpec.patterns` attributes.
        """
        if isinstance(other, PathSpec):
            # zip_longest pads the shorter sequence with None, so lists of
            # different lengths compare unequal.
            paired_patterns = zip_longest(self.patterns, other.patterns)
            return all(a == b for a, b in paired_patterns)
        else:
            return NotImplemented
    def __len__(self) -> int:
        """
        Returns the number of compiled patterns this path-spec contains
        (:class:`int`).
        """
        return len(self.patterns)
    def __add__(self: Self, other: "PathSpec") -> Self:
        """
        Combines the :attr:`Pathspec.patterns` patterns from two
        :class:`PathSpec` instances.
        """
        if isinstance(other, PathSpec):
            # Preserve the (possibly subclassed) type of self.
            return self.__class__(self.patterns + other.patterns)
        else:
            return NotImplemented
    def __iadd__(self: Self, other: "PathSpec") -> Self:
        """
        Adds the :attr:`Pathspec.patterns` patterns from one :class:`PathSpec`
        instance to this instance.
        """
        if isinstance(other, PathSpec):
            self.patterns += other.patterns
            return self
        else:
            return NotImplemented
    @classmethod
    def from_lines(
        cls: Type[Self],
        pattern_factory: Union[str, Callable[[AnyStr], Pattern]],
        lines: Iterable[AnyStr],
    ) -> Self:
        """
        Compiles the pattern lines.
        *pattern_factory* can be either the name of a registered pattern
        factory (:class:`str`), or a :class:`~collections.abc.Callable` used
        to compile patterns. It must accept an uncompiled pattern (:class:`str`)
        and return the compiled pattern (:class:`.Pattern`).
        *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
        pattern (:class:`str`). This simply has to yield each line so it can
        be a :class:`io.TextIOBase` (e.g., from :func:`open` or
        :class:`io.StringIO`) or the result from :meth:`str.splitlines`.
        Returns the :class:`PathSpec` instance.
        """
        # A string names a registered factory; resolve it to the callable.
        if isinstance(pattern_factory, str):
            pattern_factory = util.lookup_pattern(pattern_factory)
        if not callable(pattern_factory):
            raise TypeError(f"pattern_factory:{pattern_factory!r} is not callable.")
        if not _is_iterable(lines):
            raise TypeError(f"lines:{lines!r} is not an iterable.")
        # Empty/falsy lines are skipped rather than compiled.
        patterns = [pattern_factory(line) for line in lines if line]
        return cls(patterns)
    def match_entries(
        self,
        entries: Iterable[TreeEntry],
        separators: Optional[Collection[str]] = None,
    ) -> Iterator[TreeEntry]:
        """
        Matches the entries to this path-spec.
        *entries* (:class:`~collections.abc.Iterable` of :class:`~util.TreeEntry`)
        contains the entries to be matched against :attr:`self.patterns <PathSpec.patterns>`.
        *separators* (:class:`~collections.abc.Collection` of :class:`str`;
        or :data:`None`) optionally contains the path separators to
        normalize. See :func:`~pathspec.util.normalize_file` for more
        information.
        Returns the matched entries (:class:`~collections.abc.Iterator` of
        :class:`~util.TreeEntry`).
        """
        if not _is_iterable(entries):
            raise TypeError(f"entries:{entries!r} is not an iterable.")
        # Filter the pattern list once, up front, rather than per entry.
        use_patterns = _filter_patterns(self.patterns)
        for entry in entries:
            norm_file = normalize_file(entry.path, separators)
            if self._match_file(use_patterns, norm_file):
                yield entry
    # Match files using the `match_file()` utility function. Subclasses
    # may override this method as an instance method. It does not have to
    # be a static method.
    _match_file = staticmethod(match_file)
    def match_file(
        self,
        file: StrPath,
        separators: Optional[Collection[str]] = None,
    ) -> bool:
        """
        Matches the file to this path-spec.
        *file* (:class:`str` or :class:`os.PathLike[str]`) is the file path to be
        matched against :attr:`self.patterns <PathSpec.patterns>`.
        *separators* (:class:`~collections.abc.Collection` of :class:`str`)
        optionally contains the path separators to normalize. See
        :func:`~pathspec.util.normalize_file` for more information.
        Returns :data:`True` if *file* matched; otherwise, :data:`False`.
        """
        norm_file = util.normalize_file(file, separators=separators)
        return self._match_file(self.patterns, norm_file)
    def match_files(
        self,
        files: Iterable[StrPath],
        separators: Optional[Collection[str]] = None,
    ) -> Iterator[StrPath]:
        """
        Matches the files to this path-spec.
        *files* (:class:`~collections.abc.Iterable` of :class:`str` or
        :class:`os.PathLike[str]`) contains the file paths to be matched against
        :attr:`self.patterns <PathSpec.patterns>`.
        *separators* (:class:`~collections.abc.Collection` of :class:`str`;
        or :data:`None`) optionally contains the path separators to
        normalize. See :func:`~pathspec.util.normalize_file` for more
        information.
        Returns the matched files (:class:`~collections.abc.Iterator` of
        :class:`str` or :class:`os.PathLike[str]`).
        """
        if not _is_iterable(files):
            raise TypeError(f"files:{files!r} is not an iterable.")
        # Filter the pattern list once, up front, rather than per file.
        use_patterns = _filter_patterns(self.patterns)
        for orig_file in files:
            norm_file = normalize_file(orig_file, separators)
            if self._match_file(use_patterns, norm_file):
                # Yield the original (un-normalized) path value.
                yield orig_file
    def match_tree_entries(
        self,
        root: StrPath,
        on_error: Optional[Callable] = None,
        follow_links: Optional[bool] = None,
    ) -> Iterator[TreeEntry]:
        """
        Walks the specified root path for all files and matches them to this
        path-spec.
        *root* (:class:`str` or :class:`os.PathLike[str]`) is the root directory
        to search.
        *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
        optionally is the error handler for file-system exceptions. See
        :func:`~pathspec.util.iter_tree_entries` for more information.
        *follow_links* (:class:`bool` or :data:`None`) optionally is whether
        to walk symbolic links that resolve to directories. See
        :func:`~pathspec.util.iter_tree_files` for more information.
        Returns the matched files (:class:`~collections.abc.Iterator` of
        :class:`.TreeEntry`).
        """
        entries = util.iter_tree_entries(root, on_error=on_error, follow_links=follow_links)
        yield from self.match_entries(entries)
    def match_tree_files(
        self,
        root: StrPath,
        on_error: Optional[Callable] = None,
        follow_links: Optional[bool] = None,
    ) -> Iterator[str]:
        """
        Walks the specified root path for all files and matches them to this
        path-spec.
        *root* (:class:`str` or :class:`os.PathLike[str]`) is the root directory
        to search for files.
        *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
        optionally is the error handler for file-system exceptions. See
        :func:`~pathspec.util.iter_tree_files` for more information.
        *follow_links* (:class:`bool` or :data:`None`) optionally is whether
        to walk symbolic links that resolve to directories. See
        :func:`~pathspec.util.iter_tree_files` for more information.
        Returns the matched files (:class:`~collections.abc.Iterable` of
        :class:`str`).
        """
        files = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)
        yield from self.match_files(files)
    # Alias `match_tree_files()` as `match_tree()` for backward
    # compatibility before v0.3.2.
    match_tree = match_tree_files
from typing import (
AnyStr,
Callable,
Collection,
Iterable,
Type,
TypeVar,
Union)
from .pathspec import (
PathSpec)
from .pattern import (
Pattern)
from .patterns.gitwildmatch import (
GitWildMatchPattern,
GitWildMatchPatternError,
_DIR_MARK)
from .util import (
_is_iterable)
# Bound TypeVar so classmethods/fluent methods return the subclass type.
Self = TypeVar("Self", bound="GitIgnoreSpec")
"""
:class:`GitIgnoreSpec` self type hint to support Python v<3.11 using PEP
673 recommendation.
"""
class GitIgnoreSpec(PathSpec):
    """
    The :class:`GitIgnoreSpec` class extends :class:`PathSpec` to
    replicate *.gitignore* behavior.
    """
    def __eq__(self, other: object) -> bool:
        """
        Tests the equality of this gitignore-spec with *other*
        (:class:`GitIgnoreSpec`) by comparing their :attr:`~PathSpec.patterns`
        attributes. A non-:class:`GitIgnoreSpec` will not compare equal.
        """
        if isinstance(other, GitIgnoreSpec):
            return super().__eq__(other)
        elif isinstance(other, PathSpec):
            # A plain PathSpec never compares equal, even with identical
            # patterns, because the matching semantics differ.
            return False
        else:
            return NotImplemented
    @classmethod
    def from_lines(
        cls: Type[Self],
        lines: Iterable[AnyStr],
        pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None,
    ) -> Self:
        """
        Compiles the pattern lines.
        *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
        pattern (:class:`str`). This simply has to yield each line so it can
        be a :class:`io.TextIOBase` (e.g., from :func:`open` or
        :class:`io.StringIO`) or the result from :meth:`str.splitlines`.
        *pattern_factory* can be :data:`None`, the name of a registered
        pattern factory (:class:`str`), or a :class:`~collections.abc.Callable`
        used to compile patterns. The callable must accept an uncompiled
        pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`).
        Default is :data:`None` for :class:`.GitWildMatchPattern`).
        Returns the :class:`GitIgnoreSpec` instance.
        """
        if pattern_factory is None:
            pattern_factory = GitWildMatchPattern
        elif (isinstance(lines, str) or callable(lines)) and _is_iterable(pattern_factory):
            # Support reversed order of arguments from PathSpec.
            pattern_factory, lines = lines, pattern_factory
        self = super().from_lines(pattern_factory, lines)
        return self  # type: ignore
    @staticmethod
    def _match_file(
        patterns: Collection[GitWildMatchPattern],
        file: str,
    ) -> bool:
        """
        Matches the file to the patterns.
        .. NOTE:: Subclasses of :class:`.PathSpec` may override this
        method as an instance method. It does not have to be a static
        method.
        *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
        contains the patterns to use.
        *file* (:class:`str`) is the normalized file path to be matched
        against *patterns*.
        Returns :data:`True` if *file* matched; otherwise, :data:`False`.
        """
        out_matched = False
        out_priority = 0
        for pattern in patterns:
            # Null-operation patterns (include is None) are skipped.
            if pattern.include is not None:
                match = pattern.match_file(file)
                if match is not None:
                    # Pattern matched.
                    # Check for directory marker.
                    try:
                        dir_mark = match.match.group(_DIR_MARK)
                    except IndexError as e:
                        # NOTICE: The exact content of this error message is subject
                        # to change.
                        raise GitWildMatchPatternError((
                            f"Invalid git pattern: directory marker regex group is missing. "
                            f"Debug: file={file!r} regex={pattern.regex!r} "
                            f"group={_DIR_MARK!r} match={match.match!r}."
                        )) from e
                    if dir_mark:
                        # Pattern matched by a directory pattern.
                        priority = 1
                    else:
                        # Pattern matched by a file pattern.
                        priority = 2
                    if pattern.include and dir_mark:
                        # An include that matched via its directory marker
                        # always takes effect, regardless of the priority
                        # recorded so far.
                        out_matched = pattern.include
                        out_priority = priority
                    elif priority >= out_priority:
                        # Otherwise later patterns win only at equal or
                        # higher priority (file matches outrank directory
                        # matches).
                        out_matched = pattern.include
                        out_priority = priority
        return out_matched
import typing as t
from contextlib import contextmanager
from gettext import gettext as _
from ._compat import term_len
from .parser import split_opt
# Global override for the computed output width; the test system sets this
# to get deterministic help output regardless of the terminal size.
FORCED_WIDTH: t.Optional[int] = None
def measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]:
    """Return the maximum display width of each column across *rows*."""
    widths: t.Dict[int, int] = {}
    for row in rows:
        for col_idx, cell in enumerate(row):
            # Ensure the column exists even for all-empty cells.
            best = widths.setdefault(col_idx, 0)
            cell_len = term_len(cell)
            if cell_len > best:
                widths[col_idx] = cell_len
    return tuple(widths[idx] for idx in sorted(widths))
def iter_rows(
    rows: t.Iterable[t.Tuple[str, str]], col_count: int
) -> t.Iterator[t.Tuple[str, ...]]:
    """Yield each row padded with empty strings up to *col_count* columns."""
    for row in rows:
        padding = ("",) * (col_count - len(row))
        yield row + padding
def wrap_text(
    text: str,
    width: int = 78,
    initial_indent: str = "",
    subsequent_indent: str = "",
    preserve_paragraphs: bool = False,
) -> str:
    """A helper function that intelligently wraps text. By default, it
    assumes that it operates on a single paragraph of text but if the
    `preserve_paragraphs` parameter is provided it will intelligently
    handle paragraphs (defined by two empty lines).
    If paragraphs are handled, a paragraph can be prefixed with an empty
    line containing the ``\\b`` character (``\\x08``) to indicate that
    no rewrapping should happen in that block.
    :param text: the text that should be rewrapped.
    :param width: the maximum width for the text.
    :param initial_indent: the initial indent that should be placed on the
        first line as a string.
    :param subsequent_indent: the indent string that should be placed on
        each consecutive line.
    :param preserve_paragraphs: if this flag is set then the wrapping will
        intelligently handle paragraphs.
    """
    from ._textwrap import TextWrapper
    text = text.expandtabs()
    wrapper = TextWrapper(
        width,
        initial_indent=initial_indent,
        subsequent_indent=subsequent_indent,
        replace_whitespace=False,
    )
    if not preserve_paragraphs:
        return wrapper.fill(text)
    # Collected paragraphs as (indent, is_raw, text) triples.
    p: t.List[t.Tuple[int, bool, str]] = []
    # Lines of the paragraph currently being accumulated.
    buf: t.List[str] = []
    indent = None
    def _flush_par() -> None:
        # Finalize the buffered paragraph; a leading "\b" line marks it as
        # "raw" (indent only, no rewrapping).
        if not buf:
            return
        if buf[0].strip() == "\b":
            p.append((indent or 0, True, "\n".join(buf[1:])))
        else:
            p.append((indent or 0, False, " ".join(buf)))
        del buf[:]
    for line in text.splitlines():
        if not line:
            # Blank line terminates the current paragraph.
            _flush_par()
            indent = None
        else:
            if indent is None:
                # The first line of a paragraph determines its indent.
                orig_len = term_len(line)
                line = line.lstrip()
                indent = orig_len - term_len(line)
            buf.append(line)
    _flush_par()
    rv = []
    for indent, raw, text in p:
        with wrapper.extra_indent(" " * indent):
            if raw:
                # Raw paragraphs are only indented, never rewrapped.
                rv.append(wrapper.indent_only(text))
            else:
                rv.append(wrapper.fill(text))
    return "\n\n".join(rv)
class HelpFormatter:
    """This class helps with formatting text-based help pages. It's
    usually just needed for very special internal cases, but it's also
    exposed so that developers can write their own fancy outputs.
    At present, it always writes into memory.
    :param indent_increment: the additional increment for each level.
    :param width: the width for the text. This defaults to the terminal
        width clamped to a maximum of 78.
    """
    def __init__(
        self,
        indent_increment: int = 2,
        width: t.Optional[int] = None,
        max_width: t.Optional[int] = None,
    ) -> None:
        import shutil
        self.indent_increment = indent_increment
        if max_width is None:
            max_width = 80
        if width is None:
            width = FORCED_WIDTH
        if width is None:
            # Clamp to the terminal width (minus a 2-column margin), capped
            # at max_width and never below 50 columns.
            width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)
        self.width = width
        self.current_indent = 0
        self.buffer: t.List[str] = []
    def write(self, string: str) -> None:
        """Writes a unicode string into the internal buffer."""
        self.buffer.append(string)
    def indent(self) -> None:
        """Increases the indentation."""
        self.current_indent += self.indent_increment
    def dedent(self) -> None:
        """Decreases the indentation."""
        self.current_indent -= self.indent_increment
    def write_usage(
        self, prog: str, args: str = "", prefix: t.Optional[str] = None
    ) -> None:
        """Writes a usage line into the buffer.
        :param prog: the program name.
        :param args: whitespace separated list of arguments.
        :param prefix: The prefix for the first line. Defaults to
            ``"Usage: "``.
        """
        if prefix is None:
            prefix = f"{_('Usage:')} "
        usage_prefix = f"{prefix:>{self.current_indent}}{prog} "
        text_width = self.width - self.current_indent
        if text_width >= (term_len(usage_prefix) + 20):
            # The arguments will fit to the right of the prefix.
            indent = " " * term_len(usage_prefix)
            self.write(
                wrap_text(
                    args,
                    text_width,
                    initial_indent=usage_prefix,
                    subsequent_indent=indent,
                )
            )
        else:
            # The prefix is too long, put the arguments on the next line.
            self.write(usage_prefix)
            self.write("\n")
            indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
            self.write(
                wrap_text(
                    args, text_width, initial_indent=indent, subsequent_indent=indent
                )
            )
        self.write("\n")
    def write_heading(self, heading: str) -> None:
        """Writes a heading into the buffer."""
        self.write(f"{'':>{self.current_indent}}{heading}:\n")
    def write_paragraph(self) -> None:
        """Writes a paragraph into the buffer."""
        # Only emit a separating newline once something has been written.
        if self.buffer:
            self.write("\n")
    def write_text(self, text: str) -> None:
        """Writes re-indented text into the buffer. This rewraps and
        preserves paragraphs.
        """
        indent = " " * self.current_indent
        self.write(
            wrap_text(
                text,
                self.width,
                initial_indent=indent,
                subsequent_indent=indent,
                preserve_paragraphs=True,
            )
        )
        self.write("\n")
    def write_dl(
        self,
        rows: t.Sequence[t.Tuple[str, str]],
        col_max: int = 30,
        col_spacing: int = 2,
    ) -> None:
        """Writes a definition list into the buffer. This is how options
        and commands are usually formatted.
        :param rows: a list of two item tuples for the terms and values.
        :param col_max: the maximum width of the first column.
        :param col_spacing: the number of spaces between the first and
            second column.
        """
        rows = list(rows)
        widths = measure_table(rows)
        if len(widths) != 2:
            raise TypeError("Expected two columns for definition list")
        # Second column starts after the widest term (capped at col_max)
        # plus the column spacing.
        first_col = min(widths[0], col_max) + col_spacing
        for first, second in iter_rows(rows, len(widths)):
            self.write(f"{'':>{self.current_indent}}{first}")
            if not second:
                self.write("\n")
                continue
            if term_len(first) <= first_col - col_spacing:
                self.write(" " * (first_col - term_len(first)))
            else:
                # Term is too wide; start the description on its own line.
                self.write("\n")
                self.write(" " * (first_col + self.current_indent))
            text_width = max(self.width - first_col - 2, 10)
            wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
            lines = wrapped_text.splitlines()
            if lines:
                self.write(f"{lines[0]}\n")
                for line in lines[1:]:
                    self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
            else:
                self.write("\n")
    @contextmanager
    def section(self, name: str) -> t.Iterator[None]:
        """Helpful context manager that writes a paragraph, a heading,
        and the indents.
        :param name: the section name that is written as heading.
        """
        self.write_paragraph()
        self.write_heading(name)
        self.indent()
        try:
            yield
        finally:
            self.dedent()
    @contextmanager
    def indentation(self) -> t.Iterator[None]:
        """A context manager that increases the indentation."""
        self.indent()
        try:
            yield
        finally:
            self.dedent()
    def getvalue(self) -> str:
        """Returns the buffer contents."""
        return "".join(self.buffer)
def join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]:
    """Join a list of option strings in the most appropriate way.

    Returns ``(formatted_string, any_prefix_is_slash)`` where the second
    item is a flag indicating whether any of the option prefixes was a
    slash.
    """
    decorated = []
    any_prefix_is_slash = False
    for opt in options:
        prefix = split_opt(opt)[0]
        if prefix == "/":
            any_prefix_is_slash = True
        # Sort short options (shorter prefixes) before long ones.
        decorated.append((len(prefix), opt))
    decorated.sort(key=lambda pair: pair[0])
    joined = ", ".join(opt for _, opt in decorated)
    return joined, any_prefix_is_slash
import os
import typing as t
from gettext import gettext as _
from gettext import ngettext
from ._compat import get_text_stderr
from .utils import echo
if t.TYPE_CHECKING:
from .core import Context
from .core import Parameter
def _join_param_hints(
param_hint: t.Optional[t.Union[t.Sequence[str], str]]
) -> t.Optional[str]:
if param_hint is not None and not isinstance(param_hint, str):
return " / ".join(repr(x) for x in param_hint)
return param_hint
class ClickException(Exception):
    """An exception that Click can handle and show to the user."""

    #: The exit code for this exception.
    exit_code = 1

    def __init__(self, message: str) -> None:
        super().__init__(message)
        self.message = message

    def format_message(self) -> str:
        # Subclasses override this to build richer messages.
        return self.message

    def __str__(self) -> str:
        return self.message

    def show(self, file: t.Optional[t.IO] = None) -> None:
        # Default to stderr when no explicit stream was given.
        out = file if file is not None else get_text_stderr()
        echo(_("Error: {message}").format(message=self.format_message()), file=out)
class UsageError(ClickException):
    """An internal exception that signals a usage error. This typically
    aborts any further handling.
    :param message: the error message to display.
    :param ctx: optionally the context that caused this error. Click will
        fill in the context automatically in some situations.
    """
    # Usage errors conventionally exit with status 2.
    exit_code = 2
    def __init__(self, message: str, ctx: t.Optional["Context"] = None) -> None:
        super().__init__(message)
        self.ctx = ctx
        # Convenience shortcut to the failing command when a context is known.
        self.cmd = self.ctx.command if self.ctx else None
    def show(self, file: t.Optional[t.IO] = None) -> None:
        if file is None:
            file = get_text_stderr()
        color = None
        hint = ""
        # Suggest the help option when the command exposes one.
        if (
            self.ctx is not None
            and self.ctx.command.get_help_option(self.ctx) is not None
        ):
            hint = _("Try '{command} {option}' for help.").format(
                command=self.ctx.command_path, option=self.ctx.help_option_names[0]
            )
            hint = f"{hint}\n"
        if self.ctx is not None:
            color = self.ctx.color
            # Print the usage line (plus the hint) before the error itself.
            echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color)
        echo(
            _("Error: {message}").format(message=self.format_message()),
            file=file,
            color=color,
        )
class BadParameter(UsageError):
    """An exception that formats out a standardized error message for a
    bad parameter. This is useful when thrown from a callback or type as
    Click will attach contextual information to it (for instance, which
    parameter it is).

    .. versionadded:: 2.0

    :param param: the parameter object that caused this error. This can
        be left out, and Click will attach this info itself if possible.
    :param param_hint: a string that shows up as parameter name. This
        can be used as alternative to `param` in cases where custom
        validation should happen. If it is a string it's used as such,
        if it's a list then each item is quoted and separated.
    """

    def __init__(
        self,
        message: str,
        ctx: t.Optional["Context"] = None,
        param: t.Optional["Parameter"] = None,
        param_hint: t.Optional[str] = None,
    ) -> None:
        super().__init__(message, ctx)
        self.param = param
        self.param_hint = param_hint

    def format_message(self) -> str:
        # Prefer an explicit hint; otherwise derive one from the parameter.
        if self.param_hint is not None:
            hint = self.param_hint
        elif self.param is not None:
            hint = self.param.get_error_hint(self.ctx)  # type: ignore
        else:
            # No way to name the parameter; fall back to the short form.
            return _("Invalid value: {message}").format(message=self.message)
        return _("Invalid value for {param_hint}: {message}").format(
            param_hint=_join_param_hints(hint), message=self.message
        )
class MissingParameter(BadParameter):
    """Raised if click required an option or argument but it was not
    provided when invoking the script.
    .. versionadded:: 4.0
    :param param_type: a string that indicates the type of the parameter.
        The default is to inherit the parameter type from
        the given `param`. Valid values are ``'parameter'``,
        ``'option'`` or ``'argument'``.
    """
    def __init__(
        self,
        message: t.Optional[str] = None,
        ctx: t.Optional["Context"] = None,
        param: t.Optional["Parameter"] = None,
        param_hint: t.Optional[str] = None,
        param_type: t.Optional[str] = None,
    ) -> None:
        super().__init__(message or "", ctx, param, param_hint)
        self.param_type = param_type
    def format_message(self) -> str:
        # Resolve the display hint for the parameter, if any.
        if self.param_hint is not None:
            param_hint: t.Optional[str] = self.param_hint
        elif self.param is not None:
            param_hint = self.param.get_error_hint(self.ctx)  # type: ignore
        else:
            param_hint = None
        param_hint = _join_param_hints(param_hint)
        param_hint = f" {param_hint}" if param_hint else ""
        # Fall back to the parameter's own type name when not given.
        param_type = self.param_type
        if param_type is None and self.param is not None:
            param_type = self.param.param_type_name
        msg = self.message
        if self.param is not None:
            # Let the parameter type append extra guidance to the message.
            msg_extra = self.param.type.get_missing_message(self.param)
            if msg_extra:
                if msg:
                    msg += f". {msg_extra}"
                else:
                    msg = msg_extra
        msg = f" {msg}" if msg else ""
        # Translate param_type for known types.
        if param_type == "argument":
            missing = _("Missing argument")
        elif param_type == "option":
            missing = _("Missing option")
        elif param_type == "parameter":
            missing = _("Missing parameter")
        else:
            missing = _("Missing {param_type}").format(param_type=param_type)
        return f"{missing}{param_hint}.{msg}"
    def __str__(self) -> str:
        if not self.message:
            # No explicit message; name the parameter instead.
            param_name = self.param.name if self.param else None
            return _("Missing parameter: {param_name}").format(param_name=param_name)
        else:
            return self.message
class NoSuchOption(UsageError):
    """Raised if click attempted to handle an option that does not
    exist.

    .. versionadded:: 4.0
    """

    def __init__(
        self,
        option_name: str,
        message: t.Optional[str] = None,
        possibilities: t.Optional[t.Sequence[str]] = None,
        ctx: t.Optional["Context"] = None,
    ) -> None:
        if message is None:
            message = _("No such option: {name}").format(name=option_name)

        super().__init__(message, ctx)
        self.option_name = option_name
        self.possibilities = possibilities

    def format_message(self) -> str:
        if not self.possibilities:
            return self.message

        # Both singular and plural templates receive the same joined
        # string; ngettext only selects which template is used.
        joined = ", ".join(sorted(self.possibilities))
        suggest = ngettext(
            "Did you mean {possibility}?",
            "(Possible options: {possibilities})",
            len(self.possibilities),
        ).format(possibility=joined, possibilities=joined)
        return f"{self.message} {suggest}"
class BadOptionUsage(UsageError):
    """Raised if an option is generally supplied but the use of the option
    was incorrect. This is for instance raised if the number of arguments
    for an option is not correct.

    .. versionadded:: 4.0

    :param option_name: the name of the option being used incorrectly.
    """

    def __init__(
        self, option_name: str, message: str, ctx: t.Optional["Context"] = None
    ) -> None:
        super().__init__(message, ctx)
        self.option_name = option_name
class BadArgumentUsage(UsageError):
    """Raised if an argument is generally supplied but the use of the argument
    was incorrect. This is for instance raised if the number of values
    for an argument is not correct.

    .. versionadded:: 6.0
    """
class FileError(ClickException):
    """Raised if a file cannot be opened."""

    def __init__(self, filename: str, hint: t.Optional[str] = None) -> None:
        super().__init__(hint if hint is not None else _("unknown error"))
        # Keep both the display form and the raw filename around.
        self.ui_filename = os.fsdecode(filename)
        self.filename = filename

    def format_message(self) -> str:
        return _("Could not open file {filename!r}: {message}").format(
            filename=self.ui_filename, message=self.message
        )
class Abort(RuntimeError):
    """An internal signalling exception that tells Click to abort."""
class Exit(RuntimeError):
    """An exception that indicates that the application should exit with some
    status code.

    :param code: the status code to exit with.
    """

    # Only the exit code is stored; no instance ``__dict__`` is needed.
    __slots__ = ("exit_code",)

    def __init__(self, code: int = 0) -> None:
        self.exit_code = code
import os
import re
import typing as t
from gettext import gettext as _
from .core import Argument
from .core import BaseCommand
from .core import Context
from .core import MultiCommand
from .core import Option
from .core import Parameter
from .core import ParameterSource
from .parser import split_arg_string
from .utils import echo
def shell_complete(
    cli: BaseCommand,
    ctx_args: t.Dict[str, t.Any],
    prog_name: str,
    complete_var: str,
    instruction: str,
) -> int:
    """Perform shell completion for the given CLI program.

    :param cli: Command being called.
    :param ctx_args: Extra arguments to pass to ``cli.make_context``.
    :param prog_name: Name of the executable in the shell.
    :param complete_var: Name of the environment variable that holds
        the completion instruction.
    :param instruction: Value of ``complete_var`` with the completion
        instruction and shell, in the form ``instruction_shell``.
    :return: Status code to exit with.
    """
    shell, _, instruction = instruction.partition("_")
    comp_cls = get_completion_class(shell)

    if comp_cls is None:
        return 1

    comp = comp_cls(cli, ctx_args, prog_name, complete_var)
    producer = {"source": comp.source, "complete": comp.complete}.get(instruction)

    if producer is None:
        # Unknown instruction.
        return 1

    echo(producer())
    return 0
class CompletionItem:
    """Represents a completion value and metadata about the value. The
    default metadata is ``type`` to indicate special shell handling,
    and ``help`` if a shell supports showing a help string next to the
    value.

    Arbitrary parameters can be passed when creating the object, and
    accessed using ``item.attr``. If an attribute wasn't passed,
    accessing it returns ``None``.

    :param value: The completion suggestion.
    :param type: Tells the shell script to provide special completion
        support for the type. Click uses ``"dir"`` and ``"file"``.
    :param help: String shown next to the value if supported.
    :param kwargs: Arbitrary metadata. The built-in implementations
        don't use this, but custom type completions paired with custom
        shell support could use it.
    """

    __slots__ = ("value", "type", "help", "_info")

    def __init__(
        self,
        value: t.Any,
        type: str = "plain",
        help: t.Optional[str] = None,
        **kwargs: t.Any,
    ) -> None:
        self.value = value
        self.type = type
        self.help = help
        # Extra metadata, exposed read-only through __getattr__.
        self._info = kwargs

    def __getattr__(self, name: str) -> t.Any:
        # Only called for attributes not covered by __slots__; unknown
        # metadata resolves to None rather than raising.
        try:
            return self._info[name]
        except KeyError:
            return None
# Only Bash >= 4.4 has the nosort option.
_SOURCE_BASH = """\
%(complete_func)s() {
local IFS=$'\\n'
local response
response=$(env COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \
%(complete_var)s=bash_complete $1)
for completion in $response; do
IFS=',' read type value <<< "$completion"
if [[ $type == 'dir' ]]; then
COMPREPLY=()
compopt -o dirnames
elif [[ $type == 'file' ]]; then
COMPREPLY=()
compopt -o default
elif [[ $type == 'plain' ]]; then
COMPREPLY+=($value)
fi
done
return 0
}
%(complete_func)s_setup() {
complete -o nosort -F %(complete_func)s %(prog_name)s
}
%(complete_func)s_setup;
"""
_SOURCE_ZSH = """\
#compdef %(prog_name)s
%(complete_func)s() {
local -a completions
local -a completions_with_descriptions
local -a response
(( ! $+commands[%(prog_name)s] )) && return 1
response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) \
%(complete_var)s=zsh_complete %(prog_name)s)}")
for type key descr in ${response}; do
if [[ "$type" == "plain" ]]; then
if [[ "$descr" == "_" ]]; then
completions+=("$key")
else
completions_with_descriptions+=("$key":"$descr")
fi
elif [[ "$type" == "dir" ]]; then
_path_files -/
elif [[ "$type" == "file" ]]; then
_path_files -f
fi
done
if [ -n "$completions_with_descriptions" ]; then
_describe -V unsorted completions_with_descriptions -U
fi
if [ -n "$completions" ]; then
compadd -U -V unsorted -a completions
fi
}
compdef %(complete_func)s %(prog_name)s;
"""
_SOURCE_FISH = """\
function %(complete_func)s;
set -l response;
for value in (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \
COMP_CWORD=(commandline -t) %(prog_name)s);
set response $response $value;
end;
for completion in $response;
set -l metadata (string split "," $completion);
if test $metadata[1] = "dir";
__fish_complete_directories $metadata[2];
else if test $metadata[1] = "file";
__fish_complete_path $metadata[2];
else if test $metadata[1] = "plain";
echo $metadata[2];
end;
end;
end;
complete --no-files --command %(prog_name)s --arguments \
"(%(complete_func)s)";
"""
class ShellComplete:
    """Base class for providing shell completion support. A subclass for
    a given shell will override attributes and methods to implement the
    completion instructions (``source`` and ``complete``).

    :param cli: Command being called.
    :param prog_name: Name of the executable in the shell.
    :param complete_var: Name of the environment variable that holds
        the completion instruction.

    .. versionadded:: 8.0
    """

    name: t.ClassVar[str]
    """Name to register the shell as with :func:`add_completion_class`.
    This is used in completion instructions (``{name}_source`` and
    ``{name}_complete``).
    """

    source_template: t.ClassVar[str]
    """Completion script template formatted by :meth:`source`. This must
    be provided by subclasses.
    """

    def __init__(
        self,
        cli: BaseCommand,
        ctx_args: t.Dict[str, t.Any],
        prog_name: str,
        complete_var: str,
    ) -> None:
        self.cli = cli
        self.ctx_args = ctx_args
        self.prog_name = prog_name
        self.complete_var = complete_var

    @property
    def func_name(self) -> str:
        """The name of the shell function defined by the completion
        script.
        """
        # ``flags`` must be passed by keyword: the fourth positional
        # parameter of ``re.sub`` is ``count``, so passing ``re.ASCII``
        # positionally would silently be treated as a replacement limit
        # instead of a flag.
        safe_name = re.sub(
            r"\W*", "", self.prog_name.replace("-", "_"), flags=re.ASCII
        )
        return f"_{safe_name}_completion"

    def source_vars(self) -> t.Dict[str, t.Any]:
        """Vars for formatting :attr:`source_template`.

        By default this provides ``complete_func``, ``complete_var``,
        and ``prog_name``.
        """
        return {
            "complete_func": self.func_name,
            "complete_var": self.complete_var,
            "prog_name": self.prog_name,
        }

    def source(self) -> str:
        """Produce the shell script that defines the completion
        function. By default this ``%``-style formats
        :attr:`source_template` with the dict returned by
        :meth:`source_vars`.
        """
        return self.source_template % self.source_vars()

    def get_completion_args(self) -> t.Tuple[t.List[str], str]:
        """Use the env vars defined by the shell script to return a
        tuple of ``args, incomplete``. This must be implemented by
        subclasses.
        """
        raise NotImplementedError

    def get_completions(
        self, args: t.List[str], incomplete: str
    ) -> t.List[CompletionItem]:
        """Determine the context and last complete command or parameter
        from the complete args. Call that object's ``shell_complete``
        method to get the completions for the incomplete value.

        :param args: List of complete args before the incomplete value.
        :param incomplete: Value being completed. May be empty.
        """
        ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args)
        obj, incomplete = _resolve_incomplete(ctx, args, incomplete)
        return obj.shell_complete(ctx, incomplete)

    def format_completion(self, item: CompletionItem) -> str:
        """Format a completion item into the form recognized by the
        shell script. This must be implemented by subclasses.

        :param item: Completion item to format.
        """
        raise NotImplementedError

    def complete(self) -> str:
        """Produce the completion data to send back to the shell.

        By default this calls :meth:`get_completion_args`, gets the
        completions, then calls :meth:`format_completion` for each
        completion.
        """
        args, incomplete = self.get_completion_args()
        completions = self.get_completions(args, incomplete)
        out = [self.format_completion(item) for item in completions]
        return "\n".join(out)
class BashComplete(ShellComplete):
    """Shell completion for Bash."""

    name = "bash"
    source_template = _SOURCE_BASH

    def _check_version(self) -> None:
        """Raise :exc:`RuntimeError` if the installed Bash is older than
        4.4 (the first version with ``complete -o nosort``) or cannot be
        detected at all.
        """
        import subprocess

        output = subprocess.run(
            ["bash", "-c", "echo ${BASH_VERSION}"], stdout=subprocess.PIPE
        )
        match = re.search(r"^(\d+)\.(\d+)\.\d+", output.stdout.decode())

        if match is not None:
            # Compare numerically: lexicographic string comparison would
            # misjudge a two-digit major version ("10" < "4").
            major, minor = (int(g) for g in match.groups())

            if (major, minor) < (4, 4):
                raise RuntimeError(
                    _(
                        "Shell completion is not supported for Bash"
                        " versions older than 4.4."
                    )
                )
        else:
            raise RuntimeError(
                _("Couldn't detect Bash version, shell completion is not supported.")
            )

    def source(self) -> str:
        self._check_version()
        return super().source()

    def get_completion_args(self) -> t.Tuple[t.List[str], str]:
        # COMP_WORDS/COMP_CWORD are provided by the generated script.
        cwords = split_arg_string(os.environ["COMP_WORDS"])
        cword = int(os.environ["COMP_CWORD"])
        args = cwords[1:cword]

        try:
            incomplete = cwords[cword]
        except IndexError:
            incomplete = ""

        return args, incomplete

    def format_completion(self, item: CompletionItem) -> str:
        return f"{item.type},{item.value}"
class ZshComplete(ShellComplete):
    """Shell completion for Zsh."""

    name = "zsh"
    source_template = _SOURCE_ZSH

    def get_completion_args(self) -> t.Tuple[t.List[str], str]:
        words = split_arg_string(os.environ["COMP_WORDS"])
        cursor = int(os.environ["COMP_CWORD"])
        before_cursor = words[1:cursor]

        try:
            current = words[cursor]
        except IndexError:
            current = ""

        return before_cursor, current

    def format_completion(self, item: CompletionItem) -> str:
        # Three newline-separated fields; "_" marks "no help".
        return f"{item.type}\n{item.value}\n{item.help or '_'}"
class FishComplete(ShellComplete):
    """Shell completion for Fish."""

    name = "fish"
    source_template = _SOURCE_FISH

    def get_completion_args(self) -> t.Tuple[t.List[str], str]:
        words = split_arg_string(os.environ["COMP_WORDS"])
        incomplete = os.environ["COMP_CWORD"]
        args = words[1:]

        # Fish stores the partial word in both COMP_WORDS and
        # COMP_CWORD; drop the duplicate from the complete args.
        if incomplete and args and args[-1] == incomplete:
            args.pop()

        return args, incomplete

    def format_completion(self, item: CompletionItem) -> str:
        entry = f"{item.type},{item.value}"
        return f"{entry}\t{item.help}" if item.help else entry
# Registry of shell name -> completion class; extended at runtime by
# add_completion_class() and queried by get_completion_class().
_available_shells: t.Dict[str, t.Type[ShellComplete]] = {
    "bash": BashComplete,
    "fish": FishComplete,
    "zsh": ZshComplete,
}
def add_completion_class(
    cls: t.Type[ShellComplete], name: t.Optional[str] = None
) -> None:
    """Register a :class:`ShellComplete` subclass under the given name.
    The name will be provided by the completion instruction environment
    variable during completion.

    :param cls: The completion class that will handle completion for the
        shell.
    :param name: Name to register the class under. Defaults to the
        class's ``name`` attribute.
    """
    _available_shells[cls.name if name is None else name] = cls
def get_completion_class(shell: str) -> t.Optional[t.Type[ShellComplete]]:
    """Look up a registered :class:`ShellComplete` subclass by the name
    provided by the completion instruction environment variable. If the
    name isn't registered, returns ``None``.

    :param shell: Name the class is registered under.
    """
    try:
        return _available_shells[shell]
    except KeyError:
        return None
def _is_incomplete_argument(ctx: Context, param: Parameter) -> bool:
    """Determine if the given parameter is an argument that can still
    accept values.

    :param ctx: Invocation context for the command represented by the
        parsed complete args.
    :param param: Argument object being checked.
    """
    if not isinstance(param, Argument):
        return False

    assert param.name is not None
    value = ctx.params[param.name]

    # Unlimited nargs always accepts more values.
    if param.nargs == -1:
        return True

    # Not supplied on the command line yet (default/env/etc.).
    if ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE:
        return True

    # Multi-value argument that has not collected all its values.
    return (
        param.nargs > 1
        and isinstance(value, (tuple, list))
        and len(value) < param.nargs
    )
def _start_of_option(ctx: Context, value: str) -> bool:
    """Check if the value looks like the start of an option."""
    # An empty string cannot be an option; otherwise the first
    # character must be one of the context's option prefixes.
    return bool(value) and value[0] in ctx._opt_prefixes
def _is_incomplete_option(ctx: Context, args: t.List[str], param: Parameter) -> bool:
    """Determine if the given parameter is an option that needs a value.

    :param args: List of complete args before the incomplete value.
    :param param: Option object being checked.
    """
    if not isinstance(param, Option):
        return False

    # Flags and counters never consume a separate value.
    if param.is_flag or param.count:
        return False

    # Look at the last ``nargs`` args for the most recent option token.
    recent = None

    for offset, token in enumerate(reversed(args)):
        if offset >= param.nargs:
            break

        if _start_of_option(ctx, token):
            recent = token

    return recent is not None and recent in param.opts
def _resolve_context(
    cli: BaseCommand, ctx_args: t.Dict[str, t.Any], prog_name: str, args: t.List[str]
) -> Context:
    """Produce the context hierarchy starting with the command and
    traversing the complete arguments. This only follows the commands,
    it doesn't trigger input prompts or callbacks.

    :param cli: Command being called.
    :param prog_name: Name of the executable in the shell.
    :param args: List of complete args before the incomplete value.
    """
    # Resilient parsing disables prompts/callbacks so completion never
    # blocks or has side effects.
    ctx_args["resilient_parsing"] = True
    ctx = cli.make_context(prog_name, args.copy(), **ctx_args)
    args = ctx.protected_args + ctx.args
    # Walk down the command tree as long as there are remaining args
    # and the current command is a group.
    while args:
        command = ctx.command
        if isinstance(command, MultiCommand):
            if not command.chain:
                # Non-chained group: descend into the single subcommand.
                name, cmd, args = command.resolve_command(ctx, args)
                if cmd is None:
                    return ctx
                ctx = cmd.make_context(name, args, parent=ctx, resilient_parsing=True)
                args = ctx.protected_args + ctx.args
            else:
                # Chained group: resolve each chained subcommand in turn,
                # keeping the last sub-context as the current one.
                while args:
                    name, cmd, args = command.resolve_command(ctx, args)
                    if cmd is None:
                        return ctx
                    sub_ctx = cmd.make_context(
                        name,
                        args,
                        parent=ctx,
                        allow_extra_args=True,
                        allow_interspersed_args=False,
                        resilient_parsing=True,
                    )
                    args = sub_ctx.args
                ctx = sub_ctx
                args = [*sub_ctx.protected_args, *sub_ctx.args]
        else:
            # Plain command: nothing further to descend into.
            break
    return ctx
def _resolve_incomplete(
    ctx: Context, args: t.List[str], incomplete: str
) -> t.Tuple[t.Union[BaseCommand, Parameter], str]:
    """Find the Click object that will handle the completion of the
    incomplete value. Return the object and the incomplete value.

    :param ctx: Invocation context for the command represented by
        the parsed complete args.
    :param args: List of complete args before the incomplete value.
    :param incomplete: Value being completed. May be empty.
    """
    # Different shells treat an "=" between a long option name and
    # value differently. Might keep the value joined, return the "="
    # as a separate item, or return the split name and value. Always
    # split and discard the "=" to make completion easier.
    if incomplete == "=":
        incomplete = ""
    elif "=" in incomplete and _start_of_option(ctx, incomplete):
        name, _, incomplete = incomplete.partition("=")
        args.append(name)

    # The "--" marker tells Click to stop treating values as options
    # even if they start with the option character. If it hasn't been
    # given and the incomplete arg looks like an option, the current
    # command will provide option name completions.
    if "--" not in args and _start_of_option(ctx, incomplete):
        return ctx.command, incomplete

    params = ctx.command.get_params(ctx)

    # If the last complete arg is an option name with an incomplete
    # value, the option will provide value completions.
    for param in params:
        if _is_incomplete_option(ctx, args, param):
            return param, incomplete

    # It's not an option name or value. The first argument without a
    # parsed value will provide value completions.
    for param in params:
        if _is_incomplete_argument(ctx, param):
            return param, incomplete

    # There were no unparsed arguments, the command may be a group that
    # will provide command name completions.
    return ctx.command, incomplete
import functools
import re
import string
import sys
import typing as t
if t.TYPE_CHECKING:
    # Only needed for static typing; typing_extensions is not a
    # runtime dependency.
    import typing_extensions as te

    class HasHTML(te.Protocol):
        # Structural type for objects that render themselves as HTML.
        def __html__(self) -> str:
            pass

    _P = te.ParamSpec("_P")
__version__ = "2.1.3"
# Regexes used by Markup.striptags(); two passes avoid ambiguous
# matches between comments and tags.
_strip_comments_re = re.compile(r"<!--.*?-->", re.DOTALL)
_strip_tags_re = re.compile(r"<.*?>", re.DOTALL)
def _simple_escaping_wrapper(func: "t.Callable[_P, str]") -> "t.Callable[_P, Markup]":
    # Wrap a ``str`` method so that all of its arguments are escaped
    # before the call and the plain ``str`` result is re-wrapped in
    # ``Markup``.
    @functools.wraps(func)
    def wrapped(self: "Markup", *args: "_P.args", **kwargs: "_P.kwargs") -> "Markup":
        # Escape positional args (into a list copy) and keyword args
        # (in place) before delegating to the underlying str method.
        arg_list = _escape_argspec(list(args), enumerate(args), self.escape)
        _escape_argspec(kwargs, kwargs.items(), self.escape)
        return self.__class__(func(self, *arg_list, **kwargs))  # type: ignore[arg-type]
    return wrapped  # type: ignore[return-value]
class Markup(str):
    """A string that is ready to be safely inserted into an HTML or XML
    document, either because it was escaped or because it was marked
    safe.
    Passing an object to the constructor converts it to text and wraps
    it to mark it safe without escaping. To escape the text, use the
    :meth:`escape` class method instead.
    >>> Markup("Hello, <em>World</em>!")
    Markup('Hello, <em>World</em>!')
    >>> Markup(42)
    Markup('42')
    >>> Markup.escape("Hello, <em>World</em>!")
    Markup('Hello <em>World</em>!')
    This implements the ``__html__()`` interface that some frameworks
    use. Passing an object that implements ``__html__()`` will wrap the
    output of that method, marking it safe.
    >>> class Foo:
    ...     def __html__(self):
    ...         return '<a href="/foo">foo</a>'
    ...
    >>> Markup(Foo())
    Markup('<a href="/foo">foo</a>')
    This is a subclass of :class:`str`. It has the same methods, but
    escapes their arguments and returns a ``Markup`` instance.
    >>> Markup("<em>%s</em>") % ("foo & bar",)
    Markup('<em>foo & bar</em>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup('<em>Hello</em> <foo>')
    """
    __slots__ = ()
    def __new__(
        cls, base: t.Any = "", encoding: t.Optional[str] = None, errors: str = "strict"
    ) -> "te.Self":
        # Objects exposing __html__ are already safe; wrap their output.
        if hasattr(base, "__html__"):
            base = base.__html__()
        if encoding is None:
            return super().__new__(cls, base)
        return super().__new__(cls, base, encoding, errors)
    def __html__(self) -> "te.Self":
        return self
    def __add__(self, other: t.Union[str, "HasHTML"]) -> "te.Self":
        # Escape the other operand before concatenating.
        if isinstance(other, str) or hasattr(other, "__html__"):
            return self.__class__(super().__add__(self.escape(other)))
        return NotImplemented
    def __radd__(self, other: t.Union[str, "HasHTML"]) -> "te.Self":
        if isinstance(other, str) or hasattr(other, "__html__"):
            return self.escape(other).__add__(self)
        return NotImplemented
    def __mul__(self, num: "te.SupportsIndex") -> "te.Self":
        if isinstance(num, int):
            return self.__class__(super().__mul__(num))
        return NotImplemented
    __rmul__ = __mul__
    def __mod__(self, arg: t.Any) -> "te.Self":
        # ``%`` formatting: wrap the argument(s) so values are escaped
        # lazily as the formatting engine pulls them out.
        if isinstance(arg, tuple):
            # a tuple of arguments, each wrapped
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        elif hasattr(type(arg), "__getitem__") and not isinstance(arg, str):
            # a mapping of arguments, wrapped
            arg = _MarkupEscapeHelper(arg, self.escape)
        else:
            # a single argument, wrapped with the helper and a tuple
            arg = (_MarkupEscapeHelper(arg, self.escape),)
        return self.__class__(super().__mod__(arg))
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({super().__repr__()})"
    def join(self, seq: t.Iterable[t.Union[str, "HasHTML"]]) -> "te.Self":
        return self.__class__(super().join(map(self.escape, seq)))
    join.__doc__ = str.join.__doc__
    def split(  # type: ignore[override]
        self, sep: t.Optional[str] = None, maxsplit: int = -1
    ) -> t.List["te.Self"]:
        return [self.__class__(v) for v in super().split(sep, maxsplit)]
    split.__doc__ = str.split.__doc__
    def rsplit(  # type: ignore[override]
        self, sep: t.Optional[str] = None, maxsplit: int = -1
    ) -> t.List["te.Self"]:
        return [self.__class__(v) for v in super().rsplit(sep, maxsplit)]
    rsplit.__doc__ = str.rsplit.__doc__
    def splitlines(  # type: ignore[override]
        self, keepends: bool = False
    ) -> t.List["te.Self"]:
        return [self.__class__(v) for v in super().splitlines(keepends)]
    splitlines.__doc__ = str.splitlines.__doc__
    def unescape(self) -> str:
        """Convert escaped markup back into a text string. This replaces
        HTML entities with the characters they represent.
        >>> Markup("Main » <em>About</em>").unescape()
        'Main » <em>About</em>'
        """
        from html import unescape
        return unescape(str(self))
    def striptags(self) -> str:
        """:meth:`unescape` the markup, remove tags, and normalize
        whitespace to single spaces.
        >>> Markup("Main »\t<em>About</em>").striptags()
        'Main » About'
        """
        # Use two regexes to avoid ambiguous matches.
        value = _strip_comments_re.sub("", self)
        value = _strip_tags_re.sub("", value)
        value = " ".join(value.split())
        return self.__class__(value).unescape()
    @classmethod
    def escape(cls, s: t.Any) -> "te.Self":
        """Escape a string. Calls :func:`escape` and ensures that for
        subclasses the correct type is returned.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv  # type: ignore[return-value]
    # The following str methods only need their arguments escaped and
    # their plain-str result re-wrapped; the shared wrapper handles both.
    __getitem__ = _simple_escaping_wrapper(str.__getitem__)
    capitalize = _simple_escaping_wrapper(str.capitalize)
    title = _simple_escaping_wrapper(str.title)
    lower = _simple_escaping_wrapper(str.lower)
    upper = _simple_escaping_wrapper(str.upper)
    replace = _simple_escaping_wrapper(str.replace)
    ljust = _simple_escaping_wrapper(str.ljust)
    rjust = _simple_escaping_wrapper(str.rjust)
    lstrip = _simple_escaping_wrapper(str.lstrip)
    rstrip = _simple_escaping_wrapper(str.rstrip)
    center = _simple_escaping_wrapper(str.center)
    strip = _simple_escaping_wrapper(str.strip)
    translate = _simple_escaping_wrapper(str.translate)
    expandtabs = _simple_escaping_wrapper(str.expandtabs)
    swapcase = _simple_escaping_wrapper(str.swapcase)
    zfill = _simple_escaping_wrapper(str.zfill)
    casefold = _simple_escaping_wrapper(str.casefold)
    if sys.version_info >= (3, 9):
        removeprefix = _simple_escaping_wrapper(str.removeprefix)
        removesuffix = _simple_escaping_wrapper(str.removesuffix)
    def partition(self, sep: str) -> t.Tuple["te.Self", "te.Self", "te.Self"]:
        l, s, r = super().partition(self.escape(sep))
        cls = self.__class__
        return cls(l), cls(s), cls(r)
    def rpartition(self, sep: str) -> t.Tuple["te.Self", "te.Self", "te.Self"]:
        l, s, r = super().rpartition(self.escape(sep))
        cls = self.__class__
        return cls(l), cls(s), cls(r)
    def format(self, *args: t.Any, **kwargs: t.Any) -> "te.Self":
        formatter = EscapeFormatter(self.escape)
        return self.__class__(formatter.vformat(self, args, kwargs))
    def format_map(  # type: ignore[override]
        self, map: t.Mapping[str, t.Any]
    ) -> "te.Self":
        formatter = EscapeFormatter(self.escape)
        return self.__class__(formatter.vformat(self, (), map))
    def __html_format__(self, format_spec: str) -> "te.Self":
        # Markup is already safe; a format spec has no meaning here.
        if format_spec:
            raise ValueError("Unsupported format specification for Markup.")
        return self
class EscapeFormatter(string.Formatter):
    """A :class:`string.Formatter` that escapes every formatted field
    with the given ``escape`` callable."""

    __slots__ = ("escape",)

    def __init__(self, escape: t.Callable[[t.Any], Markup]) -> None:
        self.escape = escape
        super().__init__()

    def format_field(self, value: t.Any, format_spec: str) -> str:
        if hasattr(value, "__html_format__"):
            rendered = value.__html_format__(format_spec)
        elif hasattr(value, "__html__"):
            if format_spec:
                raise ValueError(
                    f"Format specifier {format_spec} given, but {type(value)} does not"
                    " define __html_format__. A class that defines __html__ must define"
                    " __html_format__ to work with format specifiers."
                )

            rendered = value.__html__()
        else:
            # Make sure the format spec is str so the right callback
            # methods are invoked.
            rendered = string.Formatter.format_field(self, value, str(format_spec))

        return str(self.escape(rendered))
_ListOrDict = t.TypeVar("_ListOrDict", list, dict)


def _escape_argspec(
    obj: _ListOrDict, iterable: t.Iterable[t.Any], escape: t.Callable[[t.Any], Markup]
) -> _ListOrDict:
    """Escape every string-like value of *iterable*, writing the result
    back into *obj* under the same key/index."""
    for key, value in iterable:
        if hasattr(value, "__html__") or isinstance(value, str):
            obj[key] = escape(value)

    return obj
class _MarkupEscapeHelper:
    """Helper for :meth:`Markup.__mod__` that escapes values lazily, as
    the ``%`` formatting engine asks for them."""

    __slots__ = ("obj", "escape")

    def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
        self.obj = obj
        self.escape = escape

    def __getitem__(self, item: t.Any) -> "te.Self":
        # Wrap sub-items so nested lookups stay escaped as well.
        return self.__class__(self.obj[item], self.escape)

    def __int__(self) -> int:
        return int(self.obj)

    def __float__(self) -> float:
        return float(self.obj)

    def __str__(self) -> str:
        return str(self.escape(self.obj))

    def __repr__(self) -> str:
        return str(self.escape(repr(self.obj)))
# circular import
try:
    # Prefer the C-accelerated implementations when available.
    from ._speedups import escape as escape
    from ._speedups import escape_silent as escape_silent
    from ._speedups import soft_str as soft_str
except ImportError:
    from ._native import escape as escape
    from ._native import escape_silent as escape_silent  # noqa: F401
    from ._native import soft_str as soft_str  # noqa: F401
import sys
import typing as t
from types import CodeType
from types import TracebackType
from .exceptions import TemplateSyntaxError
from .utils import internal_code
from .utils import missing
if t.TYPE_CHECKING:
from .runtime import Context
def rewrite_traceback_stack(source: t.Optional[str] = None) -> BaseException:
    """Rewrite the current exception to replace any tracebacks from
    within compiled template code with tracebacks that look like they
    came from the template source.
    This must be called within an ``except`` block.
    :param source: For ``TemplateSyntaxError``, the original source if
        known.
    :return: The original exception with the rewritten traceback.
    """
    _, exc_value, tb = sys.exc_info()
    exc_value = t.cast(BaseException, exc_value)
    tb = t.cast(TracebackType, tb)
    if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
        exc_value.translated = True
        exc_value.source = source
        # Remove the old traceback, otherwise the frames from the
        # compiler still show up.
        exc_value.with_traceback(None)
        # Outside of runtime, so the frame isn't executing template
        # code, but it still needs to point at the template.
        tb = fake_traceback(
            exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
        )
    else:
        # Skip the frame for the render function.
        tb = tb.tb_next
    stack = []
    # Build the stack of traceback object, replacing any in template
    # code with the source file and line information.
    while tb is not None:
        # Skip frames decorated with @internalcode. These are internal
        # calls that aren't useful in template debugging output.
        if tb.tb_frame.f_code in internal_code:
            tb = tb.tb_next
            continue
        # Compiled template modules carry a __jinja_template__ global;
        # its presence marks a frame as template code.
        template = tb.tb_frame.f_globals.get("__jinja_template__")
        if template is not None:
            lineno = template.get_corresponding_lineno(tb.tb_lineno)
            fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
            stack.append(fake_tb)
        else:
            stack.append(tb)
        tb = tb.tb_next
    tb_next = None
    # Assign tb_next in reverse to avoid circular references.
    for tb in reversed(stack):
        tb.tb_next = tb_next
        tb_next = tb
    return exc_value.with_traceback(tb_next)
def fake_traceback(  # type: ignore
    exc_value: BaseException, tb: t.Optional[TracebackType], filename: str, lineno: int
) -> TracebackType:
    """Produce a new traceback object that looks like it came from the
    template source instead of the compiled code. The filename, line
    number, and location name will point to the template, and the local
    variables will be the current template context.
    :param exc_value: The original exception to be re-raised to create
        the new traceback.
    :param tb: The original traceback to get the local variables and
        code info from.
    :param filename: The template filename.
    :param lineno: The line number in the template source.
    """
    if tb is not None:
        # Replace the real locals with the context that would be
        # available at that point in the template.
        locals = get_template_locals(tb.tb_frame.f_locals)
        locals.pop("__jinja_exception__", None)
    else:
        locals = {}
    globals = {
        "__name__": filename,
        "__file__": filename,
        "__jinja_exception__": exc_value,
    }
    # Raise an exception at the correct line number.
    # Padding with newlines makes the compiled statement sit on the
    # desired template line without needing a real source file.
    code: CodeType = compile(
        "\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec"
    )
    # Build a new code object that points to the template file and
    # replaces the location with a block name.
    location = "template"
    if tb is not None:
        function = tb.tb_frame.f_code.co_name
        if function == "root":
            location = "top-level template code"
        elif function.startswith("block_"):
            location = f"block {function[6:]!r}"
    if sys.version_info >= (3, 8):
        code = code.replace(co_name=location)
    else:
        # Older Pythons lack CodeType.replace(); rebuild by hand.
        code = CodeType(
            code.co_argcount,
            code.co_kwonlyargcount,
            code.co_nlocals,
            code.co_stacksize,
            code.co_flags,
            code.co_code,
            code.co_consts,
            code.co_names,
            code.co_varnames,
            code.co_filename,
            location,
            code.co_firstlineno,
            code.co_lnotab,
            code.co_freevars,
            code.co_cellvars,
        )
    # Execute the new code, which is guaranteed to raise, and return
    # the new traceback without this frame.
    try:
        exec(code, globals, locals)
    except BaseException:
        return sys.exc_info()[2].tb_next  # type: ignore
def get_template_locals(real_locals: t.Mapping[str, t.Any]) -> t.Dict[str, t.Any]:
    """Based on the runtime locals, get the context that would be
    available at that point in the template.

    :param real_locals: Locals of the compiled template frame.
    :return: Template-visible variable names mapped to their values.
    """
    # Start with the current template context, if one was pushed.
    ctx: "t.Optional[Context]" = real_locals.get("context")
    if ctx is not None:
        data: t.Dict[str, t.Any] = ctx.get_all().copy()
    else:
        data = {}
    # Might be in a derived context that only sets local variables
    # rather than pushing a context. Local variables follow the scheme
    # l_depth_name. Find the highest-depth local that has a value for
    # each name.
    local_overrides: t.Dict[str, t.Tuple[int, t.Any]] = {}
    for name, value in real_locals.items():
        if not name.startswith("l_") or value is missing:
            # Not a template variable, or no longer relevant.
            continue
        try:
            _, depth_str, name = name.split("_", 2)
            depth = int(depth_str)
        except ValueError:
            continue
        cur_depth = local_overrides.get(name, (-1,))[0]
        if cur_depth < depth:
            local_overrides[name] = (depth, value)
    # Modify the context with any derived context.
    for name, (_, value) in local_overrides.items():
        if value is missing:
            data.pop(name, None)
        else:
            data[name] = value
    # NOTE: the original final line had dataset metadata fused onto it
    # ("return data | /robotframework-lsp-..."); restored to plain return.
    return data
import typing as t
from . import nodes
from .visitor import NodeVisitor
# Load instructions recorded for each referenced identifier.
VAR_LOAD_PARAMETER = "param"
VAR_LOAD_RESOLVE = "resolve"
VAR_LOAD_ALIAS = "alias"
VAR_LOAD_UNDEFINED = "undefined"


def find_symbols(
    nodes: "t.Iterable[nodes.Node]", parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
    """Collect the symbols referenced by an iterable of nodes into a
    new :class:`Symbols` scope nested under ``parent_symbols``.
    """
    # Annotation is quoted: the parameter name shadows the ``nodes``
    # module, so the forward reference keeps it unevaluated at def time.
    sym = Symbols(parent=parent_symbols)
    visitor = FrameSymbolVisitor(sym)
    for node in nodes:
        visitor.visit(node)
    return sym


def symbols_for_node(
    node: "nodes.Node", parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
    """Collect the symbols of a single node (and children) into a new
    :class:`Symbols` scope nested under ``parent_symbols``.
    """
    sym = Symbols(parent=parent_symbols)
    sym.analyze_node(node)
    return sym


class Symbols:
    """Tracks the identifiers referenced, stored, and loaded within one
    template frame. Scopes nest via ``parent``; ``level`` is the nesting
    depth and is baked into the generated identifier names (``l_<level>_<name>``).
    """

    def __init__(
        self, parent: t.Optional["Symbols"] = None, level: t.Optional[int] = None
    ) -> None:
        if level is None:
            if parent is None:
                level = 0
            else:
                level = parent.level + 1
        self.level: int = level
        self.parent = parent
        # name -> generated identifier (``l_<level>_<name>``)
        self.refs: t.Dict[str, str] = {}
        # generated identifier -> (load instruction, argument)
        self.loads: t.Dict[str, t.Any] = {}
        # plain names assigned in this scope
        self.stores: t.Set[str] = set()

    def analyze_node(self, node: "nodes.Node", **kwargs: t.Any) -> None:
        """Walk ``node`` and record its symbol usage in this scope."""
        visitor = RootVisitor(self)
        visitor.visit(node, **kwargs)

    def _define_ref(
        self, name: str, load: t.Optional[t.Tuple[str, t.Optional[str]]] = None
    ) -> str:
        """Create the level-scoped identifier for ``name``, optionally
        recording how it is loaded. Returns the identifier.
        """
        ident = f"l_{self.level}_{name}"
        self.refs[name] = ident
        if load is not None:
            self.loads[ident] = load
        return ident

    def find_load(self, target: str) -> t.Optional[t.Any]:
        """Look up a load instruction for ``target``, walking up parents."""
        if target in self.loads:
            return self.loads[target]
        if self.parent is not None:
            return self.parent.find_load(target)
        return None

    def find_ref(self, name: str) -> t.Optional[str]:
        """Look up the identifier for ``name``, walking up parents."""
        if name in self.refs:
            return self.refs[name]
        if self.parent is not None:
            return self.parent.find_ref(name)
        return None

    def ref(self, name: str) -> str:
        """Like :meth:`find_ref` but raises if the name is unknown."""
        rv = self.find_ref(name)
        if rv is None:
            raise AssertionError(
                "Tried to resolve a name to a reference that was"
                f" unknown to the frame ({name!r})"
            )
        return rv

    def copy(self) -> "Symbols":
        """Shallow copy with independent refs/loads/stores containers."""
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.refs = self.refs.copy()
        rv.loads = self.loads.copy()
        rv.stores = self.stores.copy()
        return rv

    def store(self, name: str) -> None:
        """Record an assignment to ``name`` in this scope."""
        self.stores.add(name)
        # If we have not see the name referenced yet, we need to figure
        # out what to set it to.
        if name not in self.refs:
            # If there is a parent scope we check if the name has a
            # reference there. If it does it means we might have to alias
            # to a variable there.
            if self.parent is not None:
                outer_ref = self.parent.find_ref(name)
                if outer_ref is not None:
                    self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
                    return
            # Otherwise we can just set it to undefined.
            self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))

    def declare_parameter(self, name: str) -> str:
        """Record ``name`` as a frame parameter and return its identifier."""
        self.stores.add(name)
        return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))

    def load(self, name: str) -> None:
        """Record a read of ``name``; unknown names resolve from context."""
        if self.find_ref(name) is None:
            self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))

    def branch_update(self, branch_symbols: t.Sequence["Symbols"]) -> None:
        """Merge the symbols of conditional branches back into this scope.

        A name stored in only some branches may be undefined afterwards,
        so its load falls back to an outer alias or a context resolve.
        """
        stores: t.Dict[str, int] = {}
        for branch in branch_symbols:
            for target in branch.stores:
                if target in self.stores:
                    continue
                stores[target] = stores.get(target, 0) + 1
        for sym in branch_symbols:
            self.refs.update(sym.refs)
            self.loads.update(sym.loads)
            self.stores.update(sym.stores)
        for name, branch_count in stores.items():
            if branch_count == len(branch_symbols):
                # Stored in every branch: definitely assigned.
                continue
            target = self.find_ref(name)  # type: ignore
            assert target is not None, "should not happen"
            if self.parent is not None:
                outer_target = self.parent.find_ref(name)
                if outer_target is not None:
                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
                    continue
            self.loads[target] = (VAR_LOAD_RESOLVE, name)

    def dump_stores(self) -> t.Dict[str, str]:
        """Map every stored name in this scope chain to its identifier.

        ``self.find_ref`` is intentional: the innermost binding wins
        because find_ref ascends from this (deepest) scope.
        """
        rv: t.Dict[str, str] = {}
        node: t.Optional["Symbols"] = self
        while node is not None:
            for name in sorted(node.stores):
                if name not in rv:
                    rv[name] = self.find_ref(name)  # type: ignore
            node = node.parent
        return rv

    def dump_param_targets(self) -> t.Set[str]:
        """Return the identifiers of all parameters in this scope chain."""
        rv = set()
        node: t.Optional["Symbols"] = self
        while node is not None:
            # Fixed: the original iterated ``self.loads`` here, which made
            # the parent walk a no-op repetition and hid parameters that
            # were declared in enclosing scopes. Mirror ``dump_stores``
            # and read the scope currently being walked.
            for target, (instr, _) in node.loads.items():
                if instr == VAR_LOAD_PARAMETER:
                    rv.add(target)
            node = node.parent
        return rv
class RootVisitor(NodeVisitor):
    """Dispatches the children of scope-introducing nodes to a
    :class:`FrameSymbolVisitor` so symbol collection stops at nested
    scope boundaries.
    """

    def __init__(self, symbols: "Symbols") -> None:
        self.sym_visitor = FrameSymbolVisitor(symbols)

    def _simple_visit(self, node: nodes.Node, **kwargs: t.Any) -> None:
        # Visit all direct children with the frame symbol visitor.
        for child in node.iter_child_nodes():
            self.sym_visitor.visit(child)

    # These node types all introduce a scope whose direct children are
    # analyzed the same simple way.
    visit_Template = _simple_visit
    visit_Block = _simple_visit
    visit_Macro = _simple_visit
    visit_FilterBlock = _simple_visit
    visit_Scope = _simple_visit
    visit_If = _simple_visit
    visit_ScopedEvalContextModifier = _simple_visit

    def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
        # Only the body belongs to the inner scope, not the target.
        for child in node.body:
            self.sym_visitor.visit(child)

    def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
        # The ``call`` expression belongs to the outer scope; skip it.
        for child in node.iter_child_nodes(exclude=("call",)):
            self.sym_visitor.visit(child)

    def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
        for child in node.body:
            self.sym_visitor.visit(child)

    def visit_For(
        self, node: nodes.For, for_branch: str = "body", **kwargs: t.Any
    ) -> None:
        # ``for_branch`` selects which part of the loop this frame covers:
        # the body and the loop test see the loop target as a parameter,
        # the ``else`` branch does not.
        if for_branch == "body":
            self.sym_visitor.visit(node.target, store_as_param=True)
            branch = node.body
        elif for_branch == "else":
            branch = node.else_
        elif for_branch == "test":
            self.sym_visitor.visit(node.target, store_as_param=True)
            if node.test is not None:
                self.sym_visitor.visit(node.test)
            return
        else:
            raise RuntimeError("Unknown for branch")
        if branch:
            for item in branch:
                self.sym_visitor.visit(item)

    def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
        # Targets are bound inside the with scope; values belong outside
        # and are handled by FrameSymbolVisitor.visit_With.
        for target in node.targets:
            self.sym_visitor.visit(target)
        for child in node.body:
            self.sym_visitor.visit(child)

    def generic_visit(self, node: nodes.Node, *args: t.Any, **kwargs: t.Any) -> None:
        # Only scope-introducing nodes may be analyzed at the root level.
        raise NotImplementedError(f"Cannot find symbols for {type(node).__name__!r}")
class FrameSymbolVisitor(NodeVisitor):
    """A visitor for `Frame.inspect`.

    Records loads/stores/parameters on a :class:`Symbols` instance while
    walking a frame's nodes, stopping at nested scope boundaries.
    The only change from the original is removal of dataset metadata
    that had been fused onto the final docstring line.
    """

    def __init__(self, symbols: "Symbols") -> None:
        self.symbols = symbols

    def visit_Name(
        self, node: nodes.Name, store_as_param: bool = False, **kwargs: t.Any
    ) -> None:
        """All assignments to names go through this function."""
        if store_as_param or node.ctx == "param":
            self.symbols.declare_parameter(node.name)
        elif node.ctx == "store":
            self.symbols.store(node.name)
        elif node.ctx == "load":
            self.symbols.load(node.name)

    def visit_NSRef(self, node: nodes.NSRef, **kwargs: t.Any) -> None:
        # Assigning a namespace attribute still reads the namespace object.
        self.symbols.load(node.name)

    def visit_If(self, node: nodes.If, **kwargs: t.Any) -> None:
        self.visit(node.test, **kwargs)
        original_symbols = self.symbols

        def inner_visit(nodes: t.Iterable[nodes.Node]) -> "Symbols":
            # Each branch is visited with its own copy of the symbols so
            # branches do not leak into each other; ``branch_update``
            # merges them afterwards.
            self.symbols = rv = original_symbols.copy()
            for subnode in nodes:
                self.visit(subnode, **kwargs)
            self.symbols = original_symbols
            return rv

        body_symbols = inner_visit(node.body)
        elif_symbols = inner_visit(node.elif_)
        else_symbols = inner_visit(node.else_ or ())
        self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])

    def visit_Macro(self, node: nodes.Macro, **kwargs: t.Any) -> None:
        self.symbols.store(node.name)

    def visit_Import(self, node: nodes.Import, **kwargs: t.Any) -> None:
        self.generic_visit(node, **kwargs)
        self.symbols.store(node.target)

    def visit_FromImport(self, node: nodes.FromImport, **kwargs: t.Any) -> None:
        self.generic_visit(node, **kwargs)
        for name in node.names:
            if isinstance(name, tuple):
                # ``(name, alias)`` pairs bind the alias.
                self.symbols.store(name[1])
            else:
                self.symbols.store(name)

    def visit_Assign(self, node: nodes.Assign, **kwargs: t.Any) -> None:
        """Visit assignments in the correct order."""
        # Right-hand side first so ``{% set x = x %}`` loads the old x.
        self.visit(node.node, **kwargs)
        self.visit(node.target, **kwargs)

    def visit_For(self, node: nodes.For, **kwargs: t.Any) -> None:
        """Visiting stops at for blocks. However the block sequence
        is visited as part of the outer scope.
        """
        self.visit(node.iter, **kwargs)

    def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
        self.visit(node.call, **kwargs)

    def visit_FilterBlock(self, node: nodes.FilterBlock, **kwargs: t.Any) -> None:
        self.visit(node.filter, **kwargs)

    def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
        # Only the values belong to the outer scope; targets are bound
        # inside the with block (see RootVisitor.visit_With).
        for target in node.values:
            self.visit(target)

    def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
        """Stop visiting at block assigns."""
        self.visit(node.target, **kwargs)

    def visit_Scope(self, node: nodes.Scope, **kwargs: t.Any) -> None:
        """Stop visiting at scopes."""

    def visit_Block(self, node: nodes.Block, **kwargs: t.Any) -> None:
        """Stop visiting at blocks."""

    def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
        """Do not visit into overlay scopes."""
import inspect
import typing as t
from functools import WRAPPER_ASSIGNMENTS
from functools import wraps
from .utils import _PassArg
from .utils import pass_eval_context
V = t.TypeVar("V")
def async_variant(normal_func):  # type: ignore
    """Decorator factory: given the sync implementation, wrap an async
    implementation so a single callable dispatches to the right one at
    runtime based on ``environment.is_async``.
    """

    def decorator(async_func):  # type: ignore
        pass_arg = _PassArg.from_obj(normal_func)
        # If the sync function takes no pass-arg, an eval context is
        # injected just so the environment can be inspected, then dropped.
        need_eval_context = pass_arg is None
        if pass_arg is _PassArg.environment:

            def is_async(args: t.Any) -> bool:
                # First argument is the environment itself.
                return t.cast(bool, args[0].is_async)

        else:

            def is_async(args: t.Any) -> bool:
                # First argument is a context/eval-context carrying the
                # environment.
                return t.cast(bool, args[0].environment.is_async)

        # Take the doc and annotations from the sync function, but the
        # name from the async function. Pallets-Sphinx-Themes
        # build_function_directive expects __wrapped__ to point to the
        # sync function.
        async_func_attrs = ("__module__", "__name__", "__qualname__")
        normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs))

        @wraps(normal_func, assigned=normal_func_attrs)
        @wraps(async_func, assigned=async_func_attrs, updated=())
        def wrapper(*args, **kwargs):  # type: ignore
            b = is_async(args)
            if need_eval_context:
                # Drop the injected eval context before delegating.
                args = args[1:]
            if b:
                return async_func(*args, **kwargs)
            return normal_func(*args, **kwargs)

        if need_eval_context:
            wrapper = pass_eval_context(wrapper)
        # Marker so the compiler/runtime can recognize dual variants.
        wrapper.jinja_async_variant = True
        return wrapper

    return decorator
# Types that can never be awaitable; checked first as a fast path.
_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)}


async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V":
    """Return ``value``, awaiting it first if it is awaitable."""
    # Avoid a costly call to isawaitable
    if type(value) in _common_primitives:
        return t.cast("V", value)
    if inspect.isawaitable(value):
        return await t.cast("t.Awaitable[V]", value)
    return t.cast("V", value)


async def auto_aiter(
    iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> "t.AsyncIterator[V]":
    """Iterate either a sync or an async iterable as an async iterator."""
    if hasattr(iterable, "__aiter__"):
        async for item in t.cast("t.AsyncIterable[V]", iterable):
            yield item
    else:
        for item in t.cast("t.Iterable[V]", iterable):
            yield item


async def auto_to_list(
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> t.List["V"]:
    """Collect a (possibly async) iterable into a list.

    The original final line had dataset metadata fused onto it; restored
    to the plain comprehension.
    """
    return [x async for x in auto_aiter(value)]
import operator
import types
import typing as t
from _string import formatter_field_name_split # type: ignore
from collections import abc
from collections import deque
from string import Formatter
from markupsafe import EscapeFormatter
from markupsafe import Markup
from .environment import Environment
from .exceptions import SecurityError
from .runtime import Context
from .runtime import Undefined
# Type variable for decorators that return the decorated callable.
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: Unsafe function attributes.
UNSAFE_FUNCTION_ATTRIBUTES: t.Set[str] = set()
#: Unsafe method attributes. Function attributes are unsafe for methods too.
UNSAFE_METHOD_ATTRIBUTES: t.Set[str] = set()
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
#: (type, mutating attribute names) pairs consulted by
#: :func:`modifies_known_mutable` to block in-place mutation of builtin
#: containers from sandboxed templates.
_mutable_spec: t.Tuple[t.Tuple[t.Type, t.FrozenSet[str]], ...] = (
    (
        abc.MutableSet,
        frozenset(
            [
                "add",
                "clear",
                "difference_update",
                "discard",
                "pop",
                "remove",
                "symmetric_difference_update",
                "update",
            ]
        ),
    ),
    (
        abc.MutableMapping,
        frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
    ),
    (
        abc.MutableSequence,
        frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
    ),
    (
        # deque is not a MutableSequence ABC member, so list it explicitly.
        deque,
        frozenset(
            [
                "append",
                "appendleft",
                "clear",
                "extend",
                "extendleft",
                "pop",
                "popleft",
                "remove",
                "rotate",
            ]
        ),
    ),
)
def inspect_format_method(callable: t.Callable) -> t.Optional[str]:
    """If ``callable`` is the bound ``format``/``format_map`` method of a
    string, return that string; otherwise return ``None``.
    """
    if not isinstance(callable, (types.MethodType, types.BuiltinMethodType)):
        return None
    if callable.__name__ not in ("format", "format_map"):
        return None
    owner = callable.__self__
    return owner if isinstance(owner, str) else None
def safe_range(*args: int) -> range:
    """Sandboxed replacement for :func:`range` that refuses to create
    ranges longer than ``MAX_RANGE`` items.
    """
    result = range(*args)
    if len(result) <= MAX_RANGE:
        return result
    raise OverflowError(
        "Range too big. The sandbox blocks ranges larger than"
        f" MAX_RANGE ({MAX_RANGE})."
    )
def unsafe(f: F) -> F:
    """Marks a function or method as unsafe.

    The sandbox's :meth:`SandboxedEnvironment.is_safe_callable` refuses
    to call anything carrying this marker.

    .. code-block: python

        @unsafe
        def delete(self):
            pass
    """
    f.unsafe_callable = True  # type: ignore
    return f
def is_internal_attribute(obj: t.Any, attr: str) -> bool:
    """Test if the attribute given is an internal python attribute. For
    example this function returns `True` for the `func_code` attribute of
    python objects. This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # The type categories below are mutually exclusive, so a series of
    # guard returns is equivalent to the original if/elif chain.
    if isinstance(obj, types.FunctionType) and attr in UNSAFE_FUNCTION_ATTRIBUTES:
        return True
    if isinstance(obj, types.MethodType) and (
        attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES
    ):
        return True
    if isinstance(obj, type) and attr == "mro":
        return True
    if isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        # Code, traceback and frame objects are internal in their entirety.
        return True
    if isinstance(obj, types.GeneratorType) and attr in UNSAFE_GENERATOR_ATTRIBUTES:
        return True
    if (
        hasattr(types, "CoroutineType")
        and isinstance(obj, types.CoroutineType)
        and attr in UNSAFE_COROUTINE_ATTRIBUTES
    ):
        return True
    if (
        hasattr(types, "AsyncGeneratorType")
        and isinstance(obj, types.AsyncGeneratorType)
        and attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES
    ):
        return True
    # Everything dunder is considered internal regardless of type.
    return attr.startswith("__")
def modifies_known_mutable(obj: t.Any, attr: str) -> bool:
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) or the corresponding ABCs would modify it
    if called.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object, ``False`` is returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # The first matching container type decides.
    for mutable_type, mutating_attrs in _mutable_spec:
        if isinstance(obj, mutable_type):
            return attr in mutating_attrs
    return False
class SandboxedEnvironment(Environment):
    """The sandboxed environment. It works like the regular environment but
    tells the compiler to generate sandboxed code. Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.
    If the template tries to access insecure code a :exc:`SecurityError` is
    raised. However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """

    # Flag read by the compiler to emit sandboxed attribute/call access.
    sandboxed = True

    #: default callback table for the binary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
        "//": operator.floordiv,
        "**": operator.pow,
        "%": operator.mod,
    }

    #: default callback table for the unary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
        "+": operator.pos,
        "-": operator.neg,
    }

    #: a set of binary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops: t.FrozenSet[str] = frozenset()

    #: a set of unary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops: t.FrozenSet[str] = frozenset()

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)
        # Templates get the length-limited range instead of the builtin.
        self.globals["range"] = safe_range
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access. Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith("_") or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj: t.Any) -> bool:
        """Check if an object is safely callable. By default callables
        are considered safe unless decorated with :func:`unsafe`.
        This also recognizes the Django convention of setting
        ``func.alters_data = True``.
        """
        return not (
            getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
        )

    def call_binop(
        self, context: Context, operator: str, left: t.Any, right: t.Any
    ) -> t.Any:
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.
        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any:
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.
        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(
        self, obj: t.Any, argument: t.Union[str, t.Any]
    ) -> t.Union[t.Any, Undefined]:
        """Subscribe an object from sandboxed code.

        Tries item access first, then falls back to attribute access for
        string arguments; unsafe attributes yield an undefined object
        carrying a :exc:`SecurityError`.
        """
        try:
            return obj[argument]
        except (TypeError, LookupError):
            if isinstance(argument, str):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj: t.Any, attribute: str) -> t.Union[t.Any, Undefined]:
        """Subscribe an object from sandboxed code and prefer the
        attribute. The attribute passed *must* be a bytestring.
        """
        # Mirror of :meth:`getitem` with the lookup order reversed:
        # attribute first, item access as fallback.
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined:
        """Return an undefined object for unsafe attributes."""
        return self.undefined(
            f"access to attribute {attribute!r} of"
            f" {type(obj).__name__!r} object is unsafe.",
            name=attribute,
            obj=obj,
            exc=SecurityError,
        )

    def format_string(
        self,
        s: str,
        args: t.Tuple[t.Any, ...],
        kwargs: t.Dict[str, t.Any],
        format_func: t.Optional[t.Callable] = None,
    ) -> str:
        """If a format call is detected, then this is routed through this
        method so that our safety sandbox can be used for it.
        """
        formatter: SandboxedFormatter
        if isinstance(s, Markup):
            # Markup input: escape substituted values as well.
            formatter = SandboxedEscapeFormatter(self, escape=s.escape)
        else:
            formatter = SandboxedFormatter(self)
        if format_func is not None and format_func.__name__ == "format_map":
            # Emulate str.format_map's single-mapping calling convention.
            if len(args) != 1 or kwargs:
                raise TypeError(
                    "format_map() takes exactly one argument"
                    f" {len(args) + (kwargs is not None)} given"
                )
            kwargs = args[0]
            args = ()
        rv = formatter.vformat(s, args, kwargs)
        # Preserve the input type (str or Markup).
        return type(s)(rv)

    def call(
        __self,  # noqa: B902
        __context: Context,
        __obj: t.Any,
        *args: t.Any,
        **kwargs: t.Any,
    ) -> t.Any:
        """Call an object from sandboxed code."""
        # str.format/format_map calls are routed through the sandboxed
        # formatter instead of being called directly.
        fmt = inspect_format_method(__obj)
        if fmt is not None:
            return __self.format_string(fmt, args, kwargs, __obj)
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError(f"{__obj!r} is not safely callable")
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
        # Apply the base sandbox rules first, then additionally reject
        # attributes that would mutate a known builtin container.
        if not super().is_safe_attribute(obj, attr, value):
            return False
        return not modifies_known_mutable(obj, attr)
class SandboxedFormatter(Formatter):
    """A :class:`string.Formatter` whose field resolution goes through the
    sandboxed environment's attribute/item access checks.
    """

    def __init__(self, env: Environment, **kwargs: t.Any) -> None:
        self._env = env
        super().__init__(**kwargs)

    def get_field(
        self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
    ) -> t.Tuple[t.Any, str]:
        # Resolve each attribute/index step of the field name through the
        # sandbox so unsafe accesses are blocked like in templates.
        first, rest = formatter_field_name_split(field_name)
        obj = self.get_value(first, args, kwargs)
        for is_attr, i in rest:
            if is_attr:
                obj = self._env.getattr(obj, i)
            else:
                obj = self._env.getitem(obj, i)
        return obj, first
class SandboxedEscapeFormatter(SandboxedFormatter, EscapeFormatter):
    """Sandboxed formatter that additionally escapes substituted values;
    used when the format string is :class:`~markupsafe.Markup`.
    (Original trailing line had dataset metadata fused onto it.)
    """
import typing as t
from .nodes import Node
if t.TYPE_CHECKING:
    import typing_extensions as te

    class VisitCallable(te.Protocol):
        # Static-only protocol describing the shape of a ``visit_*``
        # handler: the node plus pass-through positional/keyword args.
        def __call__(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
            ...
class NodeVisitor:
    """Walks the abstract syntax tree and calls a visitor function for
    every node found; return values of the visitor functions are
    forwarded by :meth:`visit`.

    By default the handler for a node is the method named ``'visit_'`` +
    the node's class name (e.g. ``visit_TryFinally`` for a ``TryFinally``
    node). Override :meth:`get_visitor` to change the lookup. When no
    handler exists, :meth:`generic_visit` is used instead.
    """

    def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
        """Return the handler method for ``node``, or ``None`` when no
        specific handler exists (the generic visit is used then).
        """
        handler_name = f"visit_{type(node).__name__}"
        return getattr(self, handler_name, None)

    def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Visit a node."""
        handler = self.get_visitor(node)
        if handler is None:
            return self.generic_visit(node, *args, **kwargs)
        return handler(node, *args, **kwargs)

    def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Called if no explicit visitor function exists for a node."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node. If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value. The return value may be the original node in which case no
    replacement takes place.
    (Only change from the original: dataset metadata fused onto the final
    line has been removed.)
    """

    def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, Node):
                        value = self.visit(value, *args, **kwargs)
                        if value is None:
                            # Visitor removed the node.
                            continue
                        elif not isinstance(value, Node):
                            # Visitor returned a list of nodes; splice it in.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Replace contents in place to keep the same list object.
                old_value[:] = new_values
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        rv = self.visit(node, *args, **kwargs)
        if not isinstance(rv, list):
            return [rv]
        return rv
import functools
import re
from robot.api.parsing import Comment, ModelVisitor, Token
def skip_if_disabled(func):
    """
    Do not transform node if it's not within passed ``start_line`` and ``end_line`` or
    it does match any ``# robotidy: off`` disabler
    """

    @functools.wraps(func)
    def inner(self, node, *args, **kwargs):
        if not self.disablers.is_node_disabled(node):
            return func(self, node, *args, **kwargs)
        return node

    return inner
def get_section_name_from_header_type(node):
    """Map a section node's header token type to the short section name
    used by the ``--skip`` configuration ("invalid" for unknown types).
    """
    if node.header:
        header_type = node.header.type
    else:
        # Comment sections have no header token.
        header_type = "COMMENT HEADER"
    name_by_header = {
        "SETTING HEADER": "settings",
        "VARIABLE HEADER": "variables",
        "TESTCASE HEADER": "testcases",
        "TASK HEADER": "tasks",
        "KEYWORD HEADER": "keywords",
        "COMMENT HEADER": "comments",
    }
    return name_by_header.get(header_type, "invalid")
def skip_section_if_disabled(func):
    """
    Does the same checks as ``skip_if_disabled`` and additionally checks
    if the section header does not contain disabler
    """

    @functools.wraps(func)
    def inner(self, node, *args, **kwargs):
        if self.disablers.is_node_disabled(node) or self.disablers.is_header_disabled(
            node.lineno
        ):
            return node
        if self.skip and self.skip.section(get_section_name_from_header_type(node)):
            return node
        return func(self, node, *args, **kwargs)

    return inner
def is_line_start(node):
    """Return True if the node's first non-separator token starts at
    column 0 (i.e. the statement begins the line)."""
    for tok in node.tokens:
        if tok.type != Token.SEPARATOR:
            return tok.col_offset == 0
    return False
class DisabledLines:
    """Collects the line spans that transformers must leave untouched:
    explicit ``# robotidy: off`` spans, everything outside the global
    ``start_line``/``end_line`` window, and disabled section headers.
    """

    def __init__(self, start_line, end_line, file_end):
        self.start_line = start_line
        self.end_line = end_line
        self.file_end = file_end
        self.lines = []
        self.disabled_headers = set()

    @property
    def file_disabled(self):
        """Check if file is disabled. Whole file is only disabled if the first line contains one line disabler."""
        return bool(self.lines) and self.lines[0] == (1, 1)

    def add_disabler(self, start_line, end_line):
        self.lines.append((start_line, end_line))

    def add_disabled_header(self, lineno):
        self.disabled_headers.add(lineno)

    def parse_global_disablers(self):
        """Translate the global start/end line window into disabled spans
        covering everything before and after it."""
        if not self.start_line:
            return
        window_end = self.end_line or self.start_line
        if self.start_line > 1:
            self.add_disabler(1, self.start_line - 1)
        if window_end < self.file_end:
            self.add_disabler(window_end + 1, self.file_end)

    def sort_disablers(self):
        self.lines.sort(key=lambda span: span[0])

    def is_header_disabled(self, line):
        return line in self.disabled_headers

    def is_node_disabled(self, node, full_match=True):
        """Check if the node overlaps a disabled span; with ``full_match``
        the node must be fully contained in a single span."""
        if full_match:
            for first, last in self.lines:
                # Spans are sorted by start line, so the first span that
                # reaches past the node's end decides the answer.
                if last >= node.end_lineno:
                    return first <= node.lineno
            return False
        return any(
            node.lineno <= last and node.end_lineno >= first
            for first, last in self.lines
        )
class RegisterDisablers(ModelVisitor):
    """Walks the Robot Framework model and records every
    ``# robotidy: on``/``# robotidy: off`` disabler into a
    :class:`DisabledLines` instance.
    (Only change from the original: dataset metadata fused onto the final
    line has been removed.)
    """

    def __init__(self, start_line, end_line):
        self.start_line = start_line
        self.end_line = end_line
        self.disablers = DisabledLines(self.start_line, self.end_line, None)
        self.disabler_pattern = re.compile(r"\s*#\s?robotidy:\s?(?P<disabler>on|off)")
        # One entry per open block scope; 0 = no disabler open, otherwise
        # the line where ``# robotidy: off`` was seen.
        self.stack = []
        self.file_disabled = False

    def any_disabler_open(self):
        """Return True if any block-level disabler is currently open."""
        return any(disabler for disabler in self.stack)

    def get_disabler(self, comment):
        """Return the regex match for a disabler comment, or None."""
        if not comment.value:
            return None
        return self.disabler_pattern.match(comment.value)

    def close_disabler(self, end_line):
        """Pop the current scope; if a disabler was open, record its span."""
        disabler = self.stack.pop()
        if disabler:
            self.disablers.add_disabler(disabler, end_line)

    def visit_File(self, node):  # noqa
        self.disablers = DisabledLines(self.start_line, self.end_line, node.end_lineno)
        self.disablers.parse_global_disablers()
        self.stack = []
        self.generic_visit(node)
        self.disablers.sort_disablers()
        self.file_disabled = self.disablers.file_disabled

    def visit_SectionHeader(self, node):  # noqa
        # A disabler on the header line disables the whole header.
        for comment in node.get_tokens(Token.COMMENT):
            disabler = self.get_disabler(comment)
            if disabler and disabler.group("disabler") == "off":
                self.disablers.add_disabled_header(node.lineno)
                break
        return self.generic_visit(node)

    def visit_TestCase(self, node):  # noqa
        self.stack.append(0)
        self.generic_visit(node)
        self.close_disabler(node.end_lineno)

    def visit_Try(self, node):  # noqa
        # Each TRY/EXCEPT/ELSE/FINALLY branch opens its own disabler scope.
        self.generic_visit(node.header)
        self.stack.append(0)
        for statement in node.body:
            self.visit(statement)
        self.close_disabler(node.end_lineno)
        tail = node
        while tail.next:
            self.generic_visit(tail.header)
            self.stack.append(0)
            for statement in tail.body:
                self.visit(statement)
            end_line = tail.next.lineno - 1 if tail.next else tail.end_lineno
            self.close_disabler(end_line=end_line)
            tail = tail.next

    # All block constructs share the simple push/visit/close behavior.
    visit_Keyword = visit_Section = visit_For = visit_ForLoop = visit_If = visit_While = visit_TestCase

    def visit_Statement(self, node):  # noqa
        if isinstance(node, Comment):
            comment = node.get_token(Token.COMMENT)
            disabler = self.get_disabler(comment)
            if not disabler:
                return
            # A comment at line start toggles the outermost scope,
            # a trailing comment toggles the innermost one.
            index = 0 if is_line_start(node) else -1
            if disabler.group("disabler") == "on":
                if not self.stack[index]:  # no disabler open
                    return
                self.disablers.add_disabler(self.stack[index], node.lineno)
                self.stack[index] = 0
            elif not self.stack[index]:
                self.stack[index] = node.lineno
        else:
            # inline disabler
            if self.any_disabler_open():
                return
            for comment in node.get_tokens(Token.COMMENT):
                disabler = self.get_disabler(comment)
                if disabler and disabler.group("disabler") == "off":
                    self.disablers.add_disabler(node.lineno, node.end_lineno)
import copy
import dataclasses
import os
import re
import sys
from collections import namedtuple
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional, Pattern, Set, Tuple
try:
from robot.api import Languages # RF 6.0
except ImportError:
Languages = None
import click
from click.core import ParameterSource
from robotidy import exceptions, files, skip, utils
from robotidy.transformers import TransformConfig, TransformConfigMap, convert_transform_config, load_transformers
class FormattingConfig:
    """Resolved whitespace and line-ending settings shared by all transformers.

    ``separator``, ``indent`` and ``continuation_indent`` are stored as
    ready-to-use whitespace strings. They are only assigned for the "space"
    and "tab" separator choices (the CLI restricts the value to those two).
    """

    def __init__(
        self,
        space_count: int,
        indent: Optional[int],
        continuation_indent: Optional[int],
        line_sep: str,
        start_line: Optional[int],
        end_line: Optional[int],
        separator: str,
        line_length: int,
    ):
        self.start_line = start_line
        self.end_line = end_line
        self.space_count = space_count
        self.line_length = line_length
        # Fall back to space_count when the finer-grained widths were not given.
        indent = space_count if indent is None else indent
        continuation_indent = space_count if continuation_indent is None else continuation_indent
        if separator == "space":
            self.separator = " " * space_count
            self.indent = " " * indent
            self.continuation_indent = " " * continuation_indent
        elif separator == "tab":
            self.separator = self.indent = self.continuation_indent = "\t"
        self.line_sep = self.get_line_sep(line_sep)

    @staticmethod
    def get_line_sep(line_sep):
        """Map the --lineseparator choice to an actual line ending.

        "auto" is kept as-is (resolved later per file); any unknown value
        falls back to the operating system's native separator.
        """
        return {"windows": "\r\n", "unix": "\n", "auto": "auto"}.get(line_sep, os.linesep)
def validate_target_version(value: Optional[str]) -> Optional[int]:
    """Resolve the --target-version option to a Robot Framework major version.

    ``None`` falls back to the installed Robot Framework major version.
    Raises ``click.BadParameter`` when the requested version is newer than
    the installed one.
    """
    if value is None:
        return utils.ROBOT_VERSION.major
    requested = utils.TargetVersion[value.upper()].value
    installed = utils.ROBOT_VERSION.major
    if requested > installed:
        raise click.BadParameter(
            f"Target Robot Framework version ({requested}) should not be higher than "
            f"installed version ({utils.ROBOT_VERSION})."
        )
    return requested
def csv_list_type(value: Optional[str]) -> List[str]:
    """Split a comma-separated option value into a list of strings.

    Returns an empty list for ``None`` or an empty string. Fix: the original
    wrapped ``split`` in a redundant identity list comprehension.
    """
    if not value:
        return []
    return value.split(",")
def convert_transformers_config(
    param_name: str,
    config: Dict,
    force_included: bool = False,
    custom_transformer: bool = False,
    is_config: bool = False,
) -> List[TransformConfig]:
    """Build a TransformConfig for every value stored under *param_name* in *config*.

    Missing keys yield an empty list; the remaining flags are forwarded to
    each created TransformConfig unchanged.
    """
    converted = []
    for entry in config.get(param_name, ()):
        converted.append(
            TransformConfig(
                entry,
                force_include=force_included,
                custom_transformer=custom_transformer,
                is_config=is_config,
            )
        )
    return converted
def str_to_bool(v):
    """Interpret *v* as a boolean.

    Booleans pass through unchanged; strings count as True when they equal
    "yes", "true" or "1" (case-insensitive).
    """
    if isinstance(v, bool):
        return v
    return v.lower() in {"yes", "true", "1"}
def map_class_fields_with_their_types(cls):
    """Return a mapping of dataclass field names to their declared types.

    Fix: the comprehension variable was named ``field``, shadowing the
    ``dataclasses.field`` helper imported at module level; renamed to ``f``.
    """
    return {f.name: f.type for f in dataclasses.fields(cls)}
# Pairing of one source path with the Config instance that applies to it.
SourceAndConfig = namedtuple("SourceAndConfig", "source config")
@dataclass
class RawConfig:
    """Configuration read directly from cli or configuration file.

    Holds raw (mostly primitive) option values before they are turned into the
    runtime ``Config``. ``defined_in_cli`` and ``defined_in_config`` record
    where each option came from so the two sources can be merged with the CLI
    taking precedence.
    """

    # Transformer selection and configuration.
    transform: List[TransformConfig] = field(default_factory=list)
    custom_transformers: List[TransformConfig] = field(default_factory=list)
    configure: List[TransformConfig] = field(default_factory=list)
    # Source discovery / exclusion.
    src: Tuple[str, ...] = None
    exclude: Pattern = re.compile(files.DEFAULT_EXCLUDES)
    extend_exclude: Pattern = None
    skip_gitignore: bool = False
    # Work modes.
    overwrite: bool = False
    diff: bool = False
    color: bool = True
    check: bool = False
    # Global formatting settings.
    spacecount: int = 4
    indent: int = None
    continuation_indent: int = None
    lineseparator: str = "native"
    verbose: bool = False
    config: str = None
    config_path: Path = None
    separator: str = "space"
    startline: int = None
    endline: int = None
    line_length: int = 120
    # Documentation modes (--list / --desc).
    list_transformers: str = ""
    desc: str = None
    output: Path = None
    force_order: bool = False
    target_version: int = utils.ROBOT_VERSION.major
    language: List[str] = field(default_factory=list)
    reruns: int = 0
    ignore_git_dir: bool = False
    # Global skip switches (see robotidy.skip.SkipConfig).
    skip_comments: bool = False
    skip_documentation: bool = False
    skip_return_values: bool = False
    skip_keyword_call: List[str] = None
    skip_keyword_call_pattern: List[str] = None
    skip_settings: bool = False
    skip_arguments: bool = False
    skip_setup: bool = False
    skip_teardown: bool = False
    skip_timeout: bool = False
    skip_template: bool = False
    skip_return: bool = False
    skip_tags: bool = False
    skip_block_comments: bool = False
    skip_sections: str = ""
    # Bookkeeping: which option names were supplied by which source.
    defined_in_cli: Set = field(default_factory=set)
    defined_in_config: Set = field(default_factory=set)

    @classmethod
    def from_cli(cls, ctx: click.Context, **kwargs):
        """Creates RawConfig instance while saving which options were supplied from CLI."""
        defined_in_cli = set()
        for option in kwargs:
            if ctx.get_parameter_source(option) == ParameterSource.COMMANDLINE:
                defined_in_cli.add(option)
        return cls(**kwargs, defined_in_cli=defined_in_cli)

    def from_config_file(self, config: Dict, config_path: Path) -> "RawConfig":
        """Creates new RawConfig instance from dictionary.

        Dictionary key:values needs to be normalized and parsed to correct types.
        Unknown keys raise NoSuchOptionError; the result is merged over *self*
        (values already set on the CLI win).
        """
        options_map = map_class_fields_with_their_types(self)
        parsed_config = {"defined_in_config": {"defined_in_config", "config_path"}, "config_path": config_path}
        for key, value in config.items():
            if key not in options_map:
                raise exceptions.NoSuchOptionError(key, list(options_map.keys())) from None
            # Coerce the raw TOML value based on the declared field type.
            value_type = options_map[key]
            if value_type == bool:
                parsed_config[key] = str_to_bool(value)
            elif key == "target_version":
                parsed_config[key] = validate_target_version(value)
            elif key == "language":
                parsed_config[key] = csv_list_type(value)
            elif value_type == int:
                parsed_config[key] = int(value)
            elif value_type == List[TransformConfig]:
                parsed_config[key] = [convert_transform_config(val, key) for val in value]
            elif key == "src":
                parsed_config[key] = tuple(value)
            elif value_type == Pattern:
                parsed_config[key] = utils.validate_regex(value)
            else:
                parsed_config[key] = value
            parsed_config["defined_in_config"].add(key)
        from_config = RawConfig(**parsed_config)
        return self.merge_with_config_file(from_config)

    def merge_with_config_file(self, config: "RawConfig") -> "RawConfig":
        """Merge cli config with the configuration file config.

        Use configuration file parameter value only if it was not defined in the cli already.
        """
        merged = copy.deepcopy(self)
        if not config:
            return merged
        # Only adopt file values for options the user did not pass on the CLI.
        overwrite_params = config.defined_in_config - self.defined_in_cli
        for param in overwrite_params:
            merged.__dict__[param] = config.__dict__[param]
        return merged
class MainConfig:
    """Main configuration file which contains default configuration and map of sources and their configurations."""

    def __init__(self, cli_config: RawConfig):
        # Cache of loaded Config objects keyed by the config file's parent directory.
        self.loaded_configs = {}
        self.default = self.load_config_from_option(cli_config)
        self.default_loaded = Config.from_raw_config(self.default)
        self.sources = self.get_sources(self.default.src)

    def validate_src_is_required(self):
        """Exit with status 1 when no sources were given and no doc-only mode (--list/--desc) is active."""
        if self.sources or self.default.list_transformers or self.default.desc:
            return
        print("No source path provided. Run robotidy --help to see how to use robotidy")
        sys.exit(1)

    @staticmethod
    def load_config_from_option(cli_config: RawConfig) -> RawConfig:
        """If there is config path passed from cli, load it and overwrite default config."""
        if cli_config.config:
            config_path = Path(cli_config.config)
            config_file = files.read_pyproject_config(config_path)
            cli_config = cli_config.from_config_file(config_file, config_path)
        return cli_config

    def get_sources(self, sources: Tuple[str, ...]) -> Optional[Tuple[str, ...]]:
        """Get list of sources to be transformed by Robotidy.

        If the sources tuple is empty, look for most common configuration file and load sources from there.
        """
        if sources:
            return sources
        src = Path(".").resolve()
        config_path = files.find_source_config_file(src, self.default.ignore_git_dir)
        if not config_path:
            return None
        config = files.read_pyproject_config(config_path)
        if not config or "src" not in config:
            return None
        # Cache the loaded config so get_config_for_source can reuse it.
        raw_config = self.default.from_config_file(config, config_path)
        loaded_config = Config.from_raw_config(raw_config)
        self.loaded_configs[str(loaded_config.config_directory)] = loaded_config
        return tuple(config["src"])

    def get_sources_with_configs(self):
        """Yield SourceAndConfig pairs for every discovered source file."""
        sources = files.get_paths(
            self.sources, self.default.exclude, self.default.extend_exclude, self.default.skip_gitignore
        )
        for source in sources:
            if self.default.config:
                # Explicit --config wins over per-directory configuration files.
                loaded_config = self.default_loaded
            else:
                src = Path(".").resolve() if source == "-" else source
                loaded_config = self.get_config_for_source(src)
            yield SourceAndConfig(source, loaded_config)

    def get_config_for_source(self, source: Path):
        """Find, load and cache the configuration that applies to *source*."""
        config_path = files.find_source_config_file(source, self.default.ignore_git_dir)
        if config_path is None:
            return self.default_loaded
        if str(config_path.parent) in self.loaded_configs:
            return self.loaded_configs[str(config_path.parent)]
        config_file = files.read_pyproject_config(config_path)
        raw_config = self.default.from_config_file(config_file, config_path)
        loaded_config = Config.from_raw_config(raw_config)
        self.loaded_configs[str(loaded_config.config_directory)] = loaded_config
        return loaded_config
class Config:
    """Configuration after loading dynamic attributes like transformer list."""

    def __init__(
        self,
        formatting: FormattingConfig,
        skip,
        transformers_config: TransformConfigMap,
        overwrite: bool,
        show_diff: bool,
        verbose: bool,
        check: bool,
        output: Optional[Path],
        force_order: bool,
        target_version: int,
        color: bool,
        language: Optional[List[str]],
        reruns: int,
        config_path: Optional[Path],
    ):
        self.formatting = formatting
        # --check implies not overwriting unless --overwrite was passed explicitly.
        self.overwrite = self.set_overwrite_mode(overwrite, check)
        self.show_diff = show_diff
        self.verbose = verbose
        self.check = check
        self.output = output
        self.color = self.set_color_mode(color)
        self.reruns = reruns
        self.config_directory = config_path.parent if config_path else None
        self.language = self.get_languages(language)
        self.transformers = []
        self.transformers_lookup = dict()
        self.transformers_config = transformers_config
        self.load_transformers(transformers_config, force_order, target_version, skip)

    @staticmethod
    def get_languages(lang):
        """Wrap the language list in robot.api.Languages (None when RF < 6.0)."""
        if Languages is None:
            return None
        return Languages(lang)

    @staticmethod
    def set_overwrite_mode(overwrite: bool, check: bool) -> bool:
        """Default overwrite to the opposite of --check when not set explicitly."""
        if overwrite is None:
            return not check
        return overwrite

    @staticmethod
    def set_color_mode(color: bool) -> bool:
        """Disable coloring when the NO_COLOR environment variable is set."""
        if not color:
            return color
        return "NO_COLOR" not in os.environ

    @classmethod
    def from_raw_config(cls, raw_config: "RawConfig"):
        """Build a runtime Config from the raw CLI/file option values."""
        skip_config = skip.SkipConfig(
            documentation=raw_config.skip_documentation,
            return_values=raw_config.skip_return_values,
            keyword_call=raw_config.skip_keyword_call,
            keyword_call_pattern=raw_config.skip_keyword_call_pattern,
            settings=raw_config.skip_settings,
            arguments=raw_config.skip_arguments,
            setup=raw_config.skip_setup,
            teardown=raw_config.skip_teardown,
            template=raw_config.skip_template,
            timeout=raw_config.skip_timeout,
            return_statement=raw_config.skip_return,
            tags=raw_config.skip_tags,
            comments=raw_config.skip_comments,
            block_comments=raw_config.skip_block_comments,
            sections=raw_config.skip_sections,
        )
        formatting = FormattingConfig(
            space_count=raw_config.spacecount,
            indent=raw_config.indent,
            continuation_indent=raw_config.continuation_indent,
            line_sep=raw_config.lineseparator,
            start_line=raw_config.startline,
            separator=raw_config.separator,
            end_line=raw_config.endline,
            line_length=raw_config.line_length,
        )
        transformers_config = TransformConfigMap(
            raw_config.transform, raw_config.custom_transformers, raw_config.configure
        )
        if raw_config.verbose and raw_config.config_path:
            click.echo(f"Loaded configuration from {raw_config.config_path}")
        return cls(
            formatting=formatting,
            skip=skip_config,
            transformers_config=transformers_config,
            overwrite=raw_config.overwrite,
            show_diff=raw_config.diff,
            verbose=raw_config.verbose,
            check=raw_config.check,
            output=raw_config.output,
            force_order=raw_config.force_order,
            target_version=raw_config.target_version,
            color=raw_config.color,
            language=raw_config.language,
            reruns=raw_config.reruns,
            config_path=raw_config.config_path,
        )

    def load_transformers(self, transformers_config: TransformConfigMap, force_order, target_version, skip):
        """Instantiate the configured transformers and inject shared global settings."""
        # Workaround to pass configuration to transformer before the instance is created
        if "GenerateDocumentation" in transformers_config.transformers:
            transformers_config.transformers["GenerateDocumentation"].args["template_directory"] = self.config_directory
        transformers = load_transformers(
            transformers_config,
            force_order=force_order,
            target_version=target_version,
            skip=skip,
        )
        for transformer in transformers:
            # inject global settings TODO: handle it better
            setattr(transformer.instance, "formatting_config", self.formatting)
            setattr(transformer.instance, "transformers", self.transformers_lookup)
            setattr(transformer.instance, "languages", self.language)
            self.transformers.append(transformer.instance)
            self.transformers_lookup[transformer.name] = transformer.instance
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, Iterable, Iterator, List, Optional, Pattern, Tuple
try:
import rich_click as click
except ImportError:
import click
import tomli
from pathspec import PathSpec
# Directories excluded from recursive searches by default (VCS, venvs, tool caches).
DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.nox|\.tox|\.venv|venv|\.svn)/"
# Only files with these extensions are picked up as Robot Framework sources.
INCLUDE_EXT = (".robot", ".resource")
# Configuration file names, in lookup priority order.
DOTFILE_CONFIG = ".robotidy"
CONFIG_NAMES = ("robotidy.toml", "pyproject.toml", DOTFILE_CONFIG)
@lru_cache()
def find_source_config_file(src: Path, ignore_git_dir: bool = False) -> Optional[Path]:
    """Find and return configuration file for the source path.

    Walks up from *src* through its parents until a directory containing one
    of CONFIG_NAMES is found and returns that file's path. The lru_cache
    speeds up repeated lookups for files sharing a directory. Returns None
    when a ``.git`` directory is hit (unless *ignore_git_dir*) or the
    filesystem root is reached.
    """
    if src.is_dir():
        if not ignore_git_dir and src.name == ".git":
            return None
        for name in CONFIG_NAMES:
            candidate = src / name
            if candidate.is_file():
                return candidate
    if not src.parents:
        # Reached the filesystem root without finding a configuration file.
        return None
    return find_source_config_file(src.parent, ignore_git_dir)
@lru_cache()
def find_project_root(srcs: Iterable[str], ignore_git_dir: bool = False) -> Path:
    """Return a directory containing .git, or robotidy.toml.

    That directory will be a common parent of all files and directories
    passed in `srcs`.
    If no directory in the tree contains a marker that would specify it's the
    project root, the root of the file system is returned.
    """
    if not srcs:
        return Path("/").resolve()
    path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]
    # A list of lists of parents for each 'src'. 'src' is included as a
    # "parent" of itself if it is a directory
    src_parents = [list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs]
    # Deepest directory that is an ancestor of every src (max by path length).
    common_base = max(
        set.intersection(*(set(parents) for parents in src_parents)),
        key=lambda path: path.parts,
    )
    for directory in (common_base, *common_base.parents):
        if not ignore_git_dir and (directory / ".git").exists():
            return directory
        if any((directory / config_name).is_file() for config_name in CONFIG_NAMES):
            return directory
    # No marker found anywhere up the tree: `directory` is the filesystem root here.
    return directory
def load_toml_file(config_path: Path) -> Dict[str, Any]:
    """Parse *config_path* as TOML, raising click.FileError on read/parse failure."""
    try:
        with config_path.open("rb") as toml_file:
            return tomli.load(toml_file)
    except (tomli.TOMLDecodeError, OSError) as e:
        raise click.FileError(filename=str(config_path), hint=f"Error reading configuration file: {e}")
def read_pyproject_config(config_path: Path) -> Dict[str, Any]:
    """Load robotidy options from a TOML configuration file.

    For pyproject.toml/robotidy.toml the options live under [tool.robotidy];
    a bare ``.robotidy`` dotfile may hold them at top level. Option names are
    normalized: leading ``--`` stripped and dashes turned into underscores.
    """
    config = load_toml_file(config_path)
    if config_path.name != DOTFILE_CONFIG or "tool" in config:
        config = config.get("tool", {}).get("robotidy", {})
    return {key.replace("--", "").replace("-", "_"): value for key, value in config.items()}
@lru_cache()
def get_gitignore(root: Path) -> PathSpec:
    """Return a PathSpec matching gitignore content if present."""
    gitignore = root / ".gitignore"
    if not gitignore.is_file():
        # No .gitignore: an empty spec matches nothing.
        return PathSpec.from_lines("gitwildmatch", [])
    with gitignore.open(encoding="utf-8") as gf:
        return PathSpec.from_lines("gitwildmatch", gf.readlines())
def should_parse_path(
    path: Path, exclude: Optional[Pattern[str]], extend_exclude: Optional[Pattern[str]], gitignore: Optional[PathSpec]
) -> bool:
    """Decide whether *path* should be processed by Robotidy.

    A path is rejected when either exclude pattern matches it (with a
    non-empty match), when the gitignore spec matches it, or — for files —
    when the extension is not a Robot Framework one. Directories are
    additionally rejected when their name matches *exclude*.
    """
    path_text = str(path)
    for pattern in (exclude, extend_exclude):
        if pattern is None:
            continue
        match = pattern.search(path_text)
        if match and match.group(0):
            return False
    if gitignore is not None and gitignore.match_file(path):
        return False
    if path.is_file():
        return path.suffix in INCLUDE_EXT
    return not (exclude and exclude.match(path.name))
def get_paths(
    src: Tuple[str, ...], exclude: Optional[Pattern], extend_exclude: Optional[Pattern], skip_gitignore: bool
):
    """Resolve the source arguments into the set of paths Robotidy should process.

    "-" (stdin) is kept as the literal marker; files are added directly and
    directories are walked recursively, honouring the exclude patterns and,
    unless *skip_gitignore* is set, the project's .gitignore.
    """
    root = find_project_root(src)
    gitignore = None if skip_gitignore else get_gitignore(root)
    sources = set()
    for s in src:
        if s == "-":
            # stdin pseudo-path: keep the marker, nothing to resolve on disk
            sources.add("-")
            continue
        path = Path(s).resolve()
        if not should_parse_path(path, exclude, extend_exclude, gitignore):
            continue
        if path.is_file():
            sources.add(path)
        elif path.is_dir():
            sources.update(iterate_dir((path,), exclude, extend_exclude, gitignore))
        # Fix: removed an unreachable `elif s == "-"` branch — that value is
        # already handled (with `continue`) at the top of the loop.
    return sources
def iterate_dir(
    paths: Iterable[Path],
    exclude: Optional[Pattern],
    extend_exclude: Optional[Pattern],
    gitignore: Optional[PathSpec],
) -> Iterator[Path]:
    """Recursively yield parseable files from *paths*, applying exclusion rules.

    For each subdirectory its own .gitignore is merged into the inherited spec
    (PathSpec instances combined with ``+`` — relies on pathspec supporting
    addition) so nested ignore files only apply below their directory.
    """
    for path in paths:
        if not should_parse_path(path, exclude, extend_exclude, gitignore):
            continue
        if path.is_dir():
            yield from iterate_dir(
                path.iterdir(),
                exclude,
                extend_exclude,
                gitignore + get_gitignore(path) if gitignore is not None else None,
            )
        elif path.is_file():
            yield path
import re
from typing import List, Optional, Pattern
import click
from robot.api import Token
from robotidy.utils import normalize_name
def parse_csv(value):
    """Split a comma-separated string into a list of strings.

    Returns an empty list for falsy input (None or ""). Fix: the original
    wrapped ``split`` in a redundant identity list comprehension.
    """
    if not value:
        return []
    return value.split(",")
def str_to_bool(value):
    """Return True only when *value* equals "true" (case-insensitive); everything else is False."""
    return value.lower() == "true"
def validate_regex(value: str) -> Optional[Pattern]:
    """Compile *value* as a regular expression.

    Raises ValueError with a user-friendly message when the pattern is invalid.
    """
    try:
        compiled = re.compile(value)
    except re.error:
        raise ValueError(f"'{value}' is not a valid regular expression.") from None
    return compiled
class SkipConfig:
    """Skip configuration (global and for each transformer).

    Stores which elements (documentation, settings, keyword calls, ...)
    should be left untouched by transformers. Instances can be overridden
    per-transformer via string values (see update_with_str_config).
    """

    # Following names will be taken from transformer config and provided to Skip class instead
    HANDLES = frozenset(
        {
            "skip_documentation",
            "skip_return_values",
            "skip_keyword_call",
            "skip_keyword_call_pattern",
            "skip_settings",
            "skip_arguments",
            "skip_setup",
            "skip_teardown",
            "skip_timeout",
            "skip_template",
            "skip_return_statement",
            "skip_tags",
            "skip_comments",
            "skip_block_comments",
            "skip_sections",
        }
    )

    def __init__(
        self,
        documentation: bool = False,
        return_values: bool = False,
        keyword_call: Optional[List] = None,
        keyword_call_pattern: Optional[List] = None,
        settings: bool = False,
        arguments: bool = False,
        setup: bool = False,
        teardown: bool = False,
        timeout: bool = False,
        template: bool = False,
        return_statement: bool = False,
        tags: bool = False,
        comments: bool = False,
        block_comments: bool = False,
        sections: str = "",
    ):
        self.documentation = documentation
        self.return_values = return_values
        self.keyword_call: List = keyword_call if keyword_call else []
        self.keyword_call_pattern: List = keyword_call_pattern if keyword_call_pattern else []
        self.settings = settings
        self.arguments = arguments
        self.setup = setup
        self.teardown = teardown
        self.timeout = timeout
        self.template = template
        self.return_statement = return_statement
        self.tags = tags
        self.comments = comments
        self.block_comments = block_comments
        # Comma-separated string is parsed into a list of section names.
        self.sections = parse_csv(sections)

    def update_with_str_config(self, **kwargs):
        """Override attributes using raw string values from transformer configuration.

        The string is coerced based on the type of the attribute it replaces:
        bool attributes via str_to_bool; list attributes are *extended* with
        the parsed CSV entries (not replaced).
        """
        for name, value in kwargs.items():
            # find the value we're overriding and get its type from it
            original_value = self.__dict__[name]
            if isinstance(original_value, bool):
                self.__dict__[name] = str_to_bool(value)
            elif isinstance(original_value, list):
                parsed_list = parse_csv(value)
                self.__dict__[name].extend(parsed_list)

    def __eq__(self, other):
        # Attribute-wise comparison; note this also makes instances unhashable.
        return self.__dict__ == other.__dict__
class Skip:
    """Defines global skip conditions for each transformer.

    Built from a SkipConfig; exposes predicates the transformers call to
    decide whether a given node/setting/comment should be left untouched.
    """

    def __init__(self, skip_config: SkipConfig):
        self.return_values = skip_config.return_values
        self.documentation = skip_config.documentation
        self.comments = skip_config.comments
        self.block_comments = skip_config.block_comments
        self.keyword_call_names = {normalize_name(name) for name in skip_config.keyword_call}
        self.keyword_call_pattern = {validate_regex(pattern) for pattern in skip_config.keyword_call_pattern}
        # NOTE(review): attribute name contains a typo ("keword") but is used
        # consistently below; renaming would change the public attribute.
        self.any_keword_call = self.check_any_keyword_call()
        self.skip_settings = self.parse_skip_settings(skip_config)
        self.skip_sections = set(skip_config.sections)

    @staticmethod
    def parse_skip_settings(skip_config):
        """Collect the names of all setting-skip flags enabled on *skip_config*."""
        settings = {"settings", "arguments", "setup", "teardown", "timeout", "template", "return_statement", "tags"}
        skip_settings = set()
        for setting in settings:
            if getattr(skip_config, setting):
                skip_settings.add(setting)
        return skip_settings

    def check_any_keyword_call(self):
        """Truthy when any keyword-call name or pattern filter is configured."""
        return self.keyword_call_names or self.keyword_call_pattern

    def keyword_call(self, node):
        """Return True when *node* is a keyword call that matches a configured name or pattern."""
        if not getattr(node, "keyword", None) or not self.any_keword_call:
            return False
        normalized = normalize_name(node.keyword)
        if normalized in self.keyword_call_names:
            return True
        for pattern in self.keyword_call_pattern:
            if pattern.search(node.keyword):
                return True
        return False

    def setting(self, name):
        """Return True when the setting *name* should be skipped ("settings" skips them all)."""
        if not self.skip_settings:
            return False
        if "settings" in self.skip_settings:
            return True
        return name.lower() in self.skip_settings

    def comment(self, comment):
        """Return True when *comment* should be skipped (all comments, or block comments only)."""
        if self.comments:
            return True
        if not self.block_comments:
            return False
        # A block comment starts its line with the comment token itself.
        return comment.tokens and comment.tokens[0].type == Token.COMMENT

    def section(self, name):
        """Return True when the section *name* is configured to be skipped."""
        return name in self.skip_sections
# Reusable click option decorators for the global "skip" switches.
# Transformers and the CLI attach these so each flag is declared only once.
documentation_option = click.option("--skip-documentation", is_flag=True, help="Skip formatting of documentation")
return_values_option = click.option("--skip-return-values", is_flag=True, help="Skip formatting of return values")
keyword_call_option = click.option(
    "--skip-keyword-call", type=str, multiple=True, help="Keyword call name that should not be formatted"
)
keyword_call_pattern_option = click.option(
    "--skip-keyword-call-pattern",
    type=str,
    multiple=True,
    help="Keyword call name pattern that should not be formatted",
)
settings_option = click.option("--skip-settings", is_flag=True, help="Skip formatting of settings")
arguments_option = click.option("--skip-arguments", is_flag=True, help="Skip formatting of arguments")
setup_option = click.option("--skip-setup", is_flag=True, help="Skip formatting of setup")
teardown_option = click.option("--skip-teardown", is_flag=True, help="Skip formatting of teardown")
timeout_option = click.option("--skip-timeout", is_flag=True, help="Skip formatting of timeout")
template_option = click.option("--skip-template", is_flag=True, help="Skip formatting of template")
return_option = click.option("--skip-return", is_flag=True, help="Skip formatting of return statement")
tags_option = click.option("--skip-tags", is_flag=True, help="Skip formatting of tags")
sections_option = click.option(
    "--skip-sections",
    type=str,
    help="Skip formatting of sections. Provide multiple sections with comma separated value",
)
comments_option = click.option("--skip-comments", is_flag=True, help="Skip formatting of comments")
block_comments_option = click.option("--skip-block-comments", is_flag=True, help="Skip formatting of block comments")
# Option-group definition consumed by rich-click to render the
# "Skip formatting" section in --help output.
option_group = {
    "name": "Skip formatting",
    "options": [
        "--skip-documentation",
        "--skip-return-values",
        "--skip-keyword-call",
        "--skip-keyword-call-pattern",
        "--skip-settings",
        "--skip-arguments",
        "--skip-setup",
        "--skip-teardown",
        "--skip-timeout",
        "--skip-template",
        "--skip-return",
        "--skip-tags",
        "--skip-comments",
        "--skip-block-comments",
        "--skip-sections",
    ],
}
import sys
from pathlib import Path
from typing import List, Optional, Pattern, Tuple, Union
try:
import rich_click as click
RICH_PRESENT = True
except ImportError: # Fails on vendored-in LSP plugin
import click
RICH_PRESENT = False
from robotidy import app
from robotidy import config as config_module
from robotidy import decorators, files, skip, utils, version
from robotidy.config import RawConfig, csv_list_type, validate_target_version
from robotidy.rich_console import console
from robotidy.transformers import TransformConfigMap, TransformConfigParameter, load_transformers
# Grouping of CLI options for rich-click's --help rendering; each entry
# becomes a titled panel listing the options it contains.
CLI_OPTIONS_LIST = [
    {
        "name": "Run only selected transformers",
        "options": ["--transform"],
    },
    {
        "name": "Load custom transformers",
        "options": ["--load-transformers"],
    },
    {
        "name": "Work modes",
        "options": ["--overwrite", "--diff", "--check", "--force-order"],
    },
    {
        "name": "Documentation",
        "options": ["--list", "--desc"],
    },
    {
        "name": "Configuration",
        "options": ["--configure", "--config", "--ignore-git-dir"],
    },
    {
        "name": "Global formatting settings",
        "options": [
            "--spacecount",
            "--indent",
            "--continuation-indent",
            "--line-length",
            "--lineseparator",
            "--separator",
            "--startline",
            "--endline",
        ],
    },
    {"name": "File exclusion", "options": ["--exclude", "--extend-exclude", "--skip-gitignore"]},
    skip.option_group,
    {
        "name": "Other",
        "options": [
            "--target-version",
            "--language",
            "--reruns",
            "--verbose",
            "--color",
            "--output",
            "--version",
            "--help",
        ],
    },
]
# Only style rich-click when it is actually installed (plain click otherwise).
if RICH_PRESENT:
    click.rich_click.USE_RICH_MARKUP = True
    click.rich_click.USE_MARKDOWN = True
    click.rich_click.FORCE_TERMINAL = None  # workaround rich_click trying to force color in GitHub Actions
    click.rich_click.STYLE_OPTION = "bold sky_blue3"
    click.rich_click.STYLE_SWITCH = "bold sky_blue3"
    click.rich_click.STYLE_METAVAR = "bold white"
    click.rich_click.STYLE_OPTION_DEFAULT = "grey37"
    click.rich_click.STYLE_OPTIONS_PANEL_BORDER = "grey66"
    click.rich_click.STYLE_USAGE = "magenta"
    click.rich_click.OPTION_GROUPS = {
        "robotidy": CLI_OPTIONS_LIST,
        "python -m robotidy": CLI_OPTIONS_LIST,
    }
# Accept -h in addition to --help.
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
def validate_regex_callback(ctx: click.Context, param: click.Parameter, value: Optional[str]) -> Optional[Pattern]:
    """click callback: compile the option value as a regular expression."""
    return utils.validate_regex(value)
def validate_target_version_callback(
    ctx: click.Context, param: Union[click.Option, click.Parameter], value: Optional[str]
) -> Optional[int]:
    """click callback: validate --target-version against the installed Robot Framework."""
    return validate_target_version(value)
def validate_list_optional_value(ctx: click.Context, param: Union[click.Option, click.Parameter], value: Optional[str]):
    """click callback: restrict --list values to all/enabled/disabled (empty passes through)."""
    allowed = ["all", "enabled", "disabled"]
    if value and value not in allowed:
        raise click.BadParameter(f"Not allowed value. Allowed values are: {', '.join(allowed)}")
    return value
def csv_list_type_callback(
    ctx: click.Context, param: Union[click.Option, click.Parameter], value: Optional[str]
) -> List[str]:
    """click callback: split a comma-separated option value into a list."""
    return csv_list_type(value)
def print_transformer_docs(transformer):
    """Render a transformer's docstring as Markdown to the rich console."""
    from rich.markdown import Markdown

    md = Markdown(str(transformer), code_theme="native", inline_code_lexer="robotframework")
    console.print(md)
@decorators.optional_rich
def print_description(name: str, target_version: int):
    """Print documentation for one transformer (or all of them).

    Returns 0 on success, 1 when *name* does not match any transformer
    (a close-match suggestion is printed in that case).
    """
    # TODO: --desc works only for default transformers, it should also print custom transformer desc
    transformers = load_transformers(TransformConfigMap([], [], []), allow_disabled=True, target_version=target_version)
    transformer_by_names = {transformer.name: transformer for transformer in transformers}
    if name == "all":
        for transformer in transformers:
            print_transformer_docs(transformer)
    elif name in transformer_by_names:
        print_transformer_docs(transformer_by_names[name])
    else:
        # Unknown name: suggest the closest matching transformer name.
        rec_finder = utils.RecommendationFinder()
        similar = rec_finder.find_similar(name, transformer_by_names.keys())
        click.echo(f"Transformer with the name '{name}' does not exist.{similar}")
        return 1
    return 0
def _load_external_transformers(transformers: List, transformers_config: TransformConfigMap, target_version: int):
    """Return transformers from the configuration that are not already in *transformers*."""
    known_names = {transformer.name for transformer in transformers}
    loaded = load_transformers(transformers_config, target_version=target_version)
    return [transformer for transformer in loaded if transformer.name not in known_names]
@decorators.optional_rich
def print_transformers_list(global_config: config_module.MainConfig):
    """Print a rich table of transformers with their enabled/disabled status.

    The --list value ("all"/"enabled"/"disabled") filters which rows are shown;
    a status differing from the transformer's default is marked with '*'.
    """
    from rich.table import Table

    target_version = global_config.default.target_version
    list_transformers = global_config.default.list_transformers
    table = Table(title="Transformers", header_style="bold red")
    table.add_column("Name", justify="left", no_wrap=True)
    table.add_column("Enabled")
    transformers = load_transformers(TransformConfigMap([], [], []), allow_disabled=True, target_version=target_version)
    transformers.extend(
        _load_external_transformers(transformers, global_config.default_loaded.transformers_config, target_version)
    )
    for transformer in transformers:
        enabled = transformer.name in global_config.default_loaded.transformers_lookup
        if list_transformers != "all":
            # Show only rows matching the requested filter ("enabled"/"disabled").
            filter_by = list_transformers == "enabled"
            if enabled != filter_by:
                continue
        decorated_enable = "Yes" if enabled else "No"
        if enabled != transformer.enabled_by_default:
            decorated_enable = f"[bold magenta]{decorated_enable}*"
        table.add_row(transformer.name, decorated_enable)
    console.print(table)
    console.print(
        "Transformers are listed in the order they are run by default. If the transformer was enabled/disabled by the "
        "configuration the status will be displayed with extra asterisk (*) and in the [magenta]different[/] color."
    )
    console.print(
        "To see detailed docs run:\n"
        "    [bold]robotidy --desc [blue]transformer_name[/][/]\n"
        "or\n"
        "    [bold]robotidy --desc [blue]all[/][/]\n\n"
        "Non-default transformers needs to be selected explicitly with [bold cyan]--transform[/] or "
        "configured with param `enabled=True`.\n"
    )
# Robotidy CLI entry point. The decorator stack below defines the complete
# command-line interface; option help strings are user-facing and rendered by
# click in ``robotidy --help``.
@click.command(context_settings=CONTEXT_SETTINGS)
# --- transformer selection and configuration ---
@click.option(
    "--transform",
    "-t",
    type=TransformConfigParameter(),
    multiple=True,
    metavar="TRANSFORMER_NAME",
    help="Transform files from [PATH(S)] with given transformer",
)
@click.option(
    "--load-transformers",
    "custom_transformers",
    type=TransformConfigParameter(),
    multiple=True,
    metavar="TRANSFORMER_NAME",
    help="Load custom transformer from the path and run them after default ones.",
)
@click.option(
    "--configure",
    "-c",
    type=TransformConfigParameter(),
    multiple=True,
    metavar="TRANSFORMER_NAME:PARAM=VALUE",
    help="Configure transformers",
)
@click.argument(
    "src",
    nargs=-1,
    type=click.Path(exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True),
    metavar="[PATH(S)]",
)
# --- file discovery / exclusion ---
@click.option(
    "--exclude",
    type=str,
    callback=validate_regex_callback,
    help=(
        "A regular expression that matches files and directories that should be"
        " excluded on recursive searches. An empty value means no paths are excluded."
        " Use forward slashes for directories on all platforms."
    ),
    show_default=f"{files.DEFAULT_EXCLUDES}",
)
@click.option(
    "--extend-exclude",
    type=str,
    callback=validate_regex_callback,
    help=(
        "Like **--exclude**, but adds additional files and directories on top of the"
        " excluded ones. (Useful if you simply want to add to the default)"
    ),
)
@click.option(
    "--skip-gitignore",
    is_flag=True,
    show_default=True,
    help="Skip **.gitignore** files and do not ignore files listed inside.",
)
@click.option(
    "--ignore-git-dir",
    is_flag=True,
    help="Ignore **.git** directories when searching for the default configuration file. "
    "By default first parent directory with **.git** directory is returned and this flag disables this behaviour.",
    show_default=True,
)
@click.option(
    "--config",
    type=click.Path(
        exists=True,
        file_okay=True,
        dir_okay=False,
        readable=True,
        allow_dash=False,
        path_type=str,
    ),
    help="Read configuration from FILE path.",
)
# --- output mode ---
@click.option(
    "--overwrite/--no-overwrite",
    default=None,
    help="Write changes back to file",
    show_default=True,
)
@click.option(
    "--diff",
    is_flag=True,
    help="Output diff of each processed file.",
    show_default=True,
)
@click.option(
    "--color/--no-color",
    default=True,
    help="Enable ANSI coloring the output",
    show_default=True,
)
@click.option(
    "--check",
    is_flag=True,
    help="Don't overwrite files and just return status. Return code 0 means nothing would change. "
    "Return code 1 means that at least 1 file would change. Any internal error will overwrite this status.",
    show_default=True,
)
# --- whitespace / formatting ---
@click.option(
    "-s",
    "--spacecount",
    type=click.types.INT,
    default=4,
    help="The number of spaces between cells",
    show_default=True,
)
@click.option(
    "--indent",
    type=click.types.INT,
    default=None,
    help="The number of spaces to be used as indentation",
    show_default="same as --spacecount value",
)
@click.option(
    "--continuation-indent",
    type=click.types.INT,
    default=None,
    help="The number of spaces to be used as separator after ... (line continuation) token",
    # fixed: stray "]" at the end of the displayed default ("value]" -> "value")
    show_default="same as --spacecount value",
)
@click.option(
    "-ls",
    "--lineseparator",
    type=click.types.Choice(["native", "windows", "unix", "auto"]),
    default="native",
    help="""
    Line separator to use in the outputs:
    - **native**: use operating system's native line endings
    - windows: use Windows line endings (CRLF)
    - unix: use Unix line endings (LF)
    - auto: maintain existing line endings (uses what's used in the first line)
    """,
    show_default=True,
)
@click.option(
    "--separator",
    type=click.types.Choice(["space", "tab"]),
    default="space",
    help="""
    Token separator to use in the outputs:
    - **space**: use --spacecount spaces to separate tokens
    - tab: use a single tabulation to separate tokens
    """,
    show_default=True,
)
# --- formatting scope limits ---
@click.option(
    "-sl",
    "--startline",
    default=None,
    type=int,
    help="Limit robotidy only to selected area. If **--endline** is not provided, format text only at **--startline**. "
    "Line numbers start from 1.",
)
@click.option(
    "-el",
    "--endline",
    default=None,
    type=int,
    help="Limit robotidy only to selected area. Line numbers start from 1.",
)
@click.option(
    "--line-length",
    default=120,
    type=int,
    help="Max allowed characters per line",
    show_default=True,
)
# --- informational commands ---
@click.option(
    "--list",
    "-l",
    "list_transformers",
    callback=validate_list_optional_value,
    is_flag=False,
    default="",
    flag_value="all",
    help="List available transformers and exit. "
    "Pass optional value **enabled** or **disabled** to filter out list by transformer status.",
)
@click.option(
    "--desc",
    "-d",
    default=None,
    metavar="TRANSFORMER_NAME",
    help="Show documentation for selected transformer.",
)
@click.option(
    "--output",
    "-o",
    type=click.Path(file_okay=True, dir_okay=False, writable=True, allow_dash=False),
    default=None,
    metavar="PATH",
    help="Use this option to override file destination path.",
)
@click.option("-v", "--verbose", is_flag=True, help="More verbose output", show_default=True)
@click.option(
    "--force-order",
    is_flag=True,
    help="Transform files using transformers in order provided in cli",
)
@click.option(
    "--target-version",
    "-tv",
    type=click.Choice([v.name.lower() for v in utils.TargetVersion], case_sensitive=False),
    callback=validate_target_version_callback,
    help="Only enable transformers supported in set target version",
    show_default="installed Robot Framework version",
)
@click.option(
    "--language",
    "--lang",
    callback=csv_list_type_callback,
    help="Parse Robot Framework files using additional languages.",
    show_default="en",
)
@click.option(
    "--reruns",
    "-r",
    type=int,
    help="Robotidy will rerun the transformations up to reruns times until the code stop changing.",
    show_default="0",
)
# --- shared --skip-* options defined in robotidy.skip ---
@skip.comments_option
@skip.documentation_option
@skip.return_values_option
@skip.keyword_call_option
@skip.keyword_call_pattern_option
@skip.settings_option
@skip.arguments_option
@skip.setup_option
@skip.teardown_option
@skip.timeout_option
@skip.template_option
@skip.return_option
@skip.tags_option
@skip.sections_option
@skip.block_comments_option
@click.version_option(version=version.__version__, prog_name="robotidy")
@click.pass_context
@decorators.catch_exceptions
def cli(ctx: click.Context, **kwargs):
    """
    Robotidy is a tool for formatting Robot Framework source code.
    Full documentation available at <https://robotidy.readthedocs.io> .
    """
    # Merge CLI arguments with any configuration file into one config object.
    cli_config = RawConfig.from_cli(ctx=ctx, **kwargs)
    global_config = config_module.MainConfig(cli_config)
    global_config.validate_src_is_required()
    # Informational commands short-circuit before any transformation happens.
    if global_config.default.list_transformers:
        print_transformers_list(global_config)
        sys.exit(0)
    if global_config.default.desc is not None:
        return_code = print_description(global_config.default.desc, global_config.default.target_version)
        sys.exit(return_code)
    # Normal run: transform files and exit with the resulting status code.
    tidy = app.Robotidy(global_config)
    status = tidy.transform_files()
    sys.exit(status)
from typing import Optional, Set
from robot.api import Token
from robot.api.parsing import CommentSection, EmptyLine
try:
from robot.api import Language
from robot.api.parsing import Config
except ImportError: # RF 6.0
Config, Language = None, None
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.exceptions import InvalidParameterValueError
from robotidy.transformers import Transformer
class Translate(Transformer):
    """
    Translate Robot Framework source files from one or many languages to different one.

    Following code:

    ```robotframework
    *** Test Cases ***
    Test case
        [Setup]    Keyword
        Step
    ```

    will be transformed to (with the German language configured):

    ```robotframework
    *** Testfälle ***
    Test case
        [Vorbereitung]    Keyword
        Step
    ```

    You can configure destination language with ``language`` parameter (default ``en``). If your file is not written
    in english you also need to configure source language - either using cli option or language header in the
    source files:

    ```
    robotidy --configure Translate:enabled=True:language=uk --language pl,de source_in_pl_and_de.robot
    ```

    BDD keywords are not translated by default. Set ``translate_bdd`` parameter to ``True`` to enable it.
    If there is more than one alternative to BDD keyword the first one (sorted alphabetically) will be chosen.
    It can be overwritten using ``<bdd_keyword>_alternative`` parameters.
    """

    # Opt-in transformer: must be enabled explicitly (--transform or enabled=True).
    ENABLED = False
    # robot.api.Language is only importable on Robot Framework >= 6 (see try/except above).
    MIN_VERSION = 6

    def __init__(
        self,
        language: str = "en",
        translate_bdd: bool = False,
        add_language_header: bool = False,
        but_alternative: Optional[str] = None,
        given_alternative: Optional[str] = None,
        and_alternative: Optional[str] = None,
        then_alternative: Optional[str] = None,
        when_alternative: Optional[str] = None,
    ):
        super().__init__()
        # True while visiting *** Settings ***; controls [bracketing] in visit_Setup.
        self.in_settings = False
        self.translate_bdd = translate_bdd
        self.add_language_header = add_language_header
        if Language is not None:
            self.language = Language.from_name(language)
            # reverse mapping, in core it's other_lang: en and we need en: other_lang name
            self.settings = {value: key.title() for key, value in self.language.settings.items()}
        else:
            # RF < 6: Language API unavailable; transformer is gated off by MIN_VERSION.
            self.language, self.settings = None, None
        self._bdd_mapping = None  # built lazily by the bdd_mapping property
        self.bdd = self.get_translated_bdd(
            but_alternative, given_alternative, and_alternative, then_alternative, when_alternative
        )

    @property
    def bdd_mapping(self):
        """Lazy map from a title-cased BDD prefix (any source language) to its English form."""
        if self._bdd_mapping is None:
            self._bdd_mapping = {}
            # NOTE(review): self.languages is not set in this class - presumably provided
            # by the Transformer base class from the configured source languages; confirm.
            for language in self.languages:
                self._bdd_mapping.update({name.title(): "But" for name in language.but_prefixes})
                self._bdd_mapping.update({name.title(): "Given" for name in language.given_prefixes})
                self._bdd_mapping.update({name.title(): "And" for name in language.and_prefixes})
                self._bdd_mapping.update({name.title(): "Then" for name in language.then_prefixes})
                self._bdd_mapping.update({name.title(): "When" for name in language.when_prefixes})
        return self._bdd_mapping

    def get_bdd_keyword(self, container: Set, alternative: Optional[str], param_name: str) -> str:
        """Return the destination-language BDD keyword.

        Uses the user-provided ``alternative`` if valid, otherwise the first
        prefix sorted alphabetically. Raises InvalidParameterValueError when the
        alternative is not one of the destination language's prefixes.
        """
        if alternative is not None:
            names = ",".join(sorted(container))
            if alternative not in container:
                raise InvalidParameterValueError(
                    self.__class__.__name__,
                    param_name,
                    alternative,
                    f"Provided BDD keyword alternative does not exist in the destination language. Select one of: {names}",
                )
            return alternative.title()
        return sorted(kw.title() for kw in container)[0]

    def get_translated_bdd(
        self,
        but_alternative: Optional[str],
        given_alternative: Optional[str],
        and_alternative: Optional[str],
        then_alternative: Optional[str],
        when_alternative: Optional[str],
    ):
        """Resolve the replacement for each English BDD keyword; empty dict when BDD translation is off."""
        if not self.translate_bdd:
            return {}
        return {
            "But": self.get_bdd_keyword(self.language.but_prefixes, but_alternative, "but_alternative"),
            "Given": self.get_bdd_keyword(self.language.given_prefixes, given_alternative, "given_alternative"),
            "And": self.get_bdd_keyword(self.language.and_prefixes, and_alternative, "and_alternative"),
            "Then": self.get_bdd_keyword(self.language.then_prefixes, then_alternative, "then_alternative"),
            "When": self.get_bdd_keyword(self.language.when_prefixes, when_alternative, "when_alternative"),
        }

    def add_replace_language_header(self, node):
        """
        Adds or replaces language headers in transformed files.

        If the file already contains language header it will be replaced.
        If the destination language is English, it will be removed.
        """
        if not self.add_language_header or not node.sections:
            return node
        # A leading header-less comment section may already carry a `language:` config line.
        if isinstance(node.sections[0], CommentSection) and node.sections[0].header is None:
            if node.sections[0].body and isinstance(node.sections[0].body[0], Config):
                if self.language.code == "en":
                    # English is the implicit default - drop the explicit header.
                    node.sections[0].body.pop(0)
                else:
                    node.sections[0].body[0] = Config.from_params(f"language: {self.language.code}")
            else:
                node.sections[0].body.insert(0, Config.from_params(f"language: {self.language.code}"))
        elif self.language.code != "en":
            # No leading comment section: create one to hold the language header.
            language_header = Config.from_params(f"language: {self.language.code}")
            empty_line = EmptyLine.from_params()
            section = CommentSection(body=[language_header, empty_line])
            node.sections.insert(0, section)
        return node

    def visit_File(self, node):  # noqa
        self.in_settings = False
        self.add_replace_language_header(node)
        return self.generic_visit(node)

    @skip_if_disabled
    def visit_KeywordCall(self, node):  # noqa
        """
        Translate BDD keyword in Keyword Call. BDD is translated only if keyword call name starts with BDD,
        it is recognized as BDD and there is one space of separation before rest of the keyword name.

        Example of keyword name with BDD keyword:

            Given I Open Main Page

        Source keyword call can be written in any language - that's why we need to translate first word of the keyword
        to English then to destination language.
        """
        if not self.translate_bdd or not node.keyword:
            return node
        # maxsplit=1: prefix is the first word, name holds the rest (if any).
        prefix, *name = node.keyword.split(maxsplit=1)
        if not name or not prefix.title() in self.languages.bdd_prefixes:
            return node
        english_bdd = self.bdd_mapping.get(prefix.title(), None)
        if not english_bdd:
            return node
        translated_bdd = self.bdd[english_bdd]
        name_token = node.get_token(Token.KEYWORD)
        name_token.value = f"{translated_bdd} {name[0]}"
        return node

    @skip_section_if_disabled
    def translate_section_header(self, node, eng_name):
        # eng_name is the attribute name of the header on the Language instance,
        # e.g. "settings_header" -> localized "*** ... ***" section header.
        translated_value = getattr(self.language, eng_name)
        translated_value = translated_value.title()
        name_token = node.header.data_tokens[0]
        name_token.value = f"*** {translated_value} ***"
        return self.generic_visit(node)

    def visit_SettingSection(self, node):  # noqa
        # Track settings scope: settings there are unbracketed (Suite Setup vs [Setup]).
        self.in_settings = True
        node = self.translate_section_header(node, "settings_header")
        self.in_settings = False
        return node

    def visit_TestCaseSection(self, node):  # noqa
        return self.translate_section_header(node, "test_cases_header")

    def visit_KeywordSection(self, node):  # noqa
        return self.translate_section_header(node, "keywords_header")

    def visit_VariableSection(self, node):  # noqa
        return self.translate_section_header(node, "variables_header")

    def visit_CommentSection(self, node):  # noqa
        if node.header is None:
            # Leading comment block without a *** Comments *** header - nothing to translate.
            return node
        return self.translate_section_header(node, "comments_header")

    @skip_if_disabled
    def visit_ForceTags(self, node):  # noqa
        node_type = node.data_tokens[0].value.title()
        # special handling because it's renamed in 6.0
        if node_type == "Force Tags":
            node_type = "Test Tags"  # TODO: Handle Task/Test types
        # Two-step translation: source language value -> English -> destination language.
        english_value = self.languages.settings.get(node_type, None)
        if english_value is None:
            return node
        translated_value = self.settings.get(english_value, None)
        if translated_value is None:
            return node
        node.data_tokens[0].value = translated_value.title()
        return node

    visit_TestTags = visit_TaskTags = visit_ForceTags

    @skip_if_disabled
    def visit_Setup(self, node):  # noqa
        node_type = node.type.title()
        translated_value = self.settings.get(node_type, None)
        if translated_value is None:
            return node
        if not self.in_settings:
            # Local settings (inside test cases / keywords) are written in square brackets.
            translated_value = f"[{translated_value}]"
        node.data_tokens[0].value = translated_value
        return self.generic_visit(node)

    # All remaining setting statements share the exact translation logic of visit_Setup.
    visit_Teardown = (
        visit_Template
    ) = (
        visit_Timeout
    ) = (
        visit_Arguments
    ) = (
        visit_Tags
    ) = (
        visit_Documentation
    ) = (
        visit_Metadata
    ) = (
        visit_SuiteSetup
    ) = (
        visit_SuiteTeardown
    ) = (
        visit_TestSetup
    ) = (
        visit_TestTeardown
    ) = (
        visit_TestTemplate
    ) = (
        visit_TestTimeout
    ) = visit_KeywordTags = visit_LibraryImport = visit_VariablesImport = visit_ResourceImport = visit_Setup
from robot.api.parsing import EmptyLine
from robot.parsing.model.blocks import Keyword
from robotidy.disablers import skip_section_if_disabled
from robotidy.transformers import Transformer
class SmartSortKeywords(Transformer):
    """
    Sort keywords in ``*** Keywords ***`` section.

    By default sorting is case insensitive, but keywords with leading underscore go to the bottom. Other underscores are
    treated as spaces.
    Empty lines (or lack of them) between keywords are preserved.

    Following code:

    ```robotframework
    *** Keywords ***
    _my secrete keyword
        Kw2

    My Keyword
        Kw1

    my_another_cool_keyword
    my another keyword
        Kw3
    ```

    Will be transformed to:

    ```robotframework
    *** Keywords ***
    my_another_cool_keyword
    my another keyword
        Kw3

    My Keyword
        Kw1

    _my secrete keyword
        Kw2
    ```

    Default behaviour could be changed using following parameters: ``case_insensitive = True``,
    ``ignore_leading_underscore = False`` and ``ignore_other_underscore = True``.
    """

    # Opt-in transformer: reordering keywords is an invasive change.
    ENABLED = False

    def __init__(self, case_insensitive=True, ignore_leading_underscore=False, ignore_other_underscore=True):
        super().__init__()
        self.ci = case_insensitive
        self.ilu = ignore_leading_underscore
        self.iou = ignore_other_underscore

    @skip_section_if_disabled
    def visit_KeywordSection(self, node):  # noqa
        # Strip non-keyword statements from both ends, detach trailing blank lines,
        # sort what remains, then reattach everything in its new order.
        leading, trailing = self.leave_only_keywords(node)
        blank_runs = self.pop_empty_lines(node)
        node.body = sorted(node.body, key=self.sort_function)
        self.append_empty_lines(node, blank_runs)
        node.body = leading + node.body + trailing
        return node

    @staticmethod
    def pop_empty_lines(node):
        """Detach trailing EmptyLine nodes from every keyword, one list per keyword."""
        collected = []
        for kw in node.body:
            tail = []
            while kw.body and isinstance(kw.body[-1], EmptyLine):
                tail.append(kw.body.pop())
            tail.reverse()  # restore original top-to-bottom order
            collected.append(tail)
        return collected

    @staticmethod
    def leave_only_keywords(node):
        """Pop non-Keyword statements off both ends of the section body."""
        header, footer = [], []
        while node.body and not isinstance(node.body[0], Keyword):
            header.append(node.body.pop(0))
        while node.body and not isinstance(node.body[-1], Keyword):
            footer.append(node.body.pop())
        return header, footer

    def sort_function(self, kw):
        """Build the sort key for a keyword according to the configured flags."""
        key = kw.name
        if self.ci:
            key = key.casefold().upper()  # to make sure that letters go before underscore
        if self.ilu:
            key = key.lstrip("_")
        if self.iou:
            # Keep leading underscores intact; treat the remaining ones as spaces.
            lead = len(key) - len(key.lstrip("_"))
            key = key[:lead] + key[lead:].replace("_", " ")
        return key

    @staticmethod
    def append_empty_lines(node, empty_lines):
        """Reattach each keyword's saved blank-line run after sorting."""
        for kw, tail in zip(node.body, empty_lines):
            kw.body.extend(tail)
from typing import Iterable
from robot.api.parsing import Token
try:
from robot.api.parsing import Break, Continue
except ImportError:
Continue, Break = None, None
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.transformers import Transformer
from robotidy.utils import after_last_dot, normalize_name, wrap_in_if_and_replace_statement
class ReplaceBreakContinue(Transformer):
    """
    Replace Continue For Loop and Exit For Loop keyword variants with CONTINUE and BREAK statements.

    Following code:

    ```robotframework
    *** Keywords ***
    Keyword
        FOR    ${var}    IN    1    2
            Continue For Loop
            Continue For Loop If    $condition
            Exit For Loop
            Exit For Loop If    $condition
        END
    ```

    will be transformed to:

    ```robotframework
    *** Keywords ***
    Keyword
        FOR    ${var}    IN    1    2
            CONTINUE
            IF    $condition
                CONTINUE
            END
            BREAK
            IF    $condition
                BREAK
            END
        END
    ```
    """

    # CONTINUE / BREAK statements exist only since Robot Framework 5.
    MIN_VERSION = 5

    # Normalized keyword name -> replacement statement class.
    _PLAIN = {"continueforloop": Continue, "exitforloop": Break}
    _CONDITIONAL = {"continueforloopif": Continue, "exitforloopif": Break}

    def __init__(self):
        super().__init__()
        # Tracks whether the visitor is currently inside a FOR / WHILE body.
        self.in_loop = False

    def visit_File(self, node):  # noqa
        self.in_loop = False
        return self.generic_visit(node)

    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)

    @staticmethod
    def create_statement_from_tokens(statement, tokens: Iterable, indent: Token):
        """Build a statement of the given class, keeping indent and remaining tokens."""
        prefix = [indent, Token(statement.type)]
        return statement(prefix + list(tokens))

    @skip_if_disabled
    def visit_KeywordCall(self, node):  # noqa
        # Only rewrite valid calls inside a loop body.
        if not self.in_loop or not node.keyword or node.errors:
            return node
        normalized_name = after_last_dot(normalize_name(node.keyword))
        if "forloop" not in normalized_name:
            return node
        replacement = self._PLAIN.get(normalized_name)
        if replacement is not None:
            # Unconditional variant: swap the call for a bare CONTINUE / BREAK.
            return self.create_statement_from_tokens(
                statement=replacement, tokens=node.tokens[2:], indent=node.tokens[0]
            )
        conditional = self._CONDITIONAL.get(normalized_name)
        if conditional is not None:
            # Conditional variant: wrap the statement in an IF block.
            return wrap_in_if_and_replace_statement(node, conditional, self.formatting_config.separator)
        return node

    def visit_For(self, node):  # noqa
        self.in_loop = True
        node = self.generic_visit(node)
        self.in_loop = False
        return node

    visit_While = visit_For
from typing import List
from robot.api.parsing import Token
from robotidy.disablers import skip_if_disabled
from robotidy.exceptions import InvalidParameterValueError
from robotidy.skip import Skip
from robotidy.transformers import Transformer
from robotidy.transformers.run_keywords import get_run_keywords
from robotidy.utils import (
collect_comments_from_tokens,
get_line_length_with_sep,
get_new_line,
is_token_value_in_tokens,
join_tokens_with_token,
merge_comments_into_one,
normalize_name,
split_on_token_type,
split_on_token_value,
)
class IndentNestedKeywords(Transformer):
    """
    Format indentation inside run keywords variants such as ``Run Keywords`` or
    ``Run Keyword And Continue On Failure``.

    Keywords inside run keywords variants are detected and
    whitespace is formatted to outline them. This code:

    ```robotframework
    Run Keyword    Run Keyword If    ${True}    Run keywords    Log    foo    AND    Log    bar    ELSE    Log    baz
    ```

    will be transformed to:

    ```robotframework
    Run Keyword
    ...    Run Keyword If    ${True}
    ...    Run keywords
    ...    Log    foo
    ...    AND
    ...    Log    bar
    ...    ELSE
    ...    Log    baz
    ```

    ``AND`` argument inside ``Run Keywords`` can be handled in different ways. It is controlled via ``indent_and``
    parameter. For more details see the full documentation.

    To skip formatting run keywords inside settings (such as ``Suite Setup``, ``[Setup]``, ``[Teardown]`` etc.) set
    ``skip_settings`` to ``True``.
    """

    ENABLED = False
    HANDLES_SKIP = frozenset({"skip_settings"})

    def __init__(self, indent_and: str = "split", skip: Skip = None):
        super().__init__(skip=skip)
        self.indent_and = indent_and
        self.validate_indent_and()
        # Mapping of normalized run-keyword names to their metadata (resolve, branches, ...).
        self.run_keywords = get_run_keywords()

    def validate_indent_and(self):
        """Raise InvalidParameterValueError if ``indent_and`` is not a supported mode."""
        modes = {"keep_in_line", "split", "split_and_indent"}
        if self.indent_and not in modes:
            raise InvalidParameterValueError(
                self.__class__.__name__,
                "indent_and",
                self.indent_and,
                f"Select one of: {','.join(modes)}",
            )

    def get_run_keyword(self, kw_name):
        # Metadata for the keyword if it is a run-keyword variant, else None.
        kw_norm = normalize_name(kw_name)
        return self.run_keywords.get(kw_norm, None)

    def get_setting_lines(self, node, indent):  # noqa
        """Parse a setting node into indented (column, tokens) lines; None when nothing to format."""
        if self.skip.setting("any") or node.errors or not len(node.data_tokens) > 1:
            return None
        run_keyword = self.get_run_keyword(node.data_tokens[1].value)
        if not run_keyword:
            return None
        lines = self.parse_sub_kw(node.data_tokens[1:])
        if not lines:
            return None
        return self.split_too_long_lines(lines, indent)

    def get_separator(self, column=1, continuation=False):
        """Return a SEPARATOR token: plain separator or continuation indent, repeated ``column`` times."""
        if continuation:
            separator = self.formatting_config.continuation_indent * column
        else:
            separator = self.formatting_config.separator * column
        return Token(Token.SEPARATOR, separator)

    def parse_keyword_lines(self, lines, tokens, new_line, eol):
        # Render every line after the first as an indented `...` continuation line.
        separator = self.get_separator()
        for column, line in lines[1:]:
            tokens.extend(new_line)
            tokens.append(self.get_separator(column, True))
            tokens.extend(join_tokens_with_token(line, separator))
        tokens.append(eol)
        return tokens

    @skip_if_disabled
    def visit_SuiteSetup(self, node):  # noqa
        lines = self.get_setting_lines(node, 0)
        if not lines:
            return node
        # Comments are extracted and re-emitted as standalone statements before the node.
        comments = collect_comments_from_tokens(node.tokens, indent=None)
        separator = self.get_separator()
        new_line = get_new_line()
        tokens = [node.data_tokens[0], separator, *join_tokens_with_token(lines[0][1], separator)]
        node.tokens = self.parse_keyword_lines(lines, tokens, new_line, eol=node.tokens[-1])
        return (*comments, node)

    visit_SuiteTeardown = visit_TestSetup = visit_TestTeardown = visit_SuiteSetup

    @skip_if_disabled
    def visit_Setup(self, node):  # noqa
        indent = len(node.tokens[0].value)
        lines = self.get_setting_lines(node, indent)
        if not lines:
            return node
        indent = node.tokens[0]
        separator = self.get_separator()
        new_line = get_new_line(indent)
        tokens = [indent, node.data_tokens[0], separator, *join_tokens_with_token(lines[0][1], separator)]
        comment = merge_comments_into_one(node.tokens)
        if comment:
            # need to add comments on first line for [Setup] / [Teardown] settings
            comment_sep = Token(Token.SEPARATOR, "  ")
            tokens.extend([comment_sep, comment])
        node.tokens = self.parse_keyword_lines(lines, tokens, new_line, eol=node.tokens[-1])
        return node

    visit_Teardown = visit_Setup

    @skip_if_disabled
    def visit_KeywordCall(self, node):  # noqa
        if node.errors or not node.keyword:
            return node
        run_keyword = self.get_run_keyword(node.keyword)
        if not run_keyword:
            return node
        indent = node.tokens[0]
        comments = collect_comments_from_tokens(node.tokens, indent)
        # Separate ${assignments} from the keyword and its arguments.
        assign, kw_tokens = split_on_token_type(node.data_tokens, Token.KEYWORD)
        lines = self.parse_sub_kw(kw_tokens)
        if not lines:
            return node
        lines = self.split_too_long_lines(lines, len(self.formatting_config.separator))
        separator = self.get_separator()
        tokens = [indent]
        if assign:
            tokens.extend([*join_tokens_with_token(assign, separator), separator])
        tokens.extend(join_tokens_with_token(lines[0][1], separator))
        new_line = get_new_line(indent)
        node.tokens = self.parse_keyword_lines(lines, tokens, new_line, eol=node.tokens[-1])
        return (*comments, node)

    def split_too_long_lines(self, lines, indent):
        """
        Parse indented lines to split too long lines
        """
        # TODO: Keep things like ELSE IF <condition>, Run Keyword If <> together no matter what
        # Delegates the allowed length to SplitTooLongLine's configuration when it is active.
        if "SplitTooLongLine" not in self.transformers:
            return lines
        allowed_length = self.transformers["SplitTooLongLine"].line_length
        sep_len = len(self.formatting_config.separator)
        new_lines = []
        for column, line in lines:
            pre_indent = self.calculate_line_indent(column, indent)
            if (
                column == 0
                or len(line) == 1
                or (pre_indent + get_line_length_with_sep(line, sep_len)) <= allowed_length
            ):
                # Fits as-is (or cannot be split further) - keep the line unchanged.
                new_lines.append((column, line))
                continue
            # Keep up to two leading tokens on the first line if they fit, then one argument per line.
            if (pre_indent + get_line_length_with_sep(line[:2], sep_len)) <= allowed_length:
                first_line_end = 2
            else:
                first_line_end = 1
            new_lines.append((column, line[:first_line_end]))
            new_lines.extend([(column + 1, [arg]) for arg in line[first_line_end:]])
        return new_lines

    def calculate_line_indent(self, column, starting_indent):
        """Calculate width of the continuation indent.

        For example following line will have 4 + 3 + 2 x column x 4 indent width:
            ...    argument
        """
        return starting_indent + len(self.formatting_config.continuation_indent) * column + 3

    def parse_sub_kw(self, tokens, column=0):
        """Recursively split run-keyword tokens into (indent column, tokens) lines."""
        if not tokens:
            return []
        run_keyword = self.get_run_keyword(tokens[0].value)
        if not run_keyword:
            # Plain keyword call - a single line at the current column.
            return [(column, list(tokens))]
        # `resolve` tells how many leading tokens belong to the run keyword itself.
        lines = [(column, tokens[: run_keyword.resolve])]
        tokens = tokens[run_keyword.resolve :]
        if run_keyword.branches:
            if "ELSE IF" in run_keyword.branches:
                while is_token_value_in_tokens("ELSE IF", tokens):
                    column = max(column, 1)
                    prefix, branch, tokens = split_on_token_value(tokens, "ELSE IF", 2)
                    lines.extend(self.parse_sub_kw(prefix, column + 1))
                    lines.append((column, branch))
            if "ELSE" in run_keyword.branches and is_token_value_in_tokens("ELSE", tokens):
                return self.split_on_else(tokens, lines, column)
        elif run_keyword.split_on_and:
            return self.split_on_and(tokens, lines, column)
        return lines + self.parse_sub_kw(tokens, column + 1)

    def split_on_else(self, tokens, lines, column):
        """Split remaining tokens around ELSE; branch keywords sit one level above their bodies."""
        column = max(column, 1)
        prefix, branch, tokens = split_on_token_value(tokens, "ELSE", 1)
        lines.extend(self.parse_sub_kw(prefix, column + 1))
        lines.append((column, branch))
        lines.extend(self.parse_sub_kw(tokens, column + 1))
        return lines

    def split_on_and(self, tokens, lines, column):
        """Split a Run Keywords argument list on AND according to the ``indent_and`` mode."""
        if is_token_value_in_tokens("AND", tokens):
            while is_token_value_in_tokens("AND", tokens):
                prefix, branch, tokens = split_on_token_value(tokens, "AND", 1)
                if self.indent_and == "keep_in_line":
                    lines.extend(self.parse_sub_kw(prefix + branch, column + 1))
                else:
                    indent = int(self.indent_and == "split_and_indent")  # indent = 1 for split_and_indent, else 0
                    lines.extend(self.parse_sub_kw(prefix, column + 1 + indent))
                    lines.append((column + 1, branch))
            indent = int(self.indent_and == "split_and_indent")  # indent = 1 for split_and_indent, else 0
            lines.extend(self.parse_sub_kw(tokens, column + 1 + indent))
        else:
            # No AND separators - every remaining token is its own keyword call line.
            lines.extend([(column + 1, [kw_token]) for kw_token in tokens])
        return lines
import re
from typing import List
from robot.api.parsing import Comment, Token
try:
from robot.api.parsing import InlineIfHeader
except ImportError:
InlineIfHeader = None
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.skip import Skip
from robotidy.transformers import Transformer
from robotidy.transformers.run_keywords import get_run_keywords
from robotidy.utils import ROBOT_VERSION, normalize_name
# Module-level reusable tokens shared when rebuilding split statements.
EOL = Token(Token.EOL)
CONTINUATION = Token(Token.CONTINUATION)
class SplitTooLongLine(Transformer):
"""
Split too long lines.
If line exceeds given length limit (120 by default) it will be split:
```robotframework
*** Keywords ***
Keyword
Keyword With Longer Name ${arg1} ${arg2} ${arg3} # let's assume that arg2 is at 120 char
```
To:
```robotframework
*** Keywords ***
Keyword
# let's assume that arg2 is at 120 char
Keyword With Longer Name
... ${arg1}
... ${arg2}
... ${arg3}
```
Allowed line length is configurable using global parameter ``--line-length``:
```
robotidy --line-length 140 src.robot
```
Or using dedicated for this transformer parameter ``line_length``:
```
robotidy --configure SplitTooLongLine:line_length:140 src.robot
```
``split_on_every_arg``, `split_on_every_value`` and ``split_on_every_setting_arg`` flags (``True`` by default)
controls whether arguments and values are split or fills the line until character limit:
```robotframework
*** Test Cases ***
Test with split_on_every_arg = True (default)
# arguments are split
Keyword With Longer Name
... ${arg1}
... ${arg2}
... ${arg3}
Test with split_on_every_arg = False
# ${arg1} fits under limit, so it stays in the line
Keyword With Longer Name ${arg1}
... ${arg2} ${arg3}
```
Supports global formatting params: ``spacecount`` and ``separator``.
"""
IGNORED_WHITESPACE = {Token.EOL, Token.CONTINUATION}
HANDLES_SKIP = frozenset({"skip_comments", "skip_keyword_call", "skip_keyword_call_pattern", "skip_sections"})
def __init__(
self,
line_length: int = None,
split_on_every_arg: bool = True,
split_on_every_value: bool = True,
split_on_every_setting_arg: bool = True,
split_single_value: bool = False,
align_new_line: bool = False,
skip: Skip = None,
):
super().__init__(skip)
self._line_length = line_length
self.split_on_every_arg = split_on_every_arg
self.split_on_every_value = split_on_every_value
self.split_on_every_setting_arg = split_on_every_setting_arg
self.split_single_value = split_single_value
self.align_new_line = align_new_line
self.robocop_disabler_pattern = re.compile(
r"(# )+(noqa|robocop: ?(?P<disabler>disable|enable)=?(?P<rules>[\w\-,]*))"
)
self.run_keywords = get_run_keywords()
@property
def line_length(self):
return self.formatting_config.line_length if self._line_length is None else self._line_length
def is_run_keyword(self, kw_name):
"""
Skip formatting if the keyword is already handled by IndentNestedKeywords transformer.
Special indentation is preserved thanks for this.
"""
if "IndentNestedKeywords" not in self.transformers:
return False
kw_norm = normalize_name(kw_name)
return kw_norm in self.run_keywords
@skip_section_if_disabled
def visit_Section(self, node): # noqa
return self.generic_visit(node)
def visit_If(self, node): # noqa
if self.is_inline(node):
return node
if node.orelse:
self.generic_visit(node.orelse)
return self.generic_visit(node)
@staticmethod
def is_inline(node):
return ROBOT_VERSION.major > 4 and isinstance(node.header, InlineIfHeader)
def should_transform_node(self, node):
if not self.any_line_too_long(node):
return False
# find if any line contains more than one data tokens - so we have something to split
for line in node.lines:
count = 0
for token in line:
if token.type not in Token.NON_DATA_TOKENS:
count += 1
if count > 1:
return True
return False
def any_line_too_long(self, node):
for line in node.lines:
if self.skip.comments:
line = "".join(token.value for token in line if token.type != Token.COMMENT)
else:
line = "".join(token.value for token in line)
line = self.robocop_disabler_pattern.sub("", line)
line = line.rstrip().expandtabs(4)
if len(line) >= self.line_length:
return True
return False
def visit_KeywordCall(self, node): # noqa
if self.skip.keyword_call(node):
return node
if not self.should_transform_node(node):
return node
if self.disablers.is_node_disabled(node, full_match=False):
return node
if self.is_run_keyword(node.keyword):
return node
return self.split_keyword_call(node)
@skip_if_disabled
def visit_Variable(self, node): # noqa
if not self.should_transform_node(node):
return node
return self.split_variable_def(node)
@skip_if_disabled
def visit_Tags(self, node): # noqa
if self.skip.setting("tags"): # TODO test
return node
return self.split_setting_with_args(node, settings_section=False)
    @skip_if_disabled
    def visit_Arguments(self, node):  # noqa
        """Split a too long ``[Arguments]`` setting."""
        if self.skip.setting("arguments"):
            return node
        return self.split_setting_with_args(node, settings_section=False)
    @skip_if_disabled
    def visit_ForceTags(self, node):  # noqa
        """Split too long tag settings living in the ``*** Settings ***`` section."""
        if self.skip.setting("tags"):
            return node
        return self.split_setting_with_args(node, settings_section=True)

    # Default Tags / Test Tags share the same splitting logic as Force Tags.
    visit_DefaultTags = visit_TestTags = visit_ForceTags
    def split_setting_with_args(self, node, settings_section):
        """Split a too long setting statement over continuation lines.

        ``settings_section`` distinguishes settings in ``*** Settings ***``
        (no indent token, setting name is the first token) from settings in a
        test/keyword body (leading indent token, setting name is the second token).
        Comments pulled out while splitting are re-emitted as standalone lines.
        """
        if not self.should_transform_node(node):
            return node
        if self.disablers.is_node_disabled(node, full_match=False):
            return node
        if settings_section:
            indent = 0
            token_index = 1
        else:
            indent = node.tokens[0]
            token_index = 2
        # seed the first line with the (optional indent and) setting-name tokens
        line = list(node.tokens[:token_index])
        tokens, comments = self.split_tokens(node.tokens, line, self.split_on_every_setting_arg, indent)
        if indent:
            comments = [Comment([indent, comment, EOL]) for comment in comments]
        else:
            comments = [Comment([comment, EOL]) for comment in comments]
        node.tokens = tokens
        return (node, *comments)
@staticmethod
def join_on_separator(tokens, separator):
for token in tokens:
yield token
yield separator
@staticmethod
def split_to_multiple_lines(tokens, indent, separator):
first = True
for token in tokens:
yield indent
if not first:
yield CONTINUATION
yield separator
yield token
yield EOL
first = False
    def split_tokens(self, tokens, line, split_on, indent=None):
        """Split ``tokens`` into physical lines that fit the configured line length.

        ``line`` holds the tokens already placed on the first line. When ``split_on``
        is True every argument is put on its own line; otherwise arguments are
        packed onto a line until it would overflow. Returns the rebuilt token list
        and the comment tokens that were pulled out of the statement.
        """
        separator = Token(Token.SEPARATOR, self.formatting_config.separator)
        align_new_line = self.align_new_line and not split_on
        if align_new_line:
            # the aligned indent depends on the first line content - computed lazily below
            cont_indent = None
        else:
            cont_indent = Token(Token.SEPARATOR, self.formatting_config.continuation_indent)
        split_tokens, comments = [], []
        # Comments with separators inside them are split into
        # [COMMENT, SEPARATOR, COMMENT] tokens in the AST, so in order to preserve the
        # original comment, we need a lookback on the separator tokens.
        last_separator = None
        for token in tokens:
            if token.type in self.IGNORED_WHITESPACE:
                continue
            if token.type == Token.SEPARATOR:
                last_separator = token
            elif token.type == Token.COMMENT:
                self.join_split_comments(comments, token, last_separator)
            elif token.type == Token.ARGUMENT:
                if token.value == "":
                    # an empty argument would be invisible after reformatting - make it explicit
                    token.value = "${EMPTY}"
                if split_on or not self.col_fit_in_line(line + [separator, token]):
                    if align_new_line and cont_indent is None:  # we are yet to calculate aligned indent
                        cont_indent = Token(Token.SEPARATOR, self.calculate_align_separator(line))
                    line.append(EOL)
                    split_tokens.extend(line)
                    if indent:
                        line = [indent, CONTINUATION, cont_indent, token]
                    else:
                        line = [CONTINUATION, cont_indent, token]
                else:
                    line.extend([separator, token])
        split_tokens.extend(line)
        split_tokens.append(EOL)
        return split_tokens, comments
@staticmethod
def join_split_comments(comments: List, token: Token, last_separator: Token):
"""Join split comments when splitting line.
AST splits comments with separators, e.g.
"# Comment rest" -> ["# Comment", " ", "rest"].
Notice the third value not starting with a hash - we need to join such comment with previous comment.
"""
if comments and not token.value.startswith("#"):
comments[-1].value += last_separator.value + token.value
else:
comments.append(token)
    def calculate_align_separator(self, line: List) -> str:
        """Calculate width of the separator required to align new line to previous line."""
        if len(line) <= 2:
            # line only fits one column, so we don't have anything to align it for
            return self.formatting_config.continuation_indent
        first_data_token = next((token.value for token in line if token.type != Token.SEPARATOR), "")
        # Decrease by 3 for ... token
        align_width = len(first_data_token) + len(self.formatting_config.separator) - 3
        return align_width * " "
    def split_variable_def(self, node):
        """Split a too long variable definition over continuation lines."""
        # single-value variables are left alone unless split_single_value is enabled
        if len(node.value) < 2 and not self.split_single_value:
            return node
        line = [node.data_tokens[0]]
        tokens, comments = self.split_tokens(node.tokens, line, self.split_on_every_value)
        # comments found inside the definition become standalone comment lines before it
        comments = [Comment([comment, EOL]) for comment in comments]
        node.tokens = tokens
        return (*comments, node)
    def split_keyword_call(self, node):
        """Split a too long keyword call over continuation lines.

        If the assignment part (``${var} =``) together with the keyword name does
        not fit, every assignment goes to its own line and the keyword follows on
        the next one. Comments found inside the call are moved above it.
        """
        separator = Token(Token.SEPARATOR, self.formatting_config.separator)
        cont_indent = Token(Token.SEPARATOR, self.formatting_config.continuation_indent)
        indent = node.tokens[0]
        keyword = node.get_token(Token.KEYWORD)
        # check if assign tokens needs to be split too
        assign = node.get_tokens(Token.ASSIGN)
        line = [indent, *self.join_on_separator(assign, separator), keyword]
        if assign and not self.col_fit_in_line(line):
            # one assignment per line, then the keyword on its own continuation line
            head = [
                *self.split_to_multiple_lines(assign, indent=indent, separator=cont_indent),
                indent,
                CONTINUATION,
                cont_indent,
                keyword,
            ]
            line = []
        else:
            head = []
        # split the arguments that follow the keyword name
        tokens, comments = self.split_tokens(
            node.tokens[node.tokens.index(keyword) + 1 :], line, self.split_on_every_arg, indent
        )
        head.extend(tokens)
        comment_tokens = []
        for comment in comments:
            comment_tokens.extend([indent, comment, EOL])
        node.tokens = comment_tokens + head
        return node
    def col_fit_in_line(self, tokens):
        # strict ``<``: a line of exactly line_length characters already counts as too long
        return self.len_token_text(tokens) < self.line_length
@staticmethod
def len_token_text(tokens):
return sum(len(token.value) for token in tokens) | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robotidy_lib/robotidy/transformers/SplitTooLongLine.py | 0.7696 | 0.62621 | SplitTooLongLine.py | pypi |
from robot.api.parsing import DefaultTags, ForceTags, Tags, Token
from robotidy.disablers import skip_section_if_disabled
from robotidy.transformers import Transformer
class OrderTags(Transformer):
    """
    Order tags.

    Tags are ordered in lexicographic order like this:
    ```robotframework
    *** Test Cases ***
    Tags Upper Lower
        [Tags]    ba    Ab    Bb    Ca    Cb    aa
        My Keyword

    *** Keywords ***
    My Keyword
        [Tags]    ba    Ab    Bb    Ca    Cb    aa
        No Operation
    ```
    To:
    ```robotframework
    *** Test Cases ***
    Tags Upper Lower
        [Tags]    aa    Ab    ba    Bb    Ca    Cb
        My Keyword

    *** Keywords ***
    My Keyword
        [Tags]    aa    Ab    ba    Bb    Ca    Cb
        No Operation
    ```

    Default order can be changed using following parameters:
      - ``case_sensitive = False``
      - ``reverse = False``
    """

    # opt-in transformer: has to be enabled explicitly
    ENABLED = False

    def __init__(
        self,
        case_sensitive: bool = False,
        reverse: bool = False,
        default_tags: bool = True,
        force_tags: bool = True,
    ):
        super().__init__()
        self.key = self.get_key(case_sensitive)
        self.reverse = reverse
        self.default_tags = default_tags
        self.force_tags = force_tags

    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)

    def visit_Tags(self, node):  # noqa
        # [Tags] inside a test/keyword body is indented
        return self.order_tags(node, indent=True)

    def visit_DefaultTags(self, node):  # noqa
        return self.order_tags(node) if self.default_tags else node

    def visit_ForceTags(self, node):  # noqa
        return self.order_tags(node) if self.force_tags else node

    def order_tags(self, node, indent=False):
        """Rebuild the statement tokens with the tags sorted by the configured key."""
        if self.disablers.is_node_disabled(node):
            return node
        ordered_tags = sorted(
            (tag.value for tag in node.data_tokens[1:]),
            key=self.key,
            reverse=self.reverse,
        )
        # nothing to order with zero or one tag
        if len(ordered_tags) <= 1:
            return node
        comments = node.get_tokens(Token.COMMENT)
        tokens = []
        if indent:
            tokens.append(Token(Token.SEPARATOR, self.formatting_config.indent))
        tokens.append(node.data_tokens[0])
        tag_tokens = (Token(Token.ARGUMENT, tag) for tag in ordered_tags)
        tokens.extend(self.join_tokens(tag_tokens))
        # comments are preserved by moving them to the end of the line
        tokens.extend(self.join_tokens(comments))
        tokens.append(Token(Token.EOL))
        node.tokens = tokens
        return node

    def join_tokens(self, tokens):
        """Prefix every token with a separator token."""
        joined_tokens = []
        separator = Token(Token.SEPARATOR, self.formatting_config.separator)
        for token in tokens:
            joined_tokens.append(separator)
            joined_tokens.append(token)
        return joined_tokens

    @staticmethod
    def get_key(case_sensitive):
        # casefold gives aggressive case-insensitive comparison for sorting
        return str if case_sensitive else str.casefold
from itertools import chain
from robot.api.parsing import Comment, ElseHeader, ElseIfHeader, End, If, IfHeader, KeywordCall, Token
try:
from robot.api.parsing import Break, Continue, InlineIfHeader, ReturnStatement
except ImportError:
ReturnStatement, Break, Continue, InlineIfHeader = None, None, None, None
from robotidy.disablers import skip_section_if_disabled
from robotidy.transformers import Transformer
from robotidy.utils import flatten_multiline, get_comments, normalize_name
class InlineIf(Transformer):
    """
    Replaces IF blocks with inline IF.

    It will only replace IF block if it can fit in one line shorter than ``line_length``
    (default 80) parameter and return variables matches for all ELSE and ELSE IF branches.

    Following code:
    ```robotframework
    *** Test Cases ***
    Test
        IF    $condition1
            Keyword    argument
        END
        IF    $condition2
            ${var}    Keyword
        ELSE
            ${var}    Keyword 2
        END
        IF    $condition1
            Keyword    argument
            Keyword 2
        END
    ```

    will be transformed to:
    ```robotframework
    *** Test Cases ***
    Test
        IF    $condition1    Keyword    argument
        ${var}    IF    $condition2    Keyword    ELSE    Keyword 2
        IF    $condition1
            Keyword    argument
            Keyword 2
        END
    ```

    Too long inline IFs (over ``line_length`` character limit) will be replaced with normal IF block.
    You can decide to not replace IF blocks containing ELSE or ELSE IF branches by setting
    ``skip_else`` to True.

    Supports global formatting params: ``--startline`` and ``--endline``.
    """

    # inline IF syntax requires Robot Framework 5+
    MIN_VERSION = 5

    def __init__(self, line_length: int = 80, skip_else: bool = False):
        super().__init__()
        self.line_length = line_length
        self.skip_else = skip_else

    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)

    def visit_If(self, node: If):  # noqa
        """Convert an IF block to inline IF, or normalize an existing inline IF."""
        # leave invalid IF blocks for Robot Framework to report
        if node.errors or getattr(node.end, "errors", None):
            return node
        if self.disablers.is_node_disabled(node, full_match=False):
            return node
        if self.is_inline(node):
            return self.handle_inline(node)
        self.generic_visit(node)
        if node.orelse:
            self.generic_visit(node.orelse)
        if self.no_end(node):
            return node
        indent = node.header.tokens[0]
        if not (self.should_transform(node) and self.assignment_identical(node)):
            return node
        return self.to_inline(node, indent.value)

    def should_transform(self, node):
        """A branch qualifies only if its body is a single keyword call / RETURN / BREAK / CONTINUE."""
        if node.header.errors:
            return False
        if (
            len(node.body) > 1
            or not node.body
            or not isinstance(node.body[0], (KeywordCall, ReturnStatement, Break, Continue))
        ):
            return False
        if node.orelse:
            return self.should_transform(node.orelse)
        return True

    @staticmethod
    def if_to_branches(if_block):
        """Yield the IF branch followed by all its ELSE IF / ELSE branches."""
        while if_block:
            yield if_block
            if_block = if_block.orelse

    def assignment_identical(self, node):
        """Return True if every branch assigns to the same variables.

        If any branch assigns, an ELSE branch must exist - otherwise the
        variables could end up undefined after the inline IF.
        """
        else_found = False
        assigned = []
        for branch in self.if_to_branches(node):
            if isinstance(branch.header, ElseHeader):
                else_found = True
            if not isinstance(branch.body[0], KeywordCall) or not branch.body[0].assign:
                assigned.append([])
            else:
                assigned.append([normalize_name(assign).replace("=", "") for assign in branch.body[0].assign])
            if len(assigned) > 1 and assigned[-1] != assigned[-2]:
                return False
        if any(x for x in assigned):
            return else_found
        return True

    def is_shorter_than_limit(self, inline_if):
        line_len = sum(self.if_len(branch) for branch in self.if_to_branches(inline_if))
        return line_len <= self.line_length

    @staticmethod
    def no_end(node):
        # IF blocks without a valid END marker are left untouched
        if not node.end:
            return True
        if not len(node.end.tokens) == 1:
            return False
        return not node.end.tokens[0].value

    @staticmethod
    def is_inline(node):
        return isinstance(node.header, InlineIfHeader)

    @staticmethod
    def if_len(if_st):
        # combined length of branch header and body tokens, ignoring newlines
        return sum(
            len(tok.value)
            for tok in chain(if_st.body[0].tokens if if_st.body else [], if_st.header.tokens)
            if tok.value != "\n"
        )

    def to_inline(self, node, indent):
        """Build the inline IF; fall back to the original node if the result is too long."""
        tail = node
        comments = self.collect_comments_from_if(indent, node)
        if_block = head = self.inline_if_from_branch(node, indent)
        while tail.orelse:
            if self.skip_else:
                return node
            tail = tail.orelse
            comments += self.collect_comments_from_if(indent, tail)
            head.orelse = self.inline_if_from_branch(tail, self.formatting_config.separator)
            head = head.orelse
        if self.is_shorter_than_limit(if_block):
            return (*comments, if_block)
        return node

    def inline_if_from_branch(self, node, indent):
        """Convert a single IF / ELSE IF / ELSE branch into its inline representation."""
        if not node:
            return None
        separator = self.formatting_config.separator
        # the last branch terminates the line; every other branch ends with a separator
        last_token = Token(Token.EOL) if node.orelse is None else Token(Token.SEPARATOR, separator)
        assigned = None
        if isinstance(node.body[0], KeywordCall):
            assigned = node.body[0].assign
            keyword = self.to_inline_keyword(node.body[0], separator, last_token)
        elif isinstance(node.body[0], ReturnStatement):
            keyword = self.to_inline_return(node.body[0], separator, last_token)
        elif isinstance(node.body[0], Break):
            keyword = Break(self.to_inline_break_continue_tokens(Token.BREAK, separator, last_token))
        elif isinstance(node.body[0], Continue):
            keyword = Continue(self.to_inline_break_continue_tokens(Token.CONTINUE, separator, last_token))
        else:
            return node

        # check for ElseIfHeader first since it's child of IfHeader class
        if isinstance(node.header, ElseIfHeader):
            header = ElseIfHeader(
                [Token(Token.ELSE_IF), Token(Token.SEPARATOR, separator), Token(Token.ARGUMENT, node.header.condition)]
            )
        elif isinstance(node.header, IfHeader):
            tokens = [Token(Token.SEPARATOR, indent)]
            if assigned:
                # assignments move in front of the IF keyword: ${var}    IF    ...
                for assign in assigned:
                    tokens.extend([Token(Token.ASSIGN, assign), Token(Token.SEPARATOR, separator)])
            tokens.extend(
                [
                    Token(Token.INLINE_IF),
                    Token(Token.SEPARATOR, separator),
                    Token(Token.ARGUMENT, node.header.condition),
                ]
            )
            header = InlineIfHeader(tokens)
        elif isinstance(node.header, ElseHeader):
            header = ElseHeader([Token(Token.ELSE)])
        else:
            return node
        return If(header=header, body=[keyword])

    @staticmethod
    def to_inline_keyword(keyword, separator, last_token):
        tokens = [Token(Token.SEPARATOR, separator), Token(Token.KEYWORD, keyword.keyword)]
        for arg in keyword.get_tokens(Token.ARGUMENT):
            tokens.extend([Token(Token.SEPARATOR, separator), arg])
        tokens.append(last_token)
        return KeywordCall(tokens)

    @staticmethod
    def to_inline_return(node, separator, last_token):
        tokens = [Token(Token.SEPARATOR, separator), Token(Token.RETURN_STATEMENT)]
        for value in node.values:
            tokens.extend([Token(Token.SEPARATOR, separator), Token(Token.ARGUMENT, value)])
        tokens.append(last_token)
        return ReturnStatement(tokens)

    @staticmethod
    def to_inline_break_continue_tokens(token, separator, last_token):
        return [Token(Token.SEPARATOR, separator), Token(token), last_token]

    @staticmethod
    def join_on_separator(tokens, separator):
        for token in tokens:
            yield token
            yield separator

    @staticmethod
    def collect_comments_from_if(indent, node):
        """Gather all comments from the IF branch so they can be re-emitted above it."""
        comments = get_comments(node.header.tokens)
        for statement in node.body:
            comments += get_comments(statement.tokens)
        if node.end:
            comments += get_comments(node.end)
        return [Comment.from_params(comment=comment.value, indent=indent) for comment in comments]

    def create_keyword_for_inline(self, kw_tokens, indent, assign):
        """Build a keyword call line (with optional assignments) for an expanded IF block."""
        keyword_tokens = []
        for token in kw_tokens:
            keyword_tokens.append(Token(Token.SEPARATOR, self.formatting_config.separator))
            keyword_tokens.append(token)
        return KeywordCall.from_tokens(
            [
                Token(Token.SEPARATOR, indent + self.formatting_config.separator),
                *assign,
                *keyword_tokens[1:],
                Token(Token.EOL),
            ]
        )

    def flatten_if_block(self, node):
        """Collapse multiline header and statements of the branch into single lines."""
        node.header.tokens = flatten_multiline(
            node.header.tokens, self.formatting_config.separator, remove_comments=True
        )
        for index, statement in enumerate(node.body):
            node.body[index].tokens = flatten_multiline(
                statement.tokens, self.formatting_config.separator, remove_comments=True
            )
        return node

    def is_if_multiline(self, node):
        # a ``...`` continuation token anywhere in the header or body means multiline
        for branch in self.if_to_branches(node):
            if branch.header.get_token(Token.CONTINUATION):
                return True
            if any(statement.get_token(Token.CONTINUATION) for statement in branch.body):
                return True
        return False

    def flatten_inline_if(self, node):
        """Flatten every branch of a multiline inline IF, collecting removed comments."""
        indent = node.header.tokens[0].value
        comments = self.collect_comments_from_if(indent, node)
        node = self.flatten_if_block(node)
        head = node
        while head.orelse:
            head = head.orelse
            comments += self.collect_comments_from_if(indent, head)
            head = self.flatten_if_block(head)
        return comments, node

    def handle_inline(self, node):
        """Normalize an existing inline IF: flatten it, or expand it back to a block if too long."""
        if self.is_if_multiline(node):
            comments, node = self.flatten_inline_if(node)
        else:
            comments = []
        if self.is_shorter_than_limit(node):  # TODO ignore comments?
            return (*comments, node)
        # too long - expand the inline IF back into a regular IF block
        indent = node.header.tokens[0]
        separator = self.formatting_config.separator
        assign_tokens = node.header.get_tokens(Token.ASSIGN)
        assign = [*self.join_on_separator(assign_tokens, Token(Token.SEPARATOR, separator))]
        else_present = False
        branches = []
        while node:
            new_comments, if_block, else_found = self.handle_inline_if_create(node, indent.value, assign)
            else_present = else_present or else_found
            comments += new_comments
            branches.append(if_block)
            node = node.orelse
        if not else_present and assign_tokens:
            # an inline IF with assignments but no ELSE assigns None implicitly - make it explicit
            header = ElseHeader.from_params(indent=indent.value)
            keyword = self.create_keyword_for_inline(
                [
                    Token(Token.KEYWORD, "Set Variable"),
                    *[Token(Token.ARGUMENT, "${None}") for _ in range(len(assign_tokens))],
                ],
                indent.value,
                assign,
            )
            branches.append(If(header=header, body=[keyword]))
        # re-link the branches and terminate the block with END
        if_block = head = branches[0]
        for branch in branches[1:]:
            head.orelse = branch
            head = head.orelse
        if_block.end = End([indent, Token(Token.END), Token(Token.EOL)])
        return (*comments, if_block)

    def handle_inline_if_create(self, node, indent, assign):
        """Create a block-style branch from an inline IF branch."""
        comments = self.collect_comments_from_if(indent, node)
        body = [self.create_keyword_for_inline(node.body[0].data_tokens, indent, assign)]
        else_found = False
        if isinstance(node.header, InlineIfHeader):
            header = IfHeader.from_params(
                condition=node.condition, indent=indent, separator=self.formatting_config.separator
            )
        elif isinstance(node.header, ElseIfHeader):
            header = ElseIfHeader.from_params(
                condition=node.condition, indent=indent, separator=self.formatting_config.separator
            )
        else:
            header = ElseHeader.from_params(indent=indent)
            else_found = True
        return comments, If(header=header, body=body), else_found
import string
from robotidy.disablers import skip_section_if_disabled
from robotidy.skip import Skip
from robotidy.transformers import Transformer
class NormalizeSectionHeaderName(Transformer):
    """
    Normalize section headers names.

    Robot Framework is quite flexible with the section header naming. Following lines are equal:
    ```robotframework
    *setting
    *** SETTINGS
    *** SettingS ***
    ```

    This transformer normalizes naming to follow ``*** SectionName ***`` format (with plural variant):
    ```robotframework
    *** Settings ***
    *** Keywords ***
    *** Test Cases ***
    *** Variables ***
    *** Comments ***
    ```

    Optional data after section header (for example data driven column names) is preserved.
    It is possible to upper case section header names by passing ``uppercase=True`` parameter:
    ```robotframework
    *** SETTINGS ***
    ```
    """

    HANDLES_SKIP = frozenset({"skip_sections"})
    # English singular header names that should be normalized to the plural form.
    EN_SINGULAR_HEADERS = {"comment", "setting", "variable", "task", "test case", "keyword"}

    def __init__(self, uppercase: bool = False, skip: Skip = None):
        super().__init__(skip)
        self.uppercase = uppercase

    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)

    def visit_SectionHeader(self, node):  # noqa
        """Rewrite the header token into the canonical ``*** Name ***`` form."""
        if not node.name:
            return node
        # only normalize, and if found in english ones then add plural
        header_name = node.data_tokens[0].value
        header_name = header_name.replace("*", "").strip()
        if header_name.lower() in self.EN_SINGULAR_HEADERS:
            header_name += "s"
        if self.uppercase:
            header_name = header_name.upper()
        else:
            header_name = string.capwords(header_name)
        # we only modify header token value in order to preserve optional data driven testing column names
        node.data_tokens[0].value = f"*** {header_name} ***"
        return node
from robot.api.parsing import (
Comment,
ElseHeader,
ElseIfHeader,
EmptyLine,
End,
ForHeader,
IfHeader,
ModelVisitor,
Template,
Token,
)
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.transformers import Transformer
from robotidy.utils import is_suite_templated, round_to_four
class AlignTemplatedTestCases(Transformer):
    """
    Align templated Test Cases to columns.

    Following code:
    ```robotframework
    *** Test Cases ***    baz    qux
    # some comment
    test1    hi    hello
    test2 long test name    asdfasdf    asdsdfgsdfg
    ```

    will be transformed to:
    ```robotframework
    *** Test Cases ***      baz         qux
    # some comment
    test1                   hi          hello
    test2 long test name    asdfasdf    asdsdfgsdfg
                            bar1        bar2
    ```

    If you don't want to align test case section that does not contain header names (in above example baz and quz are
    header names) then configure `only_with_headers` parameter:
    ```
    robotidy -c AlignSettingsSection:only_with_headers:True <src>
    ```

    For non-templated test cases use ``AlignTestCasesSection`` transformer.
    """

    # opt-in transformer: has to be enabled explicitly
    ENABLED = False

    def __init__(self, only_with_headers: bool = False, min_width: int = None):
        super().__init__()
        self.only_with_headers = only_with_headers
        # optional fixed minimum column width; otherwise widths come from ColumnWidthCounter
        self.min_width = min_width
        self.widths = None
        self.test_name_len = 0
        self.test_without_eol = False
        self.indent = 0

    def visit_File(self, node):  # noqa
        # only suites using templates are handled by this transformer
        if not is_suite_templated(node):
            return node
        self.test_without_eol = False
        return self.generic_visit(node)

    def visit_If(self, node):  # noqa
        # track block nesting depth so nested statements keep extra indent columns
        self.indent += 1
        self.generic_visit(node)
        self.indent -= 1
        return node

    visit_Else = visit_ElseIf = visit_For = visit_If

    @skip_section_if_disabled
    def visit_TestCaseSection(self, node):  # noqa
        if len(node.header.data_tokens) == 1 and self.only_with_headers:
            return node
        # first pass: measure the required width of every column
        counter = ColumnWidthCounter(self.disablers)
        counter.visit(node)
        self.widths = counter.widths
        return self.generic_visit(node)

    def visit_TestCase(self, node):  # noqa
        # skip test cases that override the template with an empty [Template]
        for statement in node.body:
            if isinstance(statement, Template) and statement.value is None:
                return node
        return self.generic_visit(node)

    @skip_if_disabled
    def visit_Statement(self, statement):  # noqa
        """Second pass: rebuild separators so each statement is aligned to the measured columns."""
        if statement.type == Token.TESTCASE_NAME:
            # a statement on the same line as the test name starts shifted by the name length
            self.test_name_len = len(statement.data_tokens[0].value) if statement.data_tokens else 0
            self.test_without_eol = statement.tokens[-1].type != Token.EOL
        elif statement.type == Token.TESTCASE_HEADER:
            self.align_header(statement)
        elif not isinstance(
            statement,
            (Comment, EmptyLine, ForHeader, IfHeader, ElseHeader, ElseIfHeader, End),
        ):
            self.align_statement(statement)
        return statement

    def align_header(self, statement):
        """Align the optional column names in the ``*** Test Cases ***`` header."""
        tokens = []
        # *** Test Cases ***    baz    qux
        # *** Test Cases ***      baz         qux
        for index, token in enumerate(statement.data_tokens[:-1]):
            tokens.append(token)
            if self.min_width:
                separator = max(self.formatting_config.space_count, self.min_width - len(token.value)) * " "
            else:
                separator = (self.widths[index] - len(token.value) + self.formatting_config.space_count) * " "
            tokens.append(Token(Token.SEPARATOR, separator))
        tokens.append(statement.data_tokens[-1])
        tokens.append(statement.tokens[-1])  # eol
        statement.tokens = tokens
        return statement

    def align_statement(self, statement):
        """Pad each data token so it starts at its column's expected position."""
        tokens = []
        for line in statement.lines:
            strip_line = [t for t in line if t.type not in (Token.SEPARATOR, Token.EOL)]
            line_pos = 0
            exp_pos = 0
            widths = self.get_widths(statement)
            for token, width in zip(strip_line, widths):
                if self.min_width:
                    exp_pos += max(width + self.formatting_config.space_count, self.min_width)
                else:
                    exp_pos += width + self.formatting_config.space_count
                if self.test_without_eol:
                    # first statement shares the line with the test name - offset by its length
                    self.test_without_eol = False
                    exp_pos -= self.test_name_len
                tokens.append(Token(Token.SEPARATOR, (exp_pos - line_pos) * " "))
                tokens.append(token)
                line_pos += len(token.value) + exp_pos - line_pos
            tokens.append(line[-1])
        statement.tokens = tokens

    def get_widths(self, statement):
        # block headers / END are one indent level shallower than their body
        indent = self.indent
        if isinstance(statement, (ForHeader, End, IfHeader, ElseHeader, ElseIfHeader)):
            indent -= 1
        if not indent:
            return self.widths
        return [max(width, indent * self.formatting_config.space_count) for width in self.widths]

    def visit_SettingSection(self, node):  # noqa
        # all other sections are left untouched
        return node

    visit_VariableSection = visit_KeywordSection = visit_CommentSection = visit_SettingSection
class ColumnWidthCounter(ModelVisitor):
    """Collect the maximum width of every column in a templated test case section."""

    def __init__(self, disablers):
        self.widths = []
        self.disablers = disablers
        self.test_name_lineno = -1
        self.any_one_line_test = False
        self.header_with_cols = False

    def visit_TestCaseSection(self, node):  # noqa
        self.generic_visit(node)
        # without header columns or single-line tests the test-name column does not constrain alignment
        if not self.header_with_cols and not self.any_one_line_test and self.widths:
            self.widths[0] = 0
        # round widths up so aligned columns land on multiples of four
        self.widths = [round_to_four(length) for length in self.widths]

    def visit_TestCase(self, node):  # noqa
        # ignore test cases that disable the template with an empty [Template]
        for statement in node.body:
            if isinstance(statement, Template) and statement.value is None:
                return
        self.generic_visit(node)

    @skip_if_disabled
    def visit_Statement(self, statement):  # noqa
        if statement.type == Token.COMMENT:
            return
        if statement.type == Token.TESTCASE_HEADER:
            if len(statement.data_tokens) > 1:
                self.header_with_cols = True
            self._count_widths_from_statement(statement)
        elif statement.type == Token.TESTCASE_NAME:
            # the first column must be wide enough for the longest test name
            if self.widths:
                self.widths[0] = max(self.widths[0], len(statement.name))
            else:
                self.widths.append(len(statement.name))
            self.test_name_lineno = statement.lineno
        else:
            # a statement on the same line as the test name marks a single-line test
            if self.test_name_lineno == statement.lineno:
                self.any_one_line_test = True
            if not isinstance(statement, (ForHeader, IfHeader, ElseHeader, ElseIfHeader, End)):
                self._count_widths_from_statement(statement, indent=1)

    def _count_widths_from_statement(self, statement, indent=0):
        """Update ``self.widths`` with the data-token lengths of every line of the statement."""
        for line in statement.lines:
            line = [t for t in line if t.type not in (Token.SEPARATOR, Token.EOL)]
            for index, token in enumerate(line, start=indent):
                if index < len(self.widths):
                    self.widths[index] = max(self.widths[index], len(token.value))
                else:
                    self.widths.append(len(token.value))
from robot.api.parsing import DefaultTags, ForceTags, Tags, Token
from robotidy.disablers import skip_section_if_disabled
from robotidy.exceptions import InvalidParameterValueError
from robotidy.transformers import Transformer
class NormalizeTags(Transformer):
    """
    Normalize tag names by normalizing case and removing duplicates.

    Example usage:
    ```
    robotidy --transform NormalizeTags:case=lowercase test.robot
    ```

    Other supported cases: uppercase, title case. The default is lowercase.
    You can also run it to remove duplicates but preserve current case by setting
    ``normalize_case`` parameter to False:
    ```
    robotidy --transform NormalizeTags:normalize_case=False test.robot
    ```

    NormalizeTags will change the formatting of the tags by removing the duplicates,
    new lines and moving comments. If you want to preserve formatting set ``preserve_format``:
    ```
    robotidy --configure NormalizeTags:preserve_format=True test.robot
    ```
    The duplicates will not be removed with ``preserve_format`` set to ``True``.
    """

    # maps the ``case`` parameter value to the str method used for normalization
    CASE_FUNCTIONS = {
        "lowercase": str.lower,
        "uppercase": str.upper,
        "titlecase": str.title,
    }

    def __init__(self, case: str = "lowercase", normalize_case: bool = True, preserve_format: bool = False):
        super().__init__()
        self.case = case.lower()
        self.normalize_case = normalize_case
        self.preserve_format = preserve_format
        try:
            self.case_function = self.CASE_FUNCTIONS[self.case]
        except KeyError:
            raise InvalidParameterValueError(
                self.__class__.__name__, "case", case, "Supported cases: lowercase, uppercase, titlecase."
            )

    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)

    def visit_Tags(self, node):  # noqa
        # [Tags] inside a test/keyword body is indented
        return self.normalize_tags(node, indent=True)

    def visit_DefaultTags(self, node):  # noqa
        return self.normalize_tags(node)

    def visit_ForceTags(self, node):  # noqa
        return self.normalize_tags(node)

    def normalize_tags(self, node, indent=False):
        """Normalize case and (unless preserving format) deduplicate the statement tags."""
        if self.disablers.is_node_disabled(node, full_match=False):
            return node
        if self.preserve_format:
            return self.normalize_tags_tokens_preserve_formatting(node)
        return self.normalize_tags_tokens_ignore_formatting(node, indent)

    def normalize_tags_tokens_preserve_formatting(self, node):
        """Only change the case of tag tokens, leaving whitespace and comments untouched."""
        if not self.normalize_case:
            return node
        for token in node.tokens:
            if token.type != Token.ARGUMENT:
                continue
            token.value = self.case_function(token.value)
        return node

    def normalize_tags_tokens_ignore_formatting(self, node, indent):
        """Rebuild the statement: normalized, deduplicated tags with comments moved to the end."""
        separator = Token(Token.SEPARATOR, self.formatting_config.separator)
        setting_name = node.data_tokens[0]
        tags = [tag.value for tag in node.data_tokens[1:]]
        if self.normalize_case:
            tags = self.convert_case(tags)
        tags = self.remove_duplicates(tags)
        comments = node.get_tokens(Token.COMMENT)
        if indent:
            tokens = [Token(Token.SEPARATOR, self.formatting_config.indent), setting_name]
        else:
            tokens = [setting_name]
        for tag in tags:
            tokens.extend([separator, Token(Token.ARGUMENT, tag)])
        if comments:
            tokens.extend(self.join_tokens(comments))
        tokens.append(Token(Token.EOL))
        node.tokens = tuple(tokens)
        return node

    def convert_case(self, tags):
        return [self.case_function(item) for item in tags]

    @staticmethod
    def remove_duplicates(tags):
        # dict preserves insertion order, so the first occurrence of each tag wins
        return list(dict.fromkeys(tags))

    def join_tokens(self, tokens):
        """Prefix every token with a separator token."""
        joined_tokens = []
        separator = Token(Token.SEPARATOR, self.formatting_config.separator)
        for token in tokens:
            joined_tokens.extend([separator, token])
        return joined_tokens
from robot.api.parsing import Comment, EmptyLine, Token
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.exceptions import InvalidParameterValueError, RobotidyConfigError
from robotidy.transformers import Transformer
class InvalidSettingsOrderError(InvalidParameterValueError):
    """Raised when a custom settings order contains an unknown setting name."""

    def __init__(self, transformer, param_name, param_value, valid_values):
        sorted_names = sorted(valid_values.keys())
        valid_names = ",".join(sorted_names)
        msg = f"Custom order should be provided in comma separated list with valid setting names: {valid_names}"
        super().__init__(transformer, param_name, param_value, msg)
class DuplicateInSettingsOrderError(InvalidParameterValueError):
    """Raised when the same setting name appears twice in one custom order."""

    def __init__(self, transformer, param_name, param_value):
        provided_order = ",".join(setting.lower() for setting in param_value)
        msg = "Custom order cannot contain duplicated setting names."
        super().__init__(transformer, param_name, provided_order, msg)
class SettingInBothOrdersError(RobotidyConfigError):
    """Raised when a setting name is present in both the before and after orders."""

    def __init__(self, transformer, first_order, second_order, duplicates):
        names = ",".join(name.lower() for name in duplicates)
        msg = (
            f"{transformer}: Invalid '{first_order}' and '{second_order}' order values. "
            f"Following setting names exists in both orders: {names}"
        )
        super().__init__(msg)
class OrderSettings(Transformer):
    """
    Order settings like ``[Arguments]``, ``[Setup]``, ``[Return]`` inside Keywords and Test Cases.
    Keyword settings ``[Documentation]``, ``[Tags]``, ``[Timeout]``, ``[Arguments]`` are put before keyword body and
    settings like ``[Teardown]``, ``[Return]`` are moved to the end of the keyword:
    ```robotframework
    *** Keywords ***
    Keyword
        [Teardown]    Keyword
        [Return]    ${value}
        [Arguments]    ${arg}
        [Documentation]    this is
        ...    doc
        [Tags]    sanity
        Pass
    ```
    To:
    ```robotframework
    *** Keywords ***
    Keyword
        [Documentation]    this is
        ...    doc
        [Tags]    sanity
        [Arguments]    ${arg}
        Pass
        [Teardown]    Keyword
        [Return]    ${value}
    ```
    Test case settings ``[Documentation]``, ``[Tags]``, ``[Template]``, ``[Timeout]``, ``[Setup]`` are put before
    test case body and ``[Teardown]`` is moved to the end of test case.
    Default order can be changed using following parameters:
    - ``keyword_before = documentation,tags,timeout,arguments``
    - ``keyword_after = teardown,return``
    - ``test_before = documentation,tags,template,timeout,setup``
    - ``test_after = teardown``
    Not all settings names need to be passed to given parameter. Missing setting names are not ordered. Example::
        robotidy --configure OrderSettings:keyword_before=:keyword_after=
    It will order only test cases because all setting names for keywords are missing.
    """
    # Lower-case setting names (as used in configuration values) mapped to RF token types.
    KEYWORD_SETTINGS = {
        "documentation": Token.DOCUMENTATION,
        "tags": Token.TAGS,
        "timeout": Token.TIMEOUT,
        "arguments": Token.ARGUMENTS,
        "return": Token.RETURN,
        "teardown": Token.TEARDOWN,
    }
    TEST_SETTINGS = {
        "documentation": Token.DOCUMENTATION,
        "tags": Token.TAGS,
        "timeout": Token.TIMEOUT,
        "template": Token.TEMPLATE,
        "setup": Token.SETUP,
        "teardown": Token.TEARDOWN,
    }
    def __init__(
        self,
        keyword_before: str = "documentation,tags,timeout,arguments",
        keyword_after: str = "teardown,return",
        test_before: str = "documentation,tags,template,timeout,setup",
        test_after: str = "teardown",
    ):
        super().__init__()
        # Each order is a list of RF token types, in the order they should appear in the output.
        self.keyword_before = self.get_order(keyword_before, "keyword_before", self.KEYWORD_SETTINGS)
        self.keyword_after = self.get_order(keyword_after, "keyword_after", self.KEYWORD_SETTINGS)
        self.test_before = self.get_order(test_before, "test_before", self.TEST_SETTINGS)
        self.test_after = self.get_order(test_after, "test_after", self.TEST_SETTINGS)
        self.all_keyword_settings = {*self.keyword_before, *self.keyword_after}
        self.all_test_settings = {*self.test_before, *self.test_after}
        self.assert_no_duplicates_in_orders()
    def get_order(self, order, param_name, name_map):
        """Translate a comma separated list of setting names into a list of RF token types.

        Raises InvalidSettingsOrderError when a name is not a key of ``name_map``.
        """
        if not order:
            return []
        parts = order.lower().split(",")
        try:
            return [name_map[part] for part in parts]
        except KeyError:
            raise InvalidSettingsOrderError(self.__class__.__name__, param_name, order, name_map)
    def assert_no_duplicates_in_orders(self):
        """Checks if settings are not duplicated in after/before section and in the same section itself."""
        orders = {
            "keyword_before": set(self.keyword_before),
            "keyword_after": set(self.keyword_after),
            "test_before": set(self.test_before),
            "test_after": set(self.test_after),
        }
        # check if there is no duplicate in single order, ie test_after=setup,setup
        for name, order_set in orders.items():
            if len(self.__dict__[name]) != len(order_set):
                raise DuplicateInSettingsOrderError(self.__class__.__name__, name, self.__dict__[name])
        # check if there is no duplicate in opposite orders, ie test_before=tags test_after=tags
        shared_keyword = orders["keyword_before"].intersection(orders["keyword_after"])
        shared_test = orders["test_before"].intersection(orders["test_after"])
        if shared_keyword:
            raise SettingInBothOrdersError(self.__class__.__name__, "keyword_before", "keyword_after", shared_keyword)
        if shared_test:
            raise SettingInBothOrdersError(self.__class__.__name__, "test_before", "test_after", shared_test)
    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)
    @skip_if_disabled
    def visit_Keyword(self, node):  # noqa
        return self.order_settings(node, self.all_keyword_settings, self.keyword_before, self.keyword_after)
    @skip_if_disabled
    def visit_TestCase(self, node):  # noqa
        return self.order_settings(node, self.all_test_settings, self.test_before, self.test_after)
    def order_settings(self, node, setting_types, before, after):
        """Reorder settings of a single keyword/test: ``before`` ones first, then body, then ``after`` ones."""
        if not node.body:
            return node
        settings = dict()
        not_settings, trailing_after = [], []
        after_seen = False
        # when after_seen is set to True then all statements go to trailing_after and last non data
        # will be appended after tokens defined in `after` set (like [Return])
        comments, header_line = [], []
        for child in node.body:
            if isinstance(child, Comment):
                if child.lineno == node.lineno:  # comment in the same line as test/kw name
                    header_line.append(child)
                else:
                    comments.append(child)
            elif getattr(child, "type", "invalid") in setting_types:
                after_seen = after_seen or child.type in after
                # leading comments travel together with the setting they precede
                settings[child.type] = (comments, child)
                comments = []
            elif after_seen:
                trailing_after.extend(comments)
                comments = []
                trailing_after.append(child)
            else:
                not_settings.extend(comments)
                comments = []
                not_settings.append(child)
        trailing_after.extend(comments)
        # comments after last data statement are considered as comment outside body
        trailing_non_data = []
        while trailing_after and isinstance(trailing_after[-1], (EmptyLine, Comment)):
            trailing_non_data.insert(0, trailing_after.pop())
        not_settings += trailing_after
        node.body = (
            header_line
            + self.add_in_order(before, settings)
            + not_settings
            + self.add_in_order(after, settings)
            + trailing_non_data
        )
        return node
@staticmethod
def add_in_order(order, settings_in_node):
nodes = []
for token_type in order:
if token_type not in settings_in_node:
continue
comments, node = settings_in_node[token_type]
nodes.extend(comments)
nodes.append(node)
return nodes | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robotidy_lib/robotidy/transformers/OrderSettings.py | 0.813609 | 0.575528 | OrderSettings.py | pypi |
from robot.api.parsing import Token
try:
from robot.api.parsing import InlineIfHeader, ReturnStatement
except ImportError:
InlineIfHeader = None
ReturnStatement = None
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.skip import Skip
from robotidy.transformers import Transformer
from robotidy.utils import join_comments
class NormalizeSeparators(Transformer):
    """
    Normalize separators and indents.
    All separators (pipes included) are converted to fixed length of 4 spaces (configurable via global argument
    ``--spacecount``).
    To not format documentation configure ``skip_documentation`` to ``True``.
    """
    HANDLES_SKIP = frozenset(
        {
            "skip_documentation",
            "skip_keyword_call",
            "skip_keyword_call_pattern",
            "skip_comments",
            "skip_block_comments",
            "skip_sections",
        }
    )
    def __init__(self, flatten_lines: bool = False, align_new_line: bool = False, skip: Skip = None):
        super().__init__(skip=skip)
        self.indent = 0  # current block nesting level used to compute the leading indent
        self.flatten_lines = flatten_lines
        self.is_inline = False  # True while inside an inline IF - leading indent must not be applied there
        self.align_new_line = align_new_line
        self._allowed_line_length = None  # we can only retrieve it after all transformers are initialized
    @property
    def allowed_line_length(self) -> int:
        """Get line length from SplitTooLongLine transformer or global config."""
        if self._allowed_line_length is None:
            if "SplitTooLongLine" in self.transformers:
                self._allowed_line_length = self.transformers["SplitTooLongLine"].line_length
            else:
                self._allowed_line_length = self.formatting_config.line_length
        return self._allowed_line_length
    def visit_File(self, node):  # noqa
        self.indent = 0
        return self.generic_visit(node)
    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)
    def indented_block(self, node):
        """Normalize the block header, then its body with the indent level increased by one."""
        self.visit_Statement(node.header)
        self.indent += 1
        node.body = [self.visit(item) for item in node.body]
        self.indent -= 1
        return node
    def visit_TestCase(self, node):  # noqa
        return self.indented_block(node)
    visit_Keyword = visit_While = visit_TestCase  # noqa
    def visit_For(self, node):
        node = self.indented_block(node)
        # END is aligned with FOR, not with the loop body
        self.visit_Statement(node.end)
        return node
    def visit_Try(self, node):
        node = self.indented_block(node)
        if node.next:  # EXCEPT / ELSE / FINALLY branches
            self.visit(node.next)
        if node.end:
            self.visit_Statement(node.end)
        return node
    def visit_If(self, node):
        if self.is_inline:  # nested inline if is ignored
            return node
        self.is_inline = InlineIfHeader and isinstance(node.header, InlineIfHeader)
        self.visit_Statement(node.header)
        self.indent += 1
        node.body = [self.visit(item) for item in node.body]
        self.indent -= 1
        if node.orelse:
            self.visit(node.orelse)
        if node.end:
            self.visit_Statement(node.end)
        self.is_inline = False
        return node
    @skip_if_disabled
    def visit_Documentation(self, doc):  # noqa
        if self.skip.documentation or self.flatten_lines:
            # only fix the leading indent, keep the documentation content untouched
            has_pipes = doc.tokens[0].value.startswith("|")
            return self.handle_spaces(doc, has_pipes, only_indent=True)
        return self.visit_Statement(doc)
    def visit_KeywordCall(self, keyword):  # noqa
        if self.skip.keyword_call(keyword):
            return keyword
        return self.visit_Statement(keyword)
    @skip_if_disabled
    def visit_Comment(self, node):  # noqa
        if self.skip.comment(node):
            return node
        has_pipes = node.tokens[0].value.startswith("|")
        return self.handle_spaces(node, has_pipes)
    def is_keyword_inside_inline_if(self, node):
        return self.is_inline and not isinstance(node, InlineIfHeader)
    @skip_if_disabled
    def visit_Statement(self, statement):  # noqa
        if statement is None:
            return None
        has_pipes = statement.tokens[0].value.startswith("|")
        if has_pipes or not self.flatten_lines:
            return self.handle_spaces(statement, has_pipes)
        else:
            return self.handle_spaces_and_flatten_lines(statement)
    def handle_spaces_and_flatten_lines(self, statement):
        """Normalize separators and flatten multiline statements to one line."""
        add_eol, prev_sep = False, False
        add_indent = not self.is_keyword_inside_inline_if(statement)
        new_tokens, comments = [], []
        for token in statement.tokens:
            if token.type == Token.SEPARATOR:
                if prev_sep:  # collapse consecutive separators into one
                    continue
                prev_sep = True
                if add_indent:  # very first separator becomes the block indent
                    token.value = self.formatting_config.indent * self.indent
                else:
                    token.value = self.formatting_config.separator
            elif token.type == Token.EOL:
                # all EOLs are dropped; a single EOL is re-added at the end
                add_eol = True
                continue
            elif token.type == Token.CONTINUATION:
                continue
            elif token.type == Token.COMMENT:
                # comments are collected and moved to the end of the flattened line
                comments.append(token)
                continue
            else:
                prev_sep = False
            new_tokens.append(token)
            add_indent = False
        if new_tokens and new_tokens[-1].type == Token.SEPARATOR:
            new_tokens.pop()
        if comments:
            new_tokens.extend(join_comments(comments))
        if add_eol:
            new_tokens.append(Token(Token.EOL))
        statement.tokens = new_tokens
        self.generic_visit(statement)
        return statement
def handle_spaces(self, statement, has_pipes, only_indent=False):
new_tokens = []
prev_token = None
first_col_width = 0
first_data_token = True
is_sep_after_first_data_token = False
align_continuation = self.align_new_line
for line in statement.lines:
prev_sep = False
line_length = 0
for index, token in enumerate(line):
if token.type == Token.SEPARATOR:
if prev_sep:
continue
prev_sep = True
if index == 0 and not self.is_keyword_inside_inline_if(statement):
token.value = self.formatting_config.indent * self.indent
elif not only_indent:
if prev_token and prev_token.type == Token.CONTINUATION:
if align_continuation:
token.value = first_col_width * " "
else:
token.value = self.formatting_config.continuation_indent
else:
token.value = self.formatting_config.separator
else:
prev_sep = False
if align_continuation:
if first_data_token:
first_col_width += max(len(token.value), 3) - 3 # remove ... token length
# Check if first line is not longer than allowed line length - we cant align over limit
align_continuation = align_continuation and first_col_width < self.allowed_line_length
first_data_token = False
elif not is_sep_after_first_data_token and token.type != Token.EOL:
is_sep_after_first_data_token = True
first_col_width += len(self.formatting_config.separator)
prev_token = token
if has_pipes and index == len(line) - 2:
token.value = token.value.rstrip()
line_length += len(token.value)
new_tokens.append(token)
statement.tokens = new_tokens
self.generic_visit(statement)
return statement | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robotidy_lib/robotidy/transformers/NormalizeSeparators.py | 0.783326 | 0.281328 | NormalizeSeparators.py | pypi |
import re
import string
from typing import Optional
from robot.api.parsing import Token
from robot.variables.search import VariableIterator
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.exceptions import InvalidParameterValueError
from robotidy.transformers import Transformer
from robotidy.transformers.run_keywords import get_run_keywords
from robotidy.utils import is_token_value_in_tokens, normalize_name, split_on_token_type, split_on_token_value
class RenameKeywords(Transformer):
    """
    Enforce keyword naming.
    Title Case is applied to keyword name and underscores are replaced by spaces.
    You can keep underscores if you set remove_underscores to False:
    ```
    robotidy --transform RenameKeywords -c RenameKeywords:remove_underscores=False .
    ```
    It is also possible to configure `replace_pattern` parameter to find and replace regex pattern. Use `replace_to`
    to set replacement value. This configuration (underscores are used instead of spaces):
    ```
    robotidy --transform RenameKeywords -c RenameKeywords:replace_pattern=^(?i)rename\s?me$:replace_to=New_Shining_Name .
    ```
    will transform following code:
    ```robotframework
    *** Keywords ***
    rename Me
        Keyword Call
    ```
    To:
    ```robotframework
    *** Keywords ***
    New Shining Name
        Keyword Call
    ```
    Use `ignore_library = True` parameter to control if the library name part (Library.Keyword) of keyword call
    should be renamed.
    """
    ENABLED = False
    def __init__(
        self,
        replace_pattern: Optional[str] = None,
        replace_to: Optional[str] = None,
        remove_underscores: bool = True,
        ignore_library: bool = True,
    ):
        super().__init__()
        self.ignore_library = ignore_library
        self.remove_underscores = remove_underscores
        self.replace_pattern = self.parse_pattern(replace_pattern)
        self.replace_to = "" if replace_to is None else replace_to
        # metadata about Run Keyword-like keywords whose arguments contain keyword names
        self.run_keywords = get_run_keywords()
    def parse_pattern(self, replace_pattern):
        """Compile the user provided regex or raise a configuration error when it is invalid."""
        if replace_pattern is None:
            return None
        try:
            return re.compile(replace_pattern)
        except re.error as err:
            raise InvalidParameterValueError(
                self.__class__.__name__,
                "replace_pattern",
                replace_pattern,
                f"It should be a valid regex expression. Regex error: '{err.msg}'",
            )
    def get_run_keyword(self, kw_name):
        # returns None when the keyword is not a Run Keyword variant
        kw_norm = normalize_name(kw_name)
        return self.run_keywords.get(kw_norm, None)
    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)
    def rename_node(self, token, is_keyword_call):
        """Rename a single keyword-name token in place (no-op if rename would empty the name)."""
        if self.replace_pattern is not None:
            new_value = self.rename_with_pattern(token.value, is_keyword_call=is_keyword_call)
        else:
            new_value = self.normalize_name(token.value, is_keyword_call=is_keyword_call)
        if not new_value.strip():  # do not allow renames that removes keywords altogether
            return
        token.value = new_value
    def normalize_name(self, value, is_keyword_call):
        """Rename the name part by part, leaving embedded ${variables} untouched."""
        var_found = False
        parts = []
        # remaining must be initialized for names without any variables (new_name is unused)
        new_name, remaining = "", ""
        for prefix, match, remaining in VariableIterator(value, ignore_errors=True):
            var_found = True
            # rename strips whitespace, so we need to preserve it if needed
            if not prefix.strip() and parts:
                parts.extend([" ", match])
            else:
                leading_space = " " if prefix.startswith(" ") else ""
                trailing_space = " " if prefix.endswith(" ") else ""
                parts.extend([leading_space, self.rename_part(prefix, is_keyword_call), trailing_space, match])
        if var_found:
            if remaining.startswith(" "):
                parts.append(" ")
            parts.append(self.rename_part(remaining, is_keyword_call))
            return "".join(parts).strip()
        return self.rename_part(value, is_keyword_call)
    def rename_part(self, part: str, is_keyword_call: bool):
        """Rename one part of the name, optionally keeping the library prefix untouched."""
        if is_keyword_call and self.ignore_library:
            lib_name, *kw_name = part.rsplit(".", maxsplit=1)
            if not kw_name:  # no dot - plain keyword name
                return self.remove_underscores_and_capitalize(part)
            return f"{lib_name}.{self.remove_underscores_and_capitalize(kw_name[0])}"
        return ".".join([self.remove_underscores_and_capitalize(name_part) for name_part in part.split(".")])
    def remove_underscores_and_capitalize(self, value: str):
        if self.remove_underscores:
            value = re.sub("_+", " ", value)  # replace one or more _ with one space
            value = value.strip()
        # capitalize first letter of every word, leave rest untouched
        # NOTE(review): string.capwords() collapses repeated whitespace, so zip() may drop trailing
        # characters when the value contains consecutive spaces - confirm this is intended
        return "".join([a if a.isupper() else b for a, b in zip(value, string.capwords(value))])
    def rename_with_pattern(self, value: str, is_keyword_call: bool):
        """Apply the configured ``replace_pattern`` -> ``replace_to`` substitution to the name."""
        lib_name = ""
        if is_keyword_call and "." in value:
            # rename only non lib part
            found_lib = -1
            for prefix, _, _ in VariableIterator(value):
                # only look for the library dot in the text before the first variable
                found_lib = prefix.find(".")
                break
            if found_lib != -1:
                lib_name = value[: found_lib + 1]
                value = value[found_lib + 1 :]
            else:
                lib_name, value = value.split(".", maxsplit=1)
                lib_name += "."
        if lib_name and not self.ignore_library:
            lib_name = self.remove_underscores_and_capitalize(lib_name)
        return lib_name + self.remove_underscores_and_capitalize(
            self.replace_pattern.sub(repl=self.replace_to, string=value)
        )
    @skip_if_disabled
    def visit_KeywordName(self, node):  # noqa
        name_token = node.get_token(Token.KEYWORD_NAME)
        if not name_token or not name_token.value:
            return node
        self.rename_node(name_token, is_keyword_call=False)
        return node
    @skip_if_disabled
    def visit_KeywordCall(self, node):  # noqa
        name_token = node.get_token(Token.KEYWORD)
        if not name_token or not name_token.value:
            return node
        # ignore assign, separators and comments
        _, tokens = split_on_token_type(node.data_tokens, Token.KEYWORD)
        self.parse_run_keyword(tokens)
        return node
    def parse_run_keyword(self, tokens):
        """Rename the keyword name token and recurse into Run Keyword-like arguments."""
        if not tokens:
            return
        self.rename_node(tokens[0], is_keyword_call=True)
        run_keyword = self.get_run_keyword(tokens[0].value)
        if not run_keyword:
            return
        # skip arguments that do not contain keyword names
        tokens = tokens[run_keyword.resolve :]
        if run_keyword.branches:
            if "ELSE IF" in run_keyword.branches:
                while is_token_value_in_tokens("ELSE IF", tokens):
                    prefix, branch, tokens = split_on_token_value(tokens, "ELSE IF", 2)
                    self.parse_run_keyword(prefix)
            if "ELSE" in run_keyword.branches and is_token_value_in_tokens("ELSE", tokens):
                prefix, branch, tokens = split_on_token_value(tokens, "ELSE", 1)
                self.parse_run_keyword(prefix)
                self.parse_run_keyword(tokens)
                return
        elif run_keyword.split_on_and:
            return self.split_on_and(tokens)
        self.parse_run_keyword(tokens)
    def split_on_and(self, tokens):
        """Rename keyword names in Run Keywords-style calls where names are separated by AND."""
        if not is_token_value_in_tokens("AND", tokens):
            # without AND every remaining token is a keyword name
            for token in tokens:
                self.rename_node(token, is_keyword_call=True)
            return
        while is_token_value_in_tokens("AND", tokens):
            prefix, branch, tokens = split_on_token_value(tokens, "AND", 1)
            self.parse_run_keyword(prefix)
        self.parse_run_keyword(tokens)
@skip_if_disabled
def visit_SuiteSetup(self, node): # noqa
if node.errors:
return node
self.parse_run_keyword(node.data_tokens[1:])
return node
visit_SuiteTeardown = visit_TestSetup = visit_TestTeardown = visit_SuiteSetup
@skip_if_disabled
def visit_Setup(self, node): # noqa
if node.errors:
return node
self.parse_run_keyword(node.data_tokens[1:])
return node
visit_Teardown = visit_Setup | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robotidy_lib/robotidy/transformers/RenameKeywords.py | 0.812384 | 0.643273 | RenameKeywords.py | pypi |
from robot.api.parsing import Comment, EmptyLine
try:
from robot.api.parsing import ReturnStatement
except ImportError:
ReturnStatement = None
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.transformers import Transformer
from robotidy.utils import (
after_last_dot,
create_statement_from_tokens,
normalize_name,
wrap_in_if_and_replace_statement,
)
class ReplaceReturns(Transformer):
    """
    Replace return statements (such as [Return] setting or Return From Keyword keyword) with RETURN statement.
    Following code:
    ```robotframework
    *** Keywords ***
    Keyword
        Return From Keyword If    $condition    2
        Sub Keyword
        [Return]    1
    Keyword 2
        Return From Keyword    ${arg}
    ```
    will be transformed to:
    ```robotframework
    *** Keywords ***
    Keyword
        IF    $condition
            RETURN    2
        END
        Sub Keyword
        RETURN    1
    Keyword 2
        RETURN    ${arg}
    ```
    """
    MIN_VERSION = 5
    def __init__(self):
        super().__init__()
        self.return_statement = None  # [Return] setting found in currently visited keyword
    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)
    def visit_Keyword(self, node):  # noqa
        """Collect [Return] from the body (via visit_Return) and re-append it as RETURN at the end."""
        self.return_statement = None
        node = self.generic_visit(node)
        if self.return_statement:
            skip_lines = []
            indent = self.return_statement.tokens[0]
            # trailing empty lines and comments are re-appended after the new RETURN
            while node.body and isinstance(node.body[-1], (EmptyLine, Comment)):
                skip_lines.append(node.body.pop())
            return_stmt = create_statement_from_tokens(
                statement=ReturnStatement, tokens=self.return_statement.tokens[2:], indent=indent
            )
            node.body.append(return_stmt)
            node.body.extend(skip_lines)
        return node
    @skip_if_disabled
    def visit_KeywordCall(self, node):  # noqa
        """Replace Return From Keyword (If) calls with RETURN (wrapped in IF when conditional)."""
        if not node.keyword or node.errors:
            return node
        normalized_name = after_last_dot(normalize_name(node.keyword))
        if normalized_name == "returnfromkeyword":
            return create_statement_from_tokens(
                statement=ReturnStatement, tokens=node.tokens[2:], indent=node.tokens[0]
            )
        elif normalized_name == "returnfromkeywordif":
            return wrap_in_if_and_replace_statement(node, ReturnStatement, self.formatting_config.separator)
        return node
@skip_if_disabled
def visit_Return(self, node): # noqa
self.return_statement = node
@skip_if_disabled
def visit_Error(self, node): # noqa
"""Remove duplicate [Return]"""
for error in node.errors:
if "Setting 'Return' is allowed only once" in error:
return None
return node | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robotidy_lib/robotidy/transformers/ReplaceReturns.py | 0.805632 | 0.683439 | ReplaceReturns.py | pypi |
from robot.api.parsing import Token
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.skip import Skip
from robotidy.transformers import Transformer
class ReplaceEmptyValues(Transformer):
    """
    Replace empty values with ``${EMPTY}`` variable.
    Empty variables, lists or elements in the list can be defined in the following way:
    ```robotframework
    *** Variables ***
    ${EMPTY_VALUE}
    @{EMPTY_LIST}
    &{EMPTY_DICT}
    @{LIST_WITH_EMPTY}
    ...    value
    ...
    ...    value3
    ```
    To be more explicit, this transformer replace such values with ``${EMPTY}`` variables:
    ```robotframework
    *** Variables ***
    ${EMPTY_VALUE}    ${EMPTY}
    @{EMPTY_LIST}    @{EMPTY}
    &{EMPTY_DICT}    &{EMPTY}
    @{LIST_WITH_EMPTY}
    ...    value
    ...    ${EMPTY}
    ...    value3
    ```
    """
    HANDLES_SKIP = frozenset({"skip_sections"})
    def __init__(self, skip: Skip = None):
        super().__init__(skip)
    @skip_section_if_disabled
    def visit_VariableSection(self, node):  # noqa
        # only the *** Variables *** section is transformed
        return self.generic_visit(node)
@skip_if_disabled
def visit_Variable(self, node): # noqa
if node.errors or not node.name:
return node
args = node.get_tokens(Token.ARGUMENT)
sep = Token(Token.SEPARATOR, self.formatting_config.separator)
new_line_sep = Token(Token.SEPARATOR, self.formatting_config.continuation_indent)
if args:
tokens = []
prev_token = None
for token in node.tokens:
if token.type == Token.ARGUMENT and not token.value:
if not prev_token or prev_token.type != Token.SEPARATOR:
tokens.append(new_line_sep)
tokens.append(Token(Token.ARGUMENT, "${EMPTY}"))
else:
if token.type == Token.EOL:
token.value = token.value.lstrip(" ")
tokens.append(token)
prev_token = token
else:
tokens = [node.tokens[0], sep, Token(Token.ARGUMENT, node.name[0] + "{EMPTY}"), *node.tokens[1:]]
node.tokens = tokens
return node | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robotidy_lib/robotidy/transformers/ReplaceEmptyValues.py | 0.757884 | 0.7666 | ReplaceEmptyValues.py | pypi |
from robot.api.parsing import Comment, EmptyLine, End, Token
try:
from robot.api.parsing import InlineIfHeader
except ImportError:
InlineIfHeader = None
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.skip import Skip
from robotidy.transformers import Transformer
class AddMissingEnd(Transformer):
    """
    Add missing END token to FOR loops and IF statements.
    Following code:
    ```robotframework
    *** Keywords ***
    Keyword
        FOR    ${x}    IN    foo    bar
            Log    ${x}
    ```
    will be transformed to:
    ```robotframework
    *** Keywords ***
    Keyword
        FOR    ${x}    IN    foo    bar
            Log    ${x}
        END
    ```
    """
    HANDLES_SKIP = frozenset({"skip_sections"})
    def __init__(self, skip: Skip = None):
        super().__init__(skip)
    def fix_block(self, node, expected_type):
        """Fix a FOR/WHILE block: header name, END token and statements wrongly nested inside."""
        self.generic_visit(node)
        self.fix_header_name(node, expected_type)
        outside = []
        if not node.end:  # fix statement position only if END was missing
            node.body, outside = self.collect_inside_statements(node)
        self.fix_end(node)
        # statements that did not belong to the block are emitted after it
        return (node, *outside)
    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)
    @skip_if_disabled
    def visit_For(self, node):  # noqa
        return self.fix_block(node, Token.FOR)
    @skip_if_disabled
    def visit_While(self, node):  # noqa
        return self.fix_block(node, Token.WHILE)
    @skip_if_disabled
    def visit_Try(self, node):  # noqa
        self.generic_visit(node)
        if node.type != Token.TRY:
            # EXCEPT/ELSE/FINALLY branches are handled while visiting their root TRY
            return node
        self.fix_header_name(node, node.type)
        outside = []
        if not node.end:  # fix statement position only if END was missing
            node.body, outside = self.collect_inside_statements(node)
        try_branch = self.get_last_except(node)
        if try_branch:
            try_branch.body, outside_try = self.collect_inside_statements(try_branch)
            outside += outside_try
        self.fix_end(node)
        return (node, *outside)
    @skip_if_disabled
    def visit_If(self, node):  # noqa
        self.generic_visit(node)
        if node.type != Token.IF:
            # ELSE/ELSE IF branches are handled while visiting their root IF
            return node
        if InlineIfHeader and isinstance(node.header, InlineIfHeader):
            # inline IF has no END; only normalize the header name
            self.fix_header_name(node, "IF")
            return node
        self.fix_header_name(node, node.type)
        outside = []
        if not node.end:
            node.body, outside = self.collect_inside_statements(node)
        or_else = self.get_last_or_else(node)
        if or_else:
            or_else.body, outside_or_else = self.collect_inside_statements(or_else)
            outside += outside_or_else
        self.fix_end(node)
        return (node, *outside)
    def fix_end(self, node):
        """Fix END (missing END, End -> END, END position should be the same as FOR etc)."""
        if node.header.tokens[0].type == Token.SEPARATOR:
            indent = node.header.tokens[0]
        else:
            indent = Token(Token.SEPARATOR, self.formatting_config.separator)
        node.end = End([indent, Token(Token.END, Token.END), Token(Token.EOL)])
    @staticmethod
    def fix_header_name(node, header_name):
        # normalize header keyword casing, e.g. 'For' -> 'FOR'
        node.header.data_tokens[0].value = header_name
    def collect_inside_statements(self, node):
        """Split statements from node for those that belong to it and outside nodes.
        In this example with missing END:
            FOR    ${i}    IN RANGE    10
                Keyword
            Other Keyword
        RF will store 'Other Keyword' inside FOR block even if it should be outside.
        """
        new_body = [[], []]
        is_outside = False
        starting_col = self.get_column(node)
        for child in node.body:
            # a statement at (or before) the header column belongs outside the block
            if not isinstance(child, EmptyLine) and self.get_column(child) <= starting_col:
                is_outside = True
            new_body[is_outside].append(child)
        # trailing empty lines are moved to the outside part
        while new_body[0] and isinstance(new_body[0][-1], EmptyLine):
            new_body[1].insert(0, new_body[0].pop())
        return new_body
    @staticmethod
    def get_column(node):
        """Return the column of the first data token of the node (used to detect nesting)."""
        if hasattr(node, "header"):
            return node.header.data_tokens[0].col_offset
        if isinstance(node, Comment):
            token = node.get_token(Token.COMMENT)
            return token.col_offset
        if not node.data_tokens:
            return node.col_offset
        return node.data_tokens[0].col_offset
@staticmethod
def get_last_or_else(node):
if not node.orelse:
return None
or_else = node.orelse
while or_else.orelse:
or_else = or_else.orelse
return or_else
@staticmethod
def get_last_except(node):
if not node.next:
return None
try_branch = node.next
while try_branch.next:
try_branch = try_branch.next
return try_branch | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robotidy_lib/robotidy/transformers/AddMissingEnd.py | 0.729134 | 0.715039 | AddMissingEnd.py | pypi |
import re
from pathlib import Path
from typing import Optional
from jinja2 import Template
from jinja2.exceptions import TemplateError
from robot.api.parsing import Documentation, ModelVisitor, Token
from robotidy.exceptions import InvalidParameterValueError
from robotidy.transformers import Transformer
# Jinja2 source of the built-in Google-style documentation template.
# Renders a short-description placeholder plus Args/Returns sections that are
# emitted only when the keyword has arguments / returned values.
GOOGLE_TEMPLATE = """ Short description.
{% if keyword.arguments|length > 0 %}
{{ formatting.cont_indent }}Args:
{%- for arg in keyword.arguments %}
{{ formatting.cont_indent }}{{ formatting.cont_indent }}{{ arg.name }}: <description>{% endfor %}
{% endif -%}
{% if keyword.returns|length > 0 %}
{{ formatting.cont_indent }}Returns:
{%- for value in keyword.returns %}
{{ formatting.cont_indent }}{{ formatting.cont_indent }}{{ value }}: <description>{% endfor %}
{% endif -%}
"""
class Argument:
    """Single keyword argument split into its name and optional default value."""

    def __init__(self, arg):
        # "name=default" -> name / default; a name without "=" has no default
        name, separator, default = arg.partition("=")
        self.name = name
        self.default = default if separator else None
        self.full_name = arg

    def __str__(self):
        return self.full_name
class KeywordData:
    """Keyword metadata exposed to the documentation template as ``keyword``."""

    def __init__(self, name, arguments, returns):
        self.name, self.arguments, self.returns = name, arguments, returns
class FormattingData:
    """Formatting settings exposed to the documentation template as ``formatting``."""

    def __init__(self, cont_indent, separator):
        self.cont_indent, self.separator = cont_indent, separator
class ArgumentsAndReturnsVisitor(ModelVisitor):
    """Collect keyword arguments, returned values and whether documentation already exists."""

    def __init__(self):
        self.arguments = []
        self.returns = []
        self.doc_exists = False

    def visit_Keyword(self, node):  # noqa
        # Embedded variables in the keyword name also count as arguments.
        name_token = node.header.data_tokens[0]
        self.arguments = [
            Argument(variable.value)
            for variable in name_token.tokenize_variables()
            if variable.type == Token.VARIABLE
        ]
        self.returns = []
        self.doc_exists = False
        self.generic_visit(node)

    def visit_Documentation(self, node):  # noqa
        self.doc_exists = True

    def visit_Arguments(self, node):  # noqa
        if not node.errors:
            # [Arguments] takes precedence over embedded variables collected above
            self.arguments = [Argument(arg) for arg in node.values]

    def visit_ReturnStatement(self, node):  # noqa
        if not node.errors:
            self.returns = list(node.values)

    visit_Return = visit_ReturnStatement
class GenerateDocumentation(Transformer):
    """
    Generate keyword documentation with the documentation template.
    By default, GenerateDocumentation uses Google documentation template.
    Following keyword:
    ```robotframework
    *** Keywords ***
    Keyword
        [Arguments]    ${arg}
        ${var}    ${var2}    Step
        RETURN    ${var}    ${var2}
    ```
    will produce following documentation:
    ```robotframework
    *** Keywords ***
    Keyword
        [Documentation]
        ...
        ...    Arguments:
        ...        ${arg}:
        ...
        ...    Returns:
        ...        ${var}
        ...        ${var2}
        [Arguments]    ${arg}
        ${var}    ${var2}    Step
        RETURN    ${var}    ${var2}
    ```
    It is possible to create own template and insert dynamic text like keyword name, argument default values
    or static text (like ``[Documentation]    Documentation stub``). See our docs for more details.
    Generated documentation will be affected by ``NormalizeSeparators`` transformer that's why it is best to
    skip formatting documentation by this transformer:
    ```
    > robotidy --configure GenerateDocumentation:enabled=True --configure NormalizeSeparators:skip_documentation=True src
    ```
    """
    ENABLED = False
    # Splits a rendered template line into data / whitespace parts (2+ spaces or a tab = separator).
    WHITESPACE_PATTERN = re.compile(r"(\s{2,}|\t)", re.UNICODE)
    def __init__(self, overwrite: bool = False, doc_template: str = "google", template_directory: Optional[str] = None):
        self.overwrite = overwrite
        self.doc_template = self.load_template(doc_template, template_directory)
        self.args_returns_finder = ArgumentsAndReturnsVisitor()
        super().__init__()
    def load_template(self, template: str, template_directory: Optional[str] = None) -> Template:
        """Compile the Jinja2 template, raising a configuration error when it cannot be parsed."""
        try:
            return Template(self.get_template(template, template_directory))
        except TemplateError as err:
            raise InvalidParameterValueError(
                self.__class__.__name__,
                "doc_template",
                "template content",
                f"Failed to load the template: {err}",
            )
    def get_template(self, template: str, template_directory: Optional[str] = None) -> str:
        """Return template source: the built-in 'google' template or contents of a template file."""
        if template == "google":
            return GOOGLE_TEMPLATE
        template_path = Path(template)
        if not template_path.is_file():
            # relative paths are resolved against the provided template directory
            if not template_path.is_absolute() and template_directory is not None:
                template_path = Path(template_directory) / template_path
            if not template_path.is_file():
                raise InvalidParameterValueError(
                    self.__class__.__name__,
                    "doc_template",
                    template,
                    "The template path does not exist or cannot be found.",
                )
        # NOTE(review): template file is read with the locale default encoding - confirm utf-8 is not required
        with open(template_path) as fp:
            return fp.read()
def visit_Keyword(self, node): # noqa
self.args_returns_finder.visit(node)
if not self.overwrite and self.args_returns_finder.doc_exists:
return node
formatting = FormattingData(self.formatting_config.continuation_indent, self.formatting_config.separator)
kw_data = KeywordData(node.name, self.args_returns_finder.arguments, self.args_returns_finder.returns)
generated = self.doc_template.render(keyword=kw_data, formatting=formatting)
doc_node = self.create_documentation_from_string(generated)
if self.overwrite:
self.generic_visit(node) # remove existing [Documentation]
node.body.insert(0, doc_node)
return node
def visit_Documentation(self, node): # noqa
return None
def create_documentation_from_string(self, doc_string):
new_line = [Token(Token.EOL), Token(Token.SEPARATOR, self.formatting_config.indent), Token(Token.CONTINUATION)]
tokens = [
Token(Token.SEPARATOR, self.formatting_config.indent),
Token(Token.DOCUMENTATION, "[Documentation]"),
]
for index, line in enumerate(doc_string.splitlines()):
if index != 0:
tokens.extend(new_line)
for value in self.WHITESPACE_PATTERN.split(line):
if not value:
continue
if value.strip():
tokens.append(Token(Token.ARGUMENT, value))
else:
tokens.append(Token(Token.SEPARATOR, value))
tokens.append(Token(Token.EOL))
return Documentation(tokens) | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robotidy_lib/robotidy/transformers/GenerateDocumentation.py | 0.77768 | 0.37339 | GenerateDocumentation.py | pypi |
try:
from robot.api.parsing import InlineIfHeader, TryHeader
except ImportError:
InlineIfHeader, TryHeader = None, None
from robotidy.disablers import skip_if_disabled
from robotidy.skip import Skip
from robotidy.transformers.aligners_core import AlignKeywordsTestsSection
from robotidy.utils import is_suite_templated
class AlignTestCasesSection(AlignKeywordsTestsSection):
    """
    Align ``*** Test Cases ***`` section to columns.
    Align non-templated tests and settings into columns with predefined width. There are two possible alignment types
    (configurable via ``alignment_type``):
    - ``fixed`` (default): pad the tokens to the fixed width of the column
    - ``auto``: pad the tokens to the width of the longest token in the column
    Example output:
    ```robotframework
    *** Test Cases ***
    Test
        ${var}    Create Resource    ${argument}    value
        Assert    value
        Multi
        ...    line
        ...    args
    ```
    Column widths can be configured via ``widths`` (default ``24``). It accepts comma separated list of column widths.
    Tokens that are longer than width of the column go into "overflow" state. It's possible to decide in this
    situation (by configuring ``handle_too_long``):
    - ``overflow`` (default): align token to the next column
    - ``compact_overflow``: try to fit next token between current (overflowed) token and next column
    - ``ignore_rest``: ignore remaining tokens in the line
    - ``ignore_line``: ignore whole line
    It is possible to skip formatting on various types of the syntax (documentation, keyword calls with specific names
    or settings).
    """

    def __init__(
        self,
        widths: str = "",
        alignment_type: str = "fixed",
        handle_too_long: str = "overflow",
        compact_overflow_limit: int = 2,
        # Changes the Skip default for documentation to "skip"; the value is
        # consumed by the parameter/skip machinery, not forwarded to super().
        skip_documentation: str = "True",  # noqa - override skip_documentation from Skip
        skip: Skip = None,
    ):
        super().__init__(widths, alignment_type, handle_too_long, compact_overflow_limit, skip)

    def visit_File(self, node):  # noqa
        # Templated suites define their own column layout - leave them alone.
        if is_suite_templated(node):
            return node
        return self.generic_visit(node)

    @skip_if_disabled
    def visit_TestCase(self, node):  # noqa
        # In "auto" mode column widths are computed per test case.
        self.create_auto_widths_for_context(node)
        self.generic_visit(node)
        self.remove_auto_widths_for_context()
        return node

    def visit_Keyword(self, node):  # noqa
        # Keywords are handled by AlignKeywordsSection; skip them here.
        return node
import re
import string
from typing import Optional
from robot.api.parsing import Token
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.exceptions import InvalidParameterValueError
from robotidy.transformers import Transformer
def cap_string_until_succeed(word: str):
    """Yield *word*'s characters, upper-casing leading ones until one sticks.

    Characters without an upper-case form (punctuation, digits) are emitted
    as-is and the capitalization attempt moves on to the next character.
    """
    still_trying = True
    for original in word:
        if not still_trying:
            yield original
            continue
        upped = original.upper()
        if upped.isupper():
            still_trying = False
        yield upped
def cap_word(word: str):
    """Return *word* capitalized, tolerating leading punctuation.

    Words that already contain an upper-case letter (acronyms like ``JIRA``
    or mixed case like ``sOme``) are returned untouched.  ``str.capitalize``
    handles the plain case; when it has no effect (e.g. ``(word``) we fall
    back to upper-casing the first character that accepts it:
        word -> Word, (word -> (Word, word's -> Word's
    """
    if not word:
        return word
    if any(ch.isupper() for ch in word):
        return word
    capitalized = word.capitalize()
    if capitalized == word:
        return "".join(cap_string_until_succeed(word))
    return capitalized
class RenameTestCases(Transformer):
    r"""
    Enforce test case naming.
    Capitalize first letter of test case name, remove trailing dot and strip leading/trailing whitespace. If
    capitalize_each_word is true, will capitalize each word in test case name.
    It is also possible to configure `replace_pattern` parameter to find and replace regex pattern. Use `replace_to`
    to set replacement value. This configuration:
    ```
    robotidy --transform RenameTestCases -c RenameTestCases:replace_pattern=[A-Z]{3,}-\d{2,}:replace_to=foo
    ```
    will transform following code:
    ```robotframework
    *** Test Cases ***
    test ABC-123
        No Operation
    ```
    To:
    ```robotframework
    *** Test Cases ***
    Test foo
        No Operation
    ```
    ```
    robotidy --transform RenameTestCases -c RenameTestCases:capitalize_each_word=True
    ```
    will transform following code:
    ```robotframework
    *** Test Cases ***
    compare XML with json
        No Operation
    ```
    To:
    ```robotframework
    *** Test Cases ***
    Compare XML With Json
        No Operation
    ```
    """

    ENABLED = False

    def __init__(
        self,
        replace_pattern: Optional[str] = None,
        replace_to: Optional[str] = None,
        capitalize_each_word: bool = False,
    ):
        super().__init__()
        try:
            self.replace_pattern = re.compile(replace_pattern) if replace_pattern is not None else None
        except re.error as err:
            # Chain the original regex error (``from err``) so the root cause
            # stays visible in the traceback.
            raise InvalidParameterValueError(
                self.__class__.__name__,
                "replace_pattern",
                replace_pattern,
                f"It should be a valid regex expression. Regex error: '{err.msg}'",
            ) from err
        self.replace_to = "" if replace_to is None else replace_to
        self.capitalize_each_word = capitalize_each_word

    @skip_section_if_disabled
    def visit_TestCaseSection(self, node):  # noqa
        return self.generic_visit(node)

    @skip_if_disabled
    def visit_TestCaseName(self, node):  # noqa
        """Normalize one test case name token in place."""
        token = node.get_token(Token.TESTCASE_NAME)
        if token.value:
            if self.capitalize_each_word:
                value = token.value.strip()
                token.value = " ".join(cap_word(word) for word in value.split(" "))
            else:
                token.value = token.value[0].upper() + token.value[1:]
            # Optional user-configured find/replace.
            if self.replace_pattern is not None:
                token.value = self.replace_pattern.sub(repl=self.replace_to, string=token.value)
            if token.value.endswith("."):
                token.value = token.value[:-1]
            token.value = token.value.strip()
        return node
import ast
from robot.api.parsing import Token
from robotidy.disablers import skip_section_if_disabled
from robotidy.exceptions import InvalidParameterValueError
from robotidy.transformers import Transformer
# TODO: preserve comments?
class RemoveEmptySettings(Transformer):
    """
    Remove empty settings.
    You can configure which settings are affected by parameter ``work_mode``. Possible values:
    - overwrite_ok (default): does not remove settings that are overwriting suite settings (Test Setup,
    Test Teardown, Test Template, Test Timeout or Default Tags)
    - always : works on every settings
    Empty settings that are overwriting suite settings will be converted to be more explicit
    (given that there is related suite settings present):
    ```robotframework
    *** Keywords ***
    Keyword
    No timeout
        [Documentation]    Empty timeout means no timeout even when Test Timeout has been used.
        [Timeout]
    ```
    To:
    ```robotframework
    *** Keywords ***
    No timeout
        [Documentation]    Disabling timeout with NONE works too and is more explicit.
        [Timeout]    NONE
    ```
    You can disable that behavior by changing ``more_explicit`` parameter value to ``False``.
    """

    def __init__(self, work_mode: str = "overwrite_ok", more_explicit: bool = True):
        super().__init__()
        if work_mode not in ("overwrite_ok", "always"):
            raise InvalidParameterValueError(
                self.__class__.__name__, "work_mode", work_mode, "Possible values:\n    overwrite_ok\n    always"
            )
        self.work_mode = work_mode
        self.more_explicit = more_explicit
        # Suite-level settings that have a value and therefore can be
        # overwritten by an (empty) child setting; filled per file.
        self.overwritten_settings = set()
        # Child-setting token types that may overwrite a suite setting.
        self.child_types = {
            Token.SETUP,
            Token.TEARDOWN,
            Token.TIMEOUT,
            Token.TEMPLATE,
            Token.TAGS,
        }

    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)

    def visit_Statement(self, node):  # noqa
        """Remove an empty setting, or make it explicit with NONE."""
        # Not a setting, or a setting that already has a value: keep as-is.
        if node.type not in Token.SETTING_TOKENS or len(node.data_tokens) != 1:
            return node
        if self.disablers.is_node_disabled(node):
            return node
        # Empty and not overwriting anything (or work_mode=always) - remove.
        if (
            node.type not in self.child_types
            or self.work_mode == "always"
            or node.type not in self.overwritten_settings
        ):
            return None
        if self.more_explicit:
            # Rewrite e.g. "[Timeout]" into "[Timeout]    NONE".
            indent = node.tokens[0].value if node.tokens[0].type == Token.SEPARATOR else ""
            setting_token = node.data_tokens[0]
            node.tokens = [
                Token(Token.SEPARATOR, indent),
                setting_token,
                Token(Token.SEPARATOR, self.formatting_config.separator),
                Token(Token.ARGUMENT, "NONE"),
                Token(Token.EOL, "\n"),
            ]
        return node

    def visit_File(self, node):  # noqa
        if self.work_mode == "overwrite_ok":
            self.overwritten_settings = self.find_overwritten_settings(node)
        self.generic_visit(node)
        self.overwritten_settings = set()
        # Fix: transformer visits must return the node; falling through and
        # returning None tells callers the file node was removed.
        return node

    @staticmethod
    def find_overwritten_settings(node):
        """Scan the file for suite settings that carry a value."""
        auto_detector = FindSuiteSettings()
        auto_detector.visit(node)
        return auto_detector.suite_settings
class FindSuiteSettings(ast.NodeVisitor):
    """Collect suite-level settings that carry a value (and so can be overwritten)."""

    def __init__(self):
        self.suite_settings = set()

    def check_setting(self, node, overwritten_type):
        # A setting consisting of only its name token is empty; anything else
        # means the suite defines a value.
        has_value = len(node.data_tokens) != 1
        if has_value:
            self.suite_settings.add(overwritten_type)

    def visit_TestSetup(self, node):  # noqa
        self.check_setting(node, Token.SETUP)

    def visit_TestTeardown(self, node):  # noqa
        self.check_setting(node, Token.TEARDOWN)

    def visit_TestTemplate(self, node):  # noqa
        self.check_setting(node, Token.TEMPLATE)

    def visit_TestTimeout(self, node):  # noqa
        self.check_setting(node, Token.TIMEOUT)

    def visit_DefaultTags(self, node):  # noqa
        self.check_setting(node, Token.TAGS)
try:
from robot.api.parsing import InlineIfHeader, TryHeader
except ImportError:
InlineIfHeader, TryHeader = None, None
from robotidy.disablers import skip_if_disabled
from robotidy.skip import Skip
from robotidy.transformers.aligners_core import AlignKeywordsTestsSection
class AlignKeywordsSection(AlignKeywordsTestsSection):
    """
    Align ``*** Keywords ***`` section to columns.
    Align keyword calls and settings into columns with predefined width. There are two possible alignment types
    (configurable via ``alignment_type``):
    - ``fixed`` (default): pad the tokens to the fixed width of the column
    - ``auto``: pad the tokens to the width of the longest token in the column
    Example output:
    ```robotframework
    *** Keywords ***
    Keyword
        ${var}    Create Resource    ${argument}    value
        Assert    value
        Multi
        ...    line
        ...    args
    ```
    Column widths can be configured via ``widths`` (default ``24``). It accepts comma separated list of column widths.
    Tokens that are longer than width of the column go into "overflow" state. It's possible to decide in this
    situation (by configuring ``handle_too_long``):
    - ``overflow`` (default): align token to the next column
    - ``compact_overflow``: try to fit next token between current (overflowed) token and the next column
    - ``ignore_rest``: ignore remaining tokens in the line
    - ``ignore_line``: ignore whole line
    It is possible to skip formatting on various types of the syntax (documentation, keyword calls with specific names
    or settings).
    """

    def __init__(
        self,
        widths: str = "",
        alignment_type: str = "fixed",
        handle_too_long: str = "overflow",
        compact_overflow_limit: int = 2,
        # Changes the Skip default for documentation to "skip"; the value is
        # consumed by the parameter/skip machinery, not forwarded to super().
        skip_documentation: str = "True",  # noqa - override skip_documentation from Skip
        skip: Skip = None,
    ):
        super().__init__(widths, alignment_type, handle_too_long, compact_overflow_limit, skip)

    @skip_if_disabled
    def visit_Keyword(self, node):  # noqa
        # In "auto" mode column widths are computed per keyword.
        self.create_auto_widths_for_context(node)
        self.generic_visit(node)
        self.remove_auto_widths_for_context()
        return node

    def visit_TestCase(self, node):  # noqa
        # Test cases are handled by AlignTestCasesSection; skip them here.
        return node
from robot.api.parsing import ElseHeader, ElseIfHeader, End, If, IfHeader, KeywordCall, Token
from robotidy.disablers import skip_if_disabled, skip_section_if_disabled
from robotidy.transformers import Transformer
from robotidy.utils import after_last_dot, is_var, normalize_name
def insert_separators(indent, tokens, separator):
    """Yield *tokens* laid out as one line: indent, separator-joined tokens, EOL."""
    yield Token(Token.SEPARATOR, indent)
    head, last = tokens[:-1], tokens[-1]
    for token in head:
        yield token
        yield Token(Token.SEPARATOR, separator)
    yield last
    yield Token(Token.EOL)
class ReplaceRunKeywordIf(Transformer):
    """
    Replace ``Run Keyword If`` keyword calls with IF expressions.
    Following code:
    ```robotframework
    *** Keywords ***
    Keyword
        Run Keyword If    ${condition}
        ...    Keyword    ${arg}
        ...    ELSE IF    ${condition2}    Keyword2
        ...    ELSE    Keyword3
    ```
    Will be transformed to:
    ```robotframework
    *** Keywords ***
    Keyword
        IF    ${condition}
            Keyword    ${arg}
        ELSE IF    ${condition2}
            Keyword2
        ELSE
            Keyword3
        END
    ```
    Any return value will be applied to every ``ELSE``/``ELSE IF`` branch:
    ```robotframework
    *** Keywords ***
    Keyword
        ${var}    Run Keyword If    ${condition}    Keyword    ELSE    Keyword2
    ```
    Output:
    ```robotframework
    *** Keywords ***
    Keyword
        IF    ${condition}
            ${var}    Keyword
        ELSE
            ${var}    Keyword2
        END
    ```
    Run Keywords inside Run Keyword If will be split into separate keywords:
    ```robotframework
    *** Keywords ***
    Keyword
        Run Keyword If    ${condition}    Run Keywords    Keyword    ${arg}    AND    Keyword2
    ```
    Output:
    ```robotframework
    *** Keywords ***
    Keyword
        IF    ${condition}
            Keyword    ${arg}
            Keyword2
        END
    ```
    """

    @skip_section_if_disabled
    def visit_Section(self, node):  # noqa
        return self.generic_visit(node)

    @skip_if_disabled
    def visit_KeywordCall(self, node):  # noqa
        if not node.keyword:
            return node
        # Match "Run Keyword If" regardless of letter case / library prefix.
        if after_last_dot(normalize_name(node.keyword)) == "runkeywordif":
            return self.create_branched(node)
        return node

    def create_branched(self, node):
        """Turn one ``Run Keyword If`` call into a nested IF/ELSE IF/ELSE block.

        Branches are built back-to-front so each ``If`` node can point at the
        following branch via ``orelse``.  Returns the original node untouched
        when the call is malformed (too few arguments for a branch).
        """
        separator = node.tokens[0]
        assign = node.get_tokens(Token.ASSIGN)
        raw_args = node.get_tokens(Token.ARGUMENT)
        if len(raw_args) < 2:
            return node
        end = End([separator, Token(Token.END), Token(Token.EOL)])
        prev_if = None
        # Every yielded slice after the first begins with its delimiter token.
        for branch in reversed(list(self.split_args_on_delimiters(raw_args, ("ELSE", "ELSE IF"), assign=assign))):
            if branch[0].value == "ELSE":
                if len(branch) < 2:
                    return node
                args = branch[1:]
                # Skip an ELSE that would only reassign the same variables.
                if self.check_for_useless_set_variable(args, assign):
                    continue
                header = ElseHeader([separator, Token(Token.ELSE), Token(Token.EOL)])
            elif branch[0].value == "ELSE IF":
                if len(branch) < 3:
                    return node
                header = ElseIfHeader(
                    [
                        separator,
                        Token(Token.ELSE_IF),
                        Token(Token.SEPARATOR, self.formatting_config.separator),
                        branch[1],
                        Token(Token.EOL),
                    ]
                )
                args = branch[2:]
            else:
                # First slice: the IF condition followed by its keyword call.
                if len(branch) < 2:
                    return node
                header = IfHeader(
                    [
                        separator,
                        Token(Token.IF),
                        Token(Token.SEPARATOR, self.formatting_config.separator),
                        branch[0],
                        Token(Token.EOL),
                    ]
                )
                args = branch[1:]
            keywords = self.create_keywords(args, assign, separator.value + self.formatting_config.indent)
            if_block = If(header=header, body=keywords, orelse=prev_if)
            prev_if = if_block
        prev_if.end = end
        return prev_if

    def create_keywords(self, arg_tokens, assign, indent):
        """Create the keyword call statement(s) for a single branch body."""
        keyword_name = normalize_name(arg_tokens[0].value)
        if keyword_name == "runkeywords":
            # Split "Run Keywords  kw1  AND  kw2" into separate calls;
            # keyword[1:] drops the leading "Run Keywords"/"AND" token.
            return [
                self.args_to_keyword(keyword[1:], assign, indent)
                for keyword in self.split_args_on_delimiters(arg_tokens, ("AND",))
            ]
        elif is_var(keyword_name):
            # A variable used as keyword name needs runtime resolution.
            keyword_token = Token(Token.KEYWORD_NAME, "Run Keyword")
            arg_tokens = [keyword_token] + arg_tokens
        return [self.args_to_keyword(arg_tokens, assign, indent)]

    def args_to_keyword(self, arg_tokens, assign, indent):
        """Build a KeywordCall statement: [assign...]  keyword  args..."""
        separated_tokens = list(
            insert_separators(
                indent,
                [*assign, Token(Token.KEYWORD, arg_tokens[0].value), *arg_tokens[1:]],
                self.formatting_config.separator,
            )
        )
        return KeywordCall.from_tokens(separated_tokens)

    @staticmethod
    def split_args_on_delimiters(args, delimiters, assign=None):
        """Yield slices of *args* split on delimiter tokens.

        Every slice except the first starts with its delimiter token.  When
        the call assigns variables but has no explicit ELSE, a synthetic
        ``ELSE  Set Variable  ${None}...`` branch is appended so the assigned
        variables always get a value.
        """
        split_points = [index for index, arg in enumerate(args) if arg.value in delimiters]
        prev_index = 0
        for split_point in split_points:
            yield args[prev_index:split_point]
            prev_index = split_point
        yield args[prev_index : len(args)]
        if assign and "ELSE" in delimiters and not any(arg.value == "ELSE" for arg in args):
            values = [Token(Token.ARGUMENT, "${None}")] * len(assign)
            yield [Token(Token.ELSE), Token(Token.ARGUMENT, "Set Variable"), *values]

    @staticmethod
    def check_for_useless_set_variable(tokens, assign):
        # True when the branch is exactly "Set Variable" reassigning the very
        # same variables it is assigned to (a no-op ELSE branch).
        if not assign or normalize_name(tokens[0].value) != "setvariable" or len(tokens[1:]) != len(assign):
            return False
        for var, var_assign in zip(tokens[1:], assign):
            if normalize_name(var.value) != normalize_name(var_assign.value):
                return False
        return True
from __future__ import annotations
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import lru_cache
import re
from typing import Any
from ._types import ParseFloat
# E.g.
# - 00:32:00.999999
# - 00:32:00
_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
RE_NUMBER = re.compile(
r"""
0
(?:
x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
|
b[01](?:_?[01])* # bin
|
o[0-7](?:_?[0-7])* # oct
)
|
[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
(?P<floatpart>
(?:\.[0-9](?:_?[0-9])*)? # optional fractional part
(?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
)
""",
flags=re.VERBOSE,
)
RE_LOCALTIME = re.compile(_TIME_RE_STR)
RE_DATETIME = re.compile(
rf"""
([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
(?:
[Tt ]
{_TIME_RE_STR}
(?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
)?
""",
flags=re.VERBOSE,
)
def match_to_datetime(match: re.Match) -> datetime | date:
    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.

    Raises ValueError if the match does not correspond to a valid date
    or datetime.
    """
    groups = match.groups()
    year, month, day = (int(g) for g in groups[:3])
    hour_str, minute_str, sec_str, micros_str = groups[3:7]
    zulu_time, offset_sign_str, offset_hour_str, offset_minute_str = groups[7:]
    if hour_str is None:
        # No time component at all -> plain date.
        return date(year, month, day)
    # Right-pad the fraction to six digits: ".5" means 500000 microseconds.
    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
    if offset_sign_str:
        tz: tzinfo | None = cached_tz(
            offset_hour_str, offset_minute_str, offset_sign_str
        )
    elif zulu_time:
        tz = timezone.utc
    else:  # local date-time
        tz = None
    return datetime(
        year, month, day, int(hour_str), int(minute_str), int(sec_str), micros, tzinfo=tz
    )
@lru_cache(maxsize=None)
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
    """Build (and memoize) a fixed-offset timezone from matched offset strings.

    The key space is bounded by the regex (24 hours x 60 minutes x 2 signs),
    so an unbounded cache is safe here.
    """
    direction = 1 if sign_str == "+" else -1
    offset = timedelta(hours=int(hour_str), minutes=int(minute_str))
    return timezone(direction * offset)
def match_to_localtime(match: re.Match) -> time:
    """Convert a `RE_LOCALTIME` match to a `datetime.time`."""
    hour_str, minute_str, sec_str, micros_str = match.groups()
    microseconds = 0
    if micros_str:
        # Right-pad to six digits: ".5" means 500000 microseconds.
        microseconds = int(micros_str.ljust(6, "0"))
    return time(int(hour_str), int(minute_str), int(sec_str), microseconds)
def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
    """Convert a `RE_NUMBER` match to an int or, via *parse_float*, a float."""
    whole = match.group()
    if match.group("floatpart"):
        return parse_float(whole)
    # Base 0 lets int() honour the 0x/0o/0b prefixes matched by RE_NUMBER.
    return int(whole, 0)
import datetime
import json
def _decode_oid(decoder, oid):
return decoder.memo[oid]
def _decode_float(decoder, msg):
return float(msg)
def _decode_int(decoder, msg):
return int(msg)
def _decode_str(decoder, msg):
return msg
def _decode(message_definition, level_diff=0):
names = []
name_to_decode = {}
for s in message_definition.split(","):
s = s.strip()
i = s.find(":")
decode = "oid"
if i != -1:
s, decode = s.split(":", 1)
names.append(s)
if decode == "oid":
name_to_decode[s] = _decode_oid
elif decode == "int":
name_to_decode[s] = _decode_int
elif decode == "float":
name_to_decode[s] = _decode_float
elif decode == "str":
name_to_decode[s] = _decode_str
else:
raise RuntimeError(f"Unexpected: {decode}")
def dec_impl(decoder, message):
decoder.level += level_diff
splitted = message.split("|", len(names) - 1)
ret = {}
for i, s in enumerate(splitted):
name = names[i]
try:
ret[name] = name_to_decode[name](decoder, s)
except:
ret[name] = None
return ret
return dec_impl
def decode_time(decoder, timestamp):
    """Parse an ISO timestamp, store it as the decoder's initial time, echo it back."""
    parsed = datetime.datetime.fromisoformat(timestamp)
    decoder.initial_time = parsed
    return {"initial_time": timestamp}
def decode_memo(decoder, message):
    """Store one ``id:json-value`` memo entry; memo messages yield no output."""
    memo_id, raw_value = message.split(":", 1)
    decoder.memo[memo_id] = json.loads(raw_value)
    return None
# Maps a message-type tag to handler(decoder, payload) -> dict (None for memos).
_MESSAGE_TYPE_INFO = {
    "V": lambda _decoder, message: {"version": message},  # log format version
    "I": lambda _decoder, message: {"info": json.loads(message)},  # info JSON blob
    "ID": _decode("part:int, id:str"),  # run identification
    "T": decode_time,  # initial timestamp
    "M": decode_memo,  # memo definition (produces no output entry)
    "L": _decode("level:str, message:oid, time_delta_in_seconds:float"),  # log message
    "LH": _decode("level:str, message:oid, time_delta_in_seconds:float"),  # log message (html)
    "SS": _decode(  # start suite (increases nesting)
        "name:oid, suite_id:oid, suite_source:oid, time_delta_in_seconds:float",
        level_diff=+1,
    ),
    "ES": _decode("status:oid, time_delta_in_seconds:float", level_diff=-1),  # end suite
    "ST": _decode(  # start test (increases nesting)
        "name:oid, suite_id:oid, lineno:int, time_delta_in_seconds:float", level_diff=+1
    ),
    "ET": _decode(  # end test
        "status:oid, message:oid, time_delta_in_seconds:float", level_diff=-1
    ),
    "SK": _decode(  # start keyword (increases nesting)
        "name:oid, libname:oid, keyword_type:oid, doc:oid, source:oid, lineno:int, time_delta_in_seconds:float",
        level_diff=+1,
    ),
    "EK": _decode("status:oid, time_delta_in_seconds:float", level_diff=-1),  # end keyword
    "KA": _decode("argument:oid"),  # keyword argument
    "AS": _decode("assign:oid"),  # assignment target
    "TG": _decode("tag:oid"),  # tag
    "S": _decode("start_time_delta:float"),  # start time offset
}
# Restarted suite/test/keyword messages share the corresponding start handlers.
_MESSAGE_TYPE_INFO["RS"] = _MESSAGE_TYPE_INFO["SS"]
_MESSAGE_TYPE_INFO["RT"] = _MESSAGE_TYPE_INFO["ST"]
_MESSAGE_TYPE_INFO["RK"] = _MESSAGE_TYPE_INFO["SK"]
class Decoder:
    """Stateful decoder for the streamed log format.

    Tracks the memo table (object-id -> value), the run's initial time and
    the current suite/test/keyword nesting level.
    """

    def __init__(self):
        self.memo = {}
        self.initial_time = None
        self.level = 0

    @property
    def ident(self):
        """Indentation string matching the current nesting level."""
        return " " * self.level

    def decode_message_type(self, message_type, message):
        """Decode one message; returns a dict, or None for memo messages.

        Handler failures never propagate: they are reported in the returned
        dict's "error" key so a corrupt line cannot abort iteration.
        """
        handler = _MESSAGE_TYPE_INFO[message_type]
        ret = {"message_type": message_type}
        try:
            r = handler(self, message)
            if not r:
                # Memos intentionally produce no output entry.
                if message_type == "M":
                    return None
                raise RuntimeError(
                    f"No return when decoding: {message_type} - {message}"
                )
            if not isinstance(r, dict):
                # Fix: report the offending value ``r`` (the message used to
                # interpolate ``ret`` itself) and do not attempt to merge it.
                ret[
                    "error"
                ] = f"Expected dict return when decoding: {message_type} - {message}. Found: {r}"
            else:
                ret.update(r)
        except Exception as e:
            ret["error"] = f"Error decoding: {message_type}: {e}"
        return ret
def iter_decoded_log_format(stream):
    """Yield decoded message dicts from a text *stream* in the log format.

    Blank lines are skipped; every other line is ``<TYPE> <payload>``.
    Memo lines update decoder state but yield nothing.
    """
    decoder = Decoder()
    # Iterate the stream lazily instead of materializing it with readlines().
    for line in stream:
        line = line.strip()
        if not line:
            continue
        message_type, message = line.split(" ", 1)
        decoded = decoder.decode_message_type(message_type, message)
        if decoded:
            yield decoded
from typing import Sequence, Dict, Any
class RobotFrameworkFacade(object):
    """
    Nothing on Robot Framework is currently typed, so, this is a facade
    to help to deal with it so that we don't add lots of things to ignore its
    imports/typing.
    """

    @property
    def get_model(self):
        from robot.api import get_model  # type:ignore

        return get_model

    @property
    def TestSuite(self):
        from robot.api import TestSuite

        return TestSuite

    @property
    def Token(self):
        from robot.api import Token

        return Token

    @property
    def DataError(self):
        from robot.errors import DataError  # type:ignore

        return DataError

    @property
    def EXECUTION_CONTEXTS(self):
        from robot.running.context import EXECUTION_CONTEXTS  # type:ignore

        return EXECUTION_CONTEXTS

    @property
    def SettingsBuilder(self):
        from robot.running.builder.transformers import SettingsBuilder  # type:ignore

        return SettingsBuilder

    @property
    def SuiteBuilder(self):
        from robot.running.builder.transformers import SuiteBuilder  # type:ignore

        return SuiteBuilder

    @property
    def TestDefaults(self):
        # This is the 2nd argument to SettingsBuilder or SuiteBuilder.
        # The import location changed across Robot Framework releases, so try
        # newest first and fall back progressively.
        try:
            try:
                from robot.running.builder.settings import FileSettings

                return FileSettings
            except ImportError:
                pass
            # RF 5.1 onwards.
            from robot.running.builder.settings import (
                Defaults as TestDefaults,  # type:ignore
            )
        except ImportError:
            from robot.running.builder.testsettings import TestDefaults  # type:ignore
        return TestDefaults

    def get_libraries_imported_in_namespace(self):
        """Return the set of library names imported in the current namespace."""
        EXECUTION_CONTEXTS = self.EXECUTION_CONTEXTS
        return set(EXECUTION_CONTEXTS.current.namespace._kw_store.libraries)

    def run_test_body(self, context, test, model):
        """Run the (single-test) model's body, returning the last value if any.

        Special case first: a lone assignment-like line (``${var}`` with no
        keyword) is evaluated as a variable expression instead of being run.
        """
        assign_token = None
        if len(model.sections) == 1:
            section = next(iter(model.sections))
            body = getattr(section, "body", None)
            if body is not None and len(body) == 1:
                t = next(iter(body))
                if t.__class__.__name__ == "TestCase":
                    body = getattr(t, "body", None)
                    if len(body) == 1:
                        line = next(iter(body))
                        if line.__class__.__name__ == "KeywordCall":
                            if not line.keyword:
                                for token in line.tokens:
                                    if token.type == token.ASSIGN:
                                        assign_token = token
                                        break
                        elif line.__class__.__name__ == "EmptyLine":
                            for token in line.tokens:
                                if token.type == token.ASSIGN:
                                    assign_token = token
                                    break
        if assign_token:
            # NOTE(review): uses the leaked loop variable ``token``; whenever
            # this branch is reached ``token is assign_token`` (the loop broke
            # on it), but using ``assign_token`` explicitly would be clearer.
            return context.namespace.variables.replace_string(str(token))

        from robot import version

        IS_ROBOT_4_ONWARDS = not version.get_version().startswith("3.")
        if IS_ROBOT_4_ONWARDS:
            if len(test.body) == 1:
                # Unfortunately bodyrunner.BodyRunner.run doesn't return the
                # value, so, we have to do it ourselves.
                from robot.errors import ExecutionPassed
                from robot.errors import ExecutionFailed
                from robot.errors import ExecutionFailures

                errors = []
                passed = None
                step = next(iter(test.body))
                ret = None
                try:
                    ret = step.run(context, True, False)
                except ExecutionPassed as exception:
                    exception.set_earlier_failures(errors)
                    passed = exception
                except ExecutionFailed as exception:
                    errors.extend(exception.get_errors())
                if passed:
                    raise passed
                if errors:
                    raise ExecutionFailures(errors)
                return ret
            from robot.running.bodyrunner import BodyRunner  # noqa

            BodyRunner(context, templated=False).run(test.body)
            return None
        else:
            # Robot Framework 3.x code path.
            from robot.running.steprunner import StepRunner  # noqa

            StepRunner(context, False).run_steps(test.keywords.normal)
            return None

    @property
    def EmbeddedArgumentsHandler(self):
        from robot.running.userkeyword import EmbeddedArgumentsHandler  # type:ignore

        return EmbeddedArgumentsHandler

    def parse_arguments_options(self, arguments: Sequence[str]) -> Dict[str, Any]:
        """Parse robot command-line *arguments* and return only the options dict."""
        from robot.run import RobotFramework  # type:ignore

        arguments = list(arguments)
        # Add the target as an arg (which is to be ignored as
        # we just want the options in this API).
        arguments.append("<ignore>")
        opts, _ = RobotFramework().parse_arguments(
            arguments,
        )
        return opts
from robot.api.deco import keyword
from ..mailclient.variables import Variables
class SetterKeywords:
def __init__(self, MailUsername: str, MailPassword: str, MailServerAddress: str, ImapPorts: list, Pop3Ports: list, SmtpPorts: list):
    """Initialize shared credentials, server address and ports for all protocols.

    Port lists are two-element sequences ``[ssl_port, plain_port]``;
    values are coerced with ``int()``, so numeric strings are accepted.
    """
    self.set_mail_username_and_password(MailUsername, MailPassword)
    self.set_mail_server_address(MailServerAddress)
    # set all initial ports
    self.set_both_imap_ports(int(ImapPorts[0]), int(ImapPorts[1]))
    self.set_both_pop3_ports(int(Pop3Ports[0]), int(Pop3Ports[1]))
    self.set_both_smtp_ports(int(SmtpPorts[0]), int(SmtpPorts[1]))
@keyword
def set_mail_username_and_password(self, username: str, password: str):
    """
    This Keyword is a setter keyword to set mail username and password
    These will be used as login credentials for all three mail protocols
    (Imap, Pop3, Smtp)
    Args:
    | Name | Type | Description |
    | username | String | The Username needed to login to mail protocols |
    | password | String | The Password needed to login to mail protocols |
    Return:
    | Boolean | True |
    Examples:
    | Set Mail Username And Password | <User> | <Pass> |
    See also: `Set Imap Username and Password`, `Set Pop3 Username and Password`, `Set Smtp Username and Password`
    """
    self.set_imap_username_and_password(username, password)
    self.set_pop3_username_and_password(username, password)
    self.set_smtp_username_and_password(username, password)
    return True
@keyword
def set_imap_username_and_password(self, username: str, password: str):
    """
    This Keyword is a setter keyword to set Imap username and password
    These will be used as login credentials for the Imap protocol only
    This means three different login credentials can be set, one per protocol individually.
    Args:
    | Name | Type | Description |
    | username | String | The Username needed to login to Imap protocol |
    | password | String | The Password needed to login to Imap protocol |
    Return:
    | Boolean | True |
    Examples:
    | Set Imap Username And Password | <User> | <Pass> |
    See also: `Set Mail Username and Password`, `Set Pop3 Username and Password`, `Set Smtp Username and Password`
    """
    Variables.imap_username = str(username)
    Variables.imap_password = str(password)
    return True
@keyword
def set_pop3_username_and_password(self, username: str, password: str):
    """
    This Keyword is a setter keyword to set Pop3 username and password
    These will be used as login credentials for the Pop3 protocol only
    This means three different login credentials can be set, one per protocol individually.
    Args:
    | Name | Type | Description |
    | username | String | The Username needed to login to Pop3 protocol |
    | password | String | The Password needed to login to Pop3 protocol |
    Return:
    | Boolean | True |
    Examples:
    | Set Pop3 Username And Password | <User> | <Pass> |
    See also: `Set Mail Username and Password`, `Set Imap Username and Password`, `Set Smtp Username and Password`
    """
    Variables.pop3_username = str(username)
    Variables.pop3_password = str(password)
    return True
@keyword
def set_smtp_username_and_password(self, username: str, password: str):
    """
    This Keyword is a setter keyword to set Smtp username and password
    These will be used as login credentials for the Smtp protocol only
    This means three different login credentials can be set, one per protocol individually.
    Args:
    | Name | Type | Description |
    | username | String | The Username needed to login to Smtp protocol |
    | password | String | The Password needed to login to Smtp protocol |
    Return:
    | Boolean | True |
    Examples:
    | Set Smtp Username And Password | <User> | <Pass> |
    See also: `Set Mail Username and Password`, `Set Imap Username and Password`, `Set Pop3 Username and Password`
    """
    Variables.smtp_username = str(username)
    Variables.smtp_password = str(password)
    return True
@keyword
def set_mail_server_address(self, address: str):
    """
    This Keyword is a setter keyword to set the mail server's host address
    This will be used as host address for the mail server of all three protocols
    (Imap, Pop3, Smtp)
    Args:
    | Name | Type | Description |
    | address | String | The Host address of mail server |
    Return:
    | Boolean | True |
    Examples:
    | Set Mail Server Address | 127.0.0.1 |
    See also: `Set Imap Server Address`, `Set Pop3 Server Address`, `Set Smtp Server Address`
    """
    self.set_imap_server_address(address)
    self.set_pop3_server_address(address)
    self.set_smtp_server_address(address)
    return True
@keyword
def set_imap_server_address(self, address: str):
    """
    This Keyword is a setter keyword to set the Imap mail server's host address
    This will be used as host address for the mail server of the Imap protocol only
    This means three different hosts can be set, one per protocol individually.
    Args:
    | Name | Type | Description |
    | address | String | The Host address of Imap server |
    Return:
    | Boolean | True |
    Examples:
    | Set Imap Server Address | 127.0.0.1 |
    See also: `Set Mail Server Address`, `Set Pop3 Server Address`, `Set Smtp Server Address`
    """
    Variables.imap_mail_server = str(address)
    return True
@keyword
def set_pop3_server_address(self, address: str):
    """
    This Keyword is a setter keyword to set the Pop3 mail server's host address
    This will be used as host address for the mail server of the Pop3 protocol only
    This means three different hosts can be set, one per protocol individually.
    Args:
    | Name | Type | Description |
    | address | String | The Host address of Pop3 server |
    Return:
    | Boolean | True |
    Examples:
    | Set Pop3 Server Address | 127.0.0.1 |
    See also: `Set Mail Server Address`, `Set Imap Server Address`, `Set Smtp Server Address`
    """
    Variables.pop3_mail_server = str(address)
    return True
@keyword
def set_smtp_server_address(self, address: str):
    """
    This Keyword is a setter keyword to set the Smtp mail server's host address
    This will be used as host address for the mail server of the Smtp protocol only
    This means three different hosts can be set, one per protocol individually.
    Args:
    | Name | Type | Description |
    | address | String | The Host address of Smtp server |
    Return:
    | Boolean | True |
    Examples:
    | Set Smtp Server Address | 127.0.0.1 |
    See also: `Set Mail Server Address`, `Set Imap Server Address`, `Set Pop3 Server Address`
    """
    Variables.smtp_mail_server = str(address)
    return True
@keyword
def set_both_imap_ports(self, portssl: str, port: str):
    """Setter keyword for both Imap server ports in one call.

    The values are used as the initial ports to connect to.
    Args:
    | Name | Type | Description |
    | portssl | String | The SSL/TLS or StartTls Port number of Imap Server |
    | port | String | The Port number of Imap Server with no encryption |
    Return:
    | Boolean | True |
    Examples:
    | Set Both Imap Ports | 993 | 143 |
    See also: `Set Imap Ssl Port`, `Set Imap Port`
    """
    # Reuse the single-port setters instead of touching Variables directly.
    for setter, value in ((self.set_imap_ssl_port, portssl),
                          (self.set_imap_port, port)):
        setter(value)
    return True
@keyword
def set_imap_ssl_port(self, portssl: str):
    """Setter keyword for the Imap server's SSL/TLS (or StartTls) port.

    The value is used as the initial port to connect to.
    Args:
    | Name | Type | Description |
    | portssl | String | The SSL/TLS or StartTls Port number of Imap Server |
    Return:
    | Boolean | True |
    Examples:
    | Set Imap Ssl Port | 993 |
    See also: `Set Both Imap Ports`, `Set Imap Port`
    """
    ssl_port = int(portssl)
    Variables.imap_ssl_port = ssl_port
    return True
@keyword
def set_imap_port(self, port: str):
    """Setter keyword for the Imap server's unencrypted port.

    The value is used as the initial port to connect to.
    Args:
    | Name | Type | Description |
    | port | String | The Port number of Imap Server with no encryption |
    Return:
    | Boolean | True |
    Examples:
    | Set Imap Port | 143 |
    See also: `Set Both Imap Ports`, `Set Imap Ssl Port`
    """
    plain_port = int(port)
    Variables.imap_port = plain_port
    return True
@keyword
def set_both_pop3_ports(self, portssl: str, port: str):
    """Setter keyword for both Pop3 server ports in one call.

    The values are used as the initial ports to connect to.
    Args:
    | Name | Type | Description |
    | portssl | String | The SSL/TLS or StartTls Port number of Pop3 Server |
    | port | String | The Port number of Pop3 Server with no encryption |
    Return:
    | Boolean | True |
    Examples:
    | Set Both Pop3 Ports | 995 | 110 |
    See also: `Set Pop3 Ssl Port`, `Set Pop3 Port`
    """
    # Reuse the single-port setters instead of touching Variables directly.
    for setter, value in ((self.set_pop3_ssl_port, portssl),
                          (self.set_pop3_port, port)):
        setter(value)
    return True
@keyword
def set_pop3_ssl_port(self, portssl: str):
    """Setter keyword for the Pop3 server's SSL/TLS (or StartTls) port.

    The value is used as the initial port to connect to.
    Args:
    | Name | Type | Description |
    | portssl | String | The SSL/TLS or StartTls Port number of Pop3 Server |
    Return:
    | Boolean | True |
    Examples:
    | Set Pop3 Ssl Port | 995 |
    See also: `Set Both Pop3 Ports`, `Set Pop3 Port`
    """
    ssl_port = int(portssl)
    Variables.pop3_ssl_port = ssl_port
    return True
@keyword
def set_pop3_port(self, port: str):
    """Setter keyword for the Pop3 server's unencrypted port.

    The value is used as the initial port to connect to.
    Args:
    | Name | Type | Description |
    | port | String | The Port number of Pop3 Server with no encryption |
    Return:
    | Boolean | True |
    Examples:
    | Set Pop3 Port | 110 |
    See also: `Set Both Pop3 Ports`, `Set Pop3 Ssl Port`
    """
    plain_port = int(port)
    Variables.pop3_port = plain_port
    return True
@keyword
def set_both_smtp_ports(self, portssl: str, port: str):
    """Setter keyword for both Smtp server ports in one call.

    The values are used as the initial ports to connect to.
    Args:
    | Name | Type | Description |
    | portssl | String | The SSL/TLS or StartTls Port number of Smtp Server |
    | port | String | The Port number of Smtp Server with no encryption |
    Return:
    | Boolean | True |
    Examples:
    | Set Both Smtp Ports | 465 | 25 |
    See also: `Set Smtp Ssl Port`, `Set Smtp Port`
    """
    # Reuse the single-port setters instead of touching Variables directly.
    for setter, value in ((self.set_smtp_ssl_port, portssl),
                          (self.set_smtp_port, port)):
        setter(value)
    return True
@keyword
def set_smtp_ssl_port(self, portssl: str):
    """Setter keyword for the Smtp server's SSL/TLS (or StartTls) port.

    The value is used as the initial port to connect to.
    Args:
    | Name | Type | Description |
    | portssl | String | The SSL/TLS or StartTls Port number of Smtp Server |
    Return:
    | Boolean | True |
    Examples:
    | Set Smtp Ssl Port | 465 |
    See also: `Set Both Smtp Ports`, `Set Smtp Port`
    """
    ssl_port = int(portssl)
    Variables.smtp_ssl_port = ssl_port
    return True
@keyword
def set_smtp_port(self, port: str):
    """Setter keyword for the Smtp server's unencrypted port.

    The value is used as the initial port to connect to.
    Args:
    | Name | Type | Description |
    | port | String | The Port number of Smtp Server with no encryption |
    Return:
    | Boolean | True |
    Examples:
    | Set Smtp Port | 25 |
    See also: `Set Both Smtp Ports`, `Set Smtp Ssl Port`
    """
    plain_port = int(port)
    Variables.smtp_port = plain_port
    return True
from ..mailclient.protocols.pop3 import Pop3
from ..mailclient.errors import MailClientError
from robot.api.deco import keyword
# ToDo Add logger.infos
class Pop3Keywords:
    """Robot Framework keywords for reading and deleting mails via Pop3.

    Every keyword creates a fresh `Pop3` client; `useSsl` selects between
    the SSL and the plain-text connection.
    """
    @keyword
    def open_pop3_mail_by_subject(self, subject, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        This keyword reads the mails and returns the raw source of
        the first found mail in the mailbox with the given subject
        Args:
        | Name | Type | Description |
        | subject | String | The subject of the mail to be opened |
        | useSsl | Boolean | Whether to use SSL or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | String | The MIME content of the first mail found by the subject |
        Examples:
        | ${mail_content} | Open Pop3 Mail By Subject | <subject> |
        | Should Contain | ${mail_content} | <text> |
        """
        return Pop3(bool(useSsl)).open_mail_by_criteria("SUBJECT", subject)
    @keyword
    def open_pop3_mail_by_sender(self, sender, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        This keyword reads the mails and returns the raw source of
        the first found mail in the mailbox with the given sender
        Args:
        | Name | Type | Description |
        | sender | String | The sender of the mail to be opened |
        | useSsl | Boolean | Whether to use SSL or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | String | The MIME content of the first mail found by the sender |
        Examples:
        | ${mail_content} | Open Pop3 Mail By Sender | <sender> |
        | Should Contain | ${mail_content} | <text> |
        """
        return Pop3(bool(useSsl)).open_mail_by_criteria("FROM", sender)
    @keyword
    def open_pop3_mail_by_index(self, index, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        This keyword reads the mails and returns the raw source of
        the mail in the mailbox at the given index
        Args:
        | Name | Type | Description |
        | index | String (number) | The index of the mail to be opened |
        | useSsl | Boolean | Whether to use SSL or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | String | The MIME content of the mail in mailbox at the index |
        Examples:
        | ${mail_content} | Open Pop3 Mail By Index | <index> |
        | Should Contain | ${mail_content} | <text> |
        """
        # NOTE(review): index is passed through unconverted while the delete
        # keyword uses int(index) -- presumably Pop3.open_mail_by_index
        # accepts both; confirm before normalising.
        return Pop3(bool(useSsl)).open_mail_by_index(index)
    @keyword
    def open_latest_pop3_mail(self, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        This keyword reads the mails and returns the raw source of
        the latest sent mail in the mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | String | The MIME content of the latest mail in the mailbox |
        Examples:
        | ${mail_content} | Open Latest Pop3 Mail |
        | Should Contain | ${mail_content} | <text> |
        """
        # The latest mail sits at the highest index.
        return self.open_pop3_mail_by_index(self.get_pop3_mail_count(useSsl), useSsl)
    @keyword
    def delete_pop3_mail_by_subject(self, subject, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and deletes the first found mail in the mailbox with the given subject
        Args:
        | Name | Type | Description |
        | subject | String | The subject of the mail to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Boolean | True if the mail found and deleted else False |
        Examples:
        | ${status} | Delete Pop3 Mail By Subject | <subject> |
        | Should Be True | ${status} |
        """
        return Pop3(bool(useSsl)).delete_mail_by_criteria("SUBJECT", subject)
    @keyword
    def delete_every_pop3_mail_by_subject(self, subject, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and deletes every found mail in the mailbox with the given subject
        Args:
        | Name | Type | Description |
        | subject | String | The subject of the mails to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Boolean | True if any mail found and deleted else False |
        Examples:
        | ${status} | Delete Every Pop3 Mail By Subject | <subject> |
        | Should Be True | ${status} |
        """
        return Pop3(bool(useSsl)).delete_mail_by_criteria("SUBJECT", subject, firstOnly=False)
    @keyword
    def delete_pop3_mail_by_sender(self, sender, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and deletes the first found mail in the mailbox with the given sender
        Args:
        | Name | Type | Description |
        | sender | String | The sender of the mail to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Boolean | True if the mail found and deleted else False |
        Examples:
        | ${status} | Delete Pop3 Mail By Sender | <Sender> |
        | Should Be True | ${status} |
        """
        return Pop3(bool(useSsl)).delete_mail_by_criteria("FROM", sender)
    @keyword
    def delete_every_pop3_mail_by_sender(self, sender, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and deletes every found mail in the mailbox with the given sender
        Args:
        | Name | Type | Description |
        | sender | String | The sender of the mails to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Boolean | True if any mail found and deleted else False |
        Examples:
        | ${status} | Delete Every Pop3 Mail By Sender | <sender> |
        | Should Be True | ${status} |
        """
        return Pop3(bool(useSsl)).delete_mail_by_criteria("FROM", sender, firstOnly=False)
    @keyword
    def delete_pop3_mail_by_index(self, index, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and deletes the mail at the given index
        Args:
        | Name | Type | Description |
        | index | String (number) | The index number of the mail to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Boolean | True if the mail found and deleted else False |
        Examples:
        | ${status} | Delete Pop3 Mail By Index | <index> |
        | Should Be True | ${status} |
        """
        return Pop3(bool(useSsl)).delete_mail_by_index(int(index))
    @keyword
    def delete_latest_pop3_mail(self, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and deletes the latest mail in mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Boolean | True if the mail found and deleted else False |
        Examples:
        | ${status} | Delete Latest Pop3 Mail |
        | Should Be True | ${status} |
        """
        # The latest mail sits at the highest index.
        return self.delete_pop3_mail_by_index(self.get_pop3_mail_count(useSsl), useSsl)
    @keyword
    def delete_every_pop3_mail(self, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and deletes every found mail in the mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Boolean | True if any mail found and deleted else False |
        Examples:
        | ${status} | Delete Every Pop3 Mail |
        | Should Be True | ${status} |
        """
        return Pop3(bool(useSsl)).delete_every_mail()
    @keyword
    def is_pop3_inbox_empty(self, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and reads the mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Boolean | False if any mail found else True |
        Examples:
        | ${status} | Is Pop3 Inbox Empty |
        | Should Be True | ${status} |
        See also: `Pop3 Inbox Should Be Empty`
        """
        return Pop3(bool(useSsl)).is_inbox_empty()
    @keyword
    def Pop3_inbox_should_be_empty(self, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and fails if the mailbox is not empty
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Boolean | True if no mail found else fails |
        Examples:
        | Delete Every Pop3 Mail |
        | Pop3 Inbox Should Be Empty |
        See also: `Is Pop3 Inbox Empty`
        """
        # NOTE: the capitalised method name is kept for backward compatibility
        # with existing Python callers; the Robot keyword name is unaffected.
        if not Pop3(bool(useSsl)).is_inbox_empty():
            MailClientError.raise_mail_client_error(MailClientError.InboxNotEmpty.format("Pop3"))
        # Return True on success, consistent with `Imap Inbox Should Be Empty`
        # and with the documented return value above.
        return True
    @keyword
    def get_pop3_mail_count(self, useSsl=True):
        """
        This keyword reaches the mail server using Pop3 protocol
        and returns the number of mails found in the mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Pop3 protocol |
        Return:
        | Type | Description |
        | Int | The number of mails found in mailbox |
        Examples:
        | ${number} | Get Pop3 Mail Count |
        | Should Be Equal As Integers | ${number} | <number> |
        See also: `Is Pop3 Inbox Empty`
        """
        return Pop3(bool(useSsl)).get_mail_count()
    # ToDo: mark as read / check whether marked as read / count unread mails
from ..mailclient.protocols.imap import Imap
from ..mailclient.errors import MailClientError
from robot.api.deco import keyword
# ToDo Add logger.infos
class ImapKeywords:
    """Robot Framework keywords for reading and deleting mails via Imap.

    Every keyword creates a fresh `Imap` client; `useSsl` selects between
    the SSL and the plain-text connection.
    """
    @keyword
    def open_imap_mail_by_subject(self, subject, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        This keyword reads the mails and returns the raw source of
        the first found mail in the mailbox with the given subject
        Args:
        | Name | Type | Description |
        | subject | String | The subject of the mail to be opened |
        | useSsl | Boolean | Whether to use SSL or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | String | The MIME content of the first mail found by the subject |
        Examples:
        | ${mail_content} | Open Imap Mail By Subject | <subject> |
        | Should Contain | ${mail_content} | <text> |
        """
        return Imap(bool(useSsl)).open_mail_by_criteria("SUBJECT", subject)
    @keyword
    def open_imap_mail_by_sender(self, sender, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        This keyword reads the mails and returns the raw source of
        the first found mail in the mailbox with the given sender
        Args:
        | Name | Type | Description |
        | sender | String | The sender of the mail to be opened |
        | useSsl | Boolean | Whether to use SSL or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | String | The MIME content of the first mail found by the sender |
        Examples:
        | ${mail_content} | Open Imap Mail By Sender | <sender> |
        | Should Contain | ${mail_content} | <text> |
        """
        return Imap(bool(useSsl)).open_mail_by_criteria("FROM", sender)
    @keyword
    def open_imap_mail_by_index(self, index, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        This keyword reads the mails and returns the raw source of
        the mail in the mailbox at the given index
        Args:
        | Name | Type | Description |
        | index | String (number) | The index of the mail to be opened |
        | useSsl | Boolean | Whether to use SSL or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | String | The MIME content of the mail in mailbox at the index |
        Examples:
        | ${mail_content} | Open Imap Mail By Index | <index> |
        | Should Contain | ${mail_content} | <text> |
        """
        return Imap(bool(useSsl)).open_mail_by_index(index)
    @keyword
    def open_latest_imap_mail(self, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        This keyword reads the mails and returns the raw source of
        the latest sent mail in the mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | String | The MIME content of the latest mail in the mailbox |
        Examples:
        | ${mail_content} | Open Latest Imap Mail |
        | Should Contain | ${mail_content} | <text> |
        """
        # The latest mail sits at the highest index.
        return self.open_imap_mail_by_index(self.get_imap_mail_count(useSsl), useSsl)
    @keyword
    def delete_imap_mail_by_subject(self, subject, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and deletes the first found mail in the mailbox with the given subject
        Args:
        | Name | Type | Description |
        | subject | String | The subject of the mail to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Boolean | True if the mail found and deleted else False |
        Examples:
        | ${status} | Delete Imap Mail By Subject | <subject> |
        | Should Be True | ${status} |
        """
        return Imap(bool(useSsl)).delete_mail_by_criteria("SUBJECT", subject)
    @keyword
    def delete_every_imap_mail_by_subject(self, subject, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and deletes every found mail in the mailbox with the given subject
        Args:
        | Name | Type | Description |
        | subject | String | The subject of the mails to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Boolean | True if any mail found and deleted else False |
        Examples:
        | ${status} | Delete Every Imap Mail By Subject | <subject> |
        | Should Be True | ${status} |
        """
        return Imap(bool(useSsl)).delete_mail_by_criteria("SUBJECT", subject, firstOnly=False)
    @keyword
    def delete_imap_mail_by_sender(self, sender, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and deletes the first found mail in the mailbox with the given sender
        Args:
        | Name | Type | Description |
        | sender | String | The sender of the mail to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Boolean | True if the mail found and deleted else False |
        Examples:
        | ${status} | Delete Imap Mail By Sender | <Sender> |
        | Should Be True | ${status} |
        """
        return Imap(bool(useSsl)).delete_mail_by_criteria("FROM", sender)
    @keyword
    def delete_every_imap_mail_by_sender(self, sender, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and deletes every found mail in the mailbox with the given sender
        Args:
        | Name | Type | Description |
        | sender | String | The sender of the mails to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Boolean | True if any mail found and deleted else False |
        Examples:
        | ${status} | Delete Every Imap Mail By Sender | <sender> |
        | Should Be True | ${status} |
        """
        return Imap(bool(useSsl)).delete_mail_by_criteria("FROM", sender, firstOnly=False)
    @keyword
    def delete_imap_mail_by_index(self, index, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and deletes the mail at the given index
        Args:
        | Name | Type | Description |
        | index | String (number) | The index number of the mail to be deleted |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Boolean | True if the mail found and deleted else False |
        Examples:
        | ${status} | Delete Imap Mail By Index | <index> |
        | Should Be True | ${status} |
        """
        return Imap(bool(useSsl)).delete_mail_by_index(int(index))
    @keyword
    def delete_latest_imap_mail(self, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and deletes the latest mail in mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Boolean | True if the mail found and deleted else False |
        Examples:
        | ${status} | Delete Latest Imap Mail |
        | Should Be True | ${status} |
        """
        # The latest mail sits at the highest index.
        return self.delete_imap_mail_by_index(self.get_imap_mail_count(useSsl), useSsl)
    @keyword
    def delete_every_imap_mail(self, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and deletes every found mail in the mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Boolean | True if any mail found and deleted else False |
        Examples:
        | ${status} | Delete Every Imap Mail |
        | Should Be True | ${status} |
        """
        return Imap(bool(useSsl)).delete_every_mail()
    @keyword
    def is_imap_inbox_empty(self, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and reads the mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Boolean | False if any mail found else True |
        Examples:
        | ${status} | Is Imap Inbox Empty |
        | Should Be True | ${status} |
        See also: `Imap Inbox Should Be Empty`
        """
        return Imap(bool(useSsl)).is_inbox_empty()
    @keyword
    def imap_inbox_should_be_empty(self, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and fails if the mailbox is not empty
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Boolean | True if no mail found else fails |
        Examples:
        | Delete Every Imap Mail |
        | Imap Inbox Should Be Empty |
        See also: `Is Imap Inbox Empty`
        """
        if not Imap(bool(useSsl)).is_inbox_empty():
            MailClientError.raise_mail_client_error(MailClientError.InboxNotEmpty.format("Imap"))
        return True
    @keyword
    def get_imap_mail_count(self, useSsl=True):
        """
        This keyword reaches the mail server using Imap protocol
        and returns the number of mails found in the mailbox
        Args:
        | Name | Type | Description |
        | useSsl | Boolean | Whether to use ssl or not to connect to Imap protocol |
        Return:
        | Type | Description |
        | Int | The number of mails found in mailbox |
        Examples:
        | ${number} | Get Imap Mail Count |
        | Should Be Equal As Integers | ${number} | <number> |
        See also: `Is Imap Inbox Empty`
        """
        return Imap(bool(useSsl)).get_mail_count()
    # ToDo: mark as read / check whether marked as read / count unread mails
from enum import Enum
from email.message import EmailMessage
import os
import mimetypes
class Mail:
    """Helper for composing ``email.message.EmailMessage`` objects."""

    class Alternative:
        """An alternative mail body together with its MIME subtype."""

        class Subtype(Enum):
            """
            "plain": Plain text, no formatting (default).
            "html": Hypertext Markup Language.
            "enriched": Rich text format.
            "richtext": Rich text format (alternative to "enriched").
            "mixed": Message with multiple parts, of different types (e.g. text and image).
            "related": Message with an HTML body that refers to images and other content within the message.
            "alternative": Message with multiple parts, of different types, but only one should be displayed (e.g. text and HTML).
            """
            plain = 'plain'
            html = 'html'
            enriched = 'enriched'
            richtext = 'richtext'
            mixed = 'mixed'
            related = 'related'
            alternative = 'alternative'

        def __init__(self, body: str, subtype: Subtype = Subtype.plain):
            # The MIME subtype is stored as its string value, ready for
            # EmailMessage.add_alternative(subtype=...).
            self.body = body
            self.subtype = subtype.value

    @staticmethod
    def compose_mail(sender, receiver, subject: str, body_text=None, alternative=None,
                     cc=None, bcc=None, attachments=None):
        """
        Args:
        | Name | Type | Description |
        | sender | String | The sender mail address |
        | receiver | String/stringList | The receiver mail address(s) |
        | subject | String | The mail subject |
        | body_text | String | The mail body content in form of plaintext|
        | alternative | Alternative | The alternative mail body content in form of alternative.subtype |
        | cc | String/stringList | The cc mail address |
        | bcc | String/stringList | The bcc mail addresse |
        | attachments | String/stringList | The paths of attachments to be added to mail |
        Return:
        EmailMessage() instance with given arguments
        """
        # Normalize single-string arguments to lists.
        if isinstance(receiver, str):
            receiver = [receiver]
        if isinstance(cc, str):
            cc = [cc]
        if isinstance(bcc, str):
            bcc = [bcc]
        if isinstance(attachments, str):
            attachments = [attachments]
        # Create the email message object and set the headers.
        msg = EmailMessage()
        msg['From'] = sender
        msg['To'] = ', '.join(receiver)
        msg['Subject'] = subject
        # set_content() has no handler for None, so fall back to an empty body.
        msg.set_content(body_text if body_text is not None else '', subtype='plain')
        if alternative:
            msg.add_alternative(alternative.body, subtype=alternative.subtype)
        # Set the CC and BCC recipients, if any.
        if cc is not None:
            msg['Cc'] = ', '.join(cc)
        if bcc is not None:
            msg['Bcc'] = ', '.join(bcc)
        # Add any attachments, if any. The default is None (not a mutable
        # list) to avoid the shared-mutable-default pitfall; passing nothing
        # behaves exactly as before.
        if attachments is not None:
            for attachment_path in attachments:
                with open(attachment_path, 'rb') as f:
                    filename = os.path.basename(attachment_path)
                    # Guess the content type based on the file's extension;
                    # encoded files (e.g. .gz) are sent as raw octet streams.
                    ctype, encoding = mimetypes.guess_type(attachment_path)
                    if ctype is None or encoding is not None:
                        ctype = 'application/octet-stream'
                    maintype, subtype = ctype.split('/', 1)
                    msg.add_attachment(f.read(), maintype=maintype,
                                       subtype=subtype, filename=filename)
        return msg
import re
class keywords(object):
    """Mailosaur-backed email assertion and retrieval keywords.

    Relies on ``self.mailosaur`` (API client), ``self.server_id``,
    ``self.server_domain`` and the shared ``self.criteria`` search object
    being initialised on the instance elsewhere.
    """

    def _last_email_or(self, message):
        """Return ``message`` if given, otherwise fetch the newest email
        sent to the current server domain. Skips the server round-trip
        (which the old code always performed and then discarded) when an
        explicit message object is supplied."""
        if message is not None:
            return message
        self.criteria.sent_to = self.server_domain
        return self.mailosaur.messages.get(self.server_id, self.criteria)

    def email_subject_should_match(self, regex: str, message=None):
        """
        Checks the email subject of the last email received on the current server_domain matches the given regular expression.
        """
        last_email = self._last_email_or(message)
        # Note: re.match() anchors only at the beginning of the subject.
        # (The old ``r'{}'.format(regex)`` was a no-op: the raw prefix only
        # affects the literal, so the pattern is used as-is.)
        if re.match(regex, last_email.subject) is None:
            raise Exception(
                "The regexp does not match {}".format(last_email.subject))

    def email_subject_should_contain(self, matcher: str, message=None):
        """
        Checks the email subject of the last email received on the current server_domain contains the matcher.
        """
        last_email = self._last_email_or(message)
        # Explicit check instead of assert so it also runs under ``python -O``.
        if matcher not in last_email.subject:
            raise Exception("AssertionError: '{0}' does not contain '{1}'".format(
                last_email.subject, matcher))

    def delete_all_emails(self):
        """
        deletes all emails contained in the currently selected server domain.
        """
        self.criteria.sent_to = self.server_domain
        # Errors simply propagate; the old try/``raise e`` added nothing.
        self.mailosaur.messages.delete_all(self.server_id)

    def email_should_have_links(self, links_number: int, message=None):
        """
        Checks the last email contains X number of links where X == links_number.
        """
        last_email = self._last_email_or(message)
        links = len(last_email.html.links)
        if links != links_number:
            raise Exception("AssertionError: {0} does not equal {1}".format(
                links, links_number))

    def email_should_have_attachments(self, attachments_number: int, message=None):
        """
        Checks the last email contains X number of attachments where X == attachments_number.
        """
        last_email = self._last_email_or(message)
        attachments = len(last_email.attachments)
        if attachments != attachments_number:
            raise Exception("AssertionError: {0} does not equal {1}".format(
                attachments, attachments_number))

    def email_body_should_contain(self, matcher, case_insensitive: bool, message=None):
        """
        Checks the last email's body contains a specific string (matcher).
        If case_insensitive is set to True, then case is not checked in the substring.
        """
        last_email = self._last_email_or(message)
        text = last_email.text.body
        if case_insensitive is True:
            text = text.lower()
            matcher = matcher.lower()
        if matcher not in text:
            raise Exception("AssertionError: {0} is not contained {1}".format(
                matcher, text))

    def email_links_should_contain_text(self, text: str, message=None):
        """
        Checks if atleast one of the links contained in the last email contains text.
        """
        last_email = self._last_email_or(message)
        links = [link.text for link in last_email.text.links]
        # Raise AssertionError explicitly (same type as the old bare assert)
        # so the check survives ``python -O`` and carries a useful message.
        if not any(text in link for link in links):
            raise AssertionError(
                "None of the email links contains '{0}'".format(text))

    def get_links_href(self, message=None):
        """
        Returns a list of last email's link href attributes
        """
        last_email = self._last_email_or(message)
        return [link.href for link in last_email.html.links]

    def email_sender_should_be(self, matcher: str, message=None):
        """
        Checks that last email sender matches the given matcher.
        """
        last_email = self._last_email_or(message)
        sender = last_email.sender[0].email
        if sender != matcher:
            raise Exception("AssertionError: '{0}' does not match sender '{1}'".format(
                matcher, sender))

    def html_content_should_contain_text(self, matcher: str, case_insensitive: bool, message=None):
        """
        Checks that last email's HTML content contains sub-string
        """
        last_email = self._last_email_or(message)
        html = last_email.html.body
        # ``: bool`` annotation added for consistency with
        # email_body_should_contain (enables Robot's boolean conversion).
        if case_insensitive is True:
            html = html.lower()
            matcher = matcher.lower()
        if matcher not in html:
            raise Exception("AssertionError: '{0}' is not contained in '{1}'".format(
                matcher, html))

    def list_emails(self):
        """
        Returns a list of all email messages
        """
        self.criteria.sent_to = self.server_domain
        results = self.mailosaur.messages.list(self.server_id)
        return results.items

    def get_last_email(self):
        """
        Returns last email message.
        Note: this keyword returns a 'MessageSummary' object which does not include several properties.
        To get a more complete message, please use 'get email' keywords.
        """
        self.criteria.sent_to = self.server_domain
        results = self.mailosaur.messages.list(self.server_id)
        return results.items[0]

    def get_email_by_sender(self, sender, timeout=10000):
        """
        Waits for a message to be found. Returns as soon as a message matching the specified search criteria is found.
        timeout: Specify how long to wait for a matching result (in milliseconds, default value is 10 seconds).
        """
        self.criteria.sent_from = sender
        return self.mailosaur.messages.get(
            self.server_id, self.criteria, timeout=timeout)

    def get_email_by_subject(self, subject, timeout=10000):
        """
        Waits for a message to be found. Returns as soon as a message matching the specified search criteria is found.
        timeout: Specify how long to wait for a matching result (in milliseconds, default value is 10 seconds).
        """
        self.criteria.subject = subject
        return self.mailosaur.messages.get(
            self.server_id, self.criteria, timeout=timeout)

    def get_email_by_body(self, body, timeout=10000):
        """
        Waits for a message to be found. Returns as soon as a message matching the specified search criteria is found.
        timeout: Specify how long to wait for a matching result (in milliseconds, default value is 10 seconds).
        """
        self.criteria.body = body
        return self.mailosaur.messages.get(
            self.server_id, self.criteria, timeout=timeout)
import time
import os
import socket
import re
from .py3270 import Emulator
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.BuiltIn import RobotNotRunningError
from robot.utils import Matcher
class x3270(object):
    def __init__(self, visible=True, timeout='30', wait_time='0.5', wait_time_after_write='0', img_folder='.'):
        """You can change to hide the emulator screen set the argument visible=${False}

        For change the wait_time see `Change Wait Time`, to change the img_folder
        see the `Set Screenshot Folder` and to change the timeout see the `Change Timeout` keyword.
        """
        # Connection details; populated by `Open Connection`.
        self.lu = None
        self.host = None
        self.port = None
        self.credential = None
        # Target folder for `Take Screenshot` output.
        self.imgfolder = img_folder
        # Try Catch to run in Pycharm, and make a documentation in libdoc with no error
        try:
            self.output_folder = BuiltIn().get_variable_value('${OUTPUT DIR}')
        except RobotNotRunningError as rnrex:
            # Outside a Robot Framework run (e.g. libdoc generation), fall back to the cwd.
            if "Cannot access execution context" in str(rnrex):
                self.output_folder = os.getcwd()
            else:
                raise RobotNotRunningError()
        except Exception as e:
            raise AssertionError(e)
        # Sleeps applied after commands/writes; see `Change Wait Time` and
        # `Change Wait Time After Write`.
        self.wait = float(wait_time)
        self.wait_write = float(wait_time_after_write)
        self.timeout = int(timeout)
        self.visible = visible
        # Emulator handle; None until `Open Connection` is called.
        self.mf = None
def change_timeout(self, seconds):
"""Change the timeout for connection in seconds.
"""
self.timeout = float(seconds)
    def open_connection(self, host, LU=None, port=23, isSessionFile=False):
        """Create a connection with IBM3270 mainframe with the default port 23. To make a connection with the mainframe
        you only must inform the Host. You can pass the Logical Unit Name and the Port as optional.

        Example:
        | Open Connection | Hostname |
        | Open Connection | Hostname | LU=LUname |
        | Open Connection | Hostname | port=992 |
        | Open Connection | host=path\\\\to\\\\sessionFile.wc3270 | isSessionFile=${True} |
        """
        self.host = host
        self.lu = LU
        self.port = port
        self.isSessionFile = isSessionFile
        # With a logical unit the connect string is "LU@host:port", otherwise "host:port".
        if self.lu:
            self.credential = "%s@%s:%s" % (self.lu, self.host, self.port)
        else:
            self.credential = "%s:%s" % (self.host, self.port)
        # Re-opening drops any live emulator first.
        if self.mf:
            self.close_connection()
        if isSessionFile:
            # Session-file mode: the emulator reads host/options from the file itself.
            self.mf = Emulator(visible=bool(self.visible), timeout=int(self.timeout), session_file=True)
            self.mf.connect(self.host)
        else:
            self.mf = Emulator(visible=bool(self.visible), timeout=int(self.timeout))
            self.mf.connect(self.credential)
def close_connection(self):
"""Disconnect from the host.
"""
try:
self.mf.terminate()
except socket.error:
pass
self.mf = None
def change_wait_time(self, wait_time):
"""To give time for the mainframe screen to be "drawn" and receive the next commands, a "wait time" has been
created, which by default is set to 0.5 seconds. This is a sleep applied AFTER the follow keywords:
`Execute Command`
`Send Enter`
`Send PF`
`Write`
`Write in position`
If you want to change this value just use this keyword passing the time in seconds.
Examples:
| Change Wait Time | 0.1 |
| Change Wait Time | 2 |
"""
self.wait = float(wait_time)
def change_wait_time_after_write(self, wait_time_after_write):
"""To give the user time to see what is happening inside the mainframe, a "change wait time after write" has
been created, which by default is set to 0 seconds. This is a sleep applied AFTER the string sent in this
keywords:
`Write`
`Write Bare`
`Write in position`
`Write Bare in position`
If you want to change this value just use this keyword passing the time in seconds.
Note: This keyword is useful for debug purpose
Examples:
| Change Wait Time After Write | 0.5 |
| Change Wait Time After Write | 2 |
"""
self.wait_write = float(wait_time_after_write)
def read(self, ypos, xpos, length):
"""Get a string of "length" at screen co-ordinates "ypos"/"xpos".
Co-ordinates are 1 based, as listed in the status area of the terminal.
Example for read a string in the position y=8 / x=10 of a length 15:
| ${value} | Read | 8 | 10 | 15 |
"""
self._check_limits(ypos, xpos)
# Checks if the user has passed a length that will be larger than the x limit of the screen.
if int(xpos) + int(length) > (80+1):
raise Exception('You have exceeded the x-axis limit of the mainframe screen')
string = self.mf.string_get(int(ypos), int(xpos), int(length))
return str(string)
def read_value_for_given_string(self, string, nchar=0, from_col=0):
'''Finds given string on a mainframe screen and return specified number of characters, e.g.
| @{result} | Read Value For Given String | RC_VALUE: | 6 | #After finding "RC_VALUE:" it should return next 6 characters after found string |
'''
result=[]
col_number = int(from_col)
if not self._search_string(string):
logger.warn(f"Could not find given string: {string}", html=True)
return result
screen_content = self._read_screen_lines()
if col_number == 0:
for _row, line in screen_content.items():
if string in line:
row_result = line.split(string)[1:]
[result.append(item[:nchar]) for item in row_result]
return result
elif 1 <= col_number <= 80:
for _row, line in screen_content.items():
if string in line:
result.append(line[col_number-1:(col_number+nchar-1)]) #appending sliced string, using col_number-1 as a start position for string
return result
else:
raise Exception("Column size must be smaller than 80")
def execute_command(self, cmd):
"""Execute an [http://x3270.bgp.nu/wc3270-man.html#Actions|x3270 command].
Examples:
| Execute Command | Enter |
| Execute Command | Home |
| Execute Command | Tab |
| Execute Command | PF(1) |
"""
self.mf.exec_command((str(cmd)).encode("utf-8"))
time.sleep(self.wait)
def set_screenshot_folder(self, path):
"""Set a folder to keep the html files generated by the `Take Screenshot` keyword.
Example:
| Set Screenshot Folder | C:\\\Temp\\\Images |
"""
if os.path.exists(os.path.normpath(os.path.join(self.output_folder, path))):
self.imgfolder = path
else:
logger.error('Given screenshots path "%s" does not exist' % path)
logger.warn('Screenshots will be saved in "%s"' % self.imgfolder)
    def take_screenshot(self, height='410', width='670', format="html"):
        """Generate a screenshot of the IBM 3270 Mainframe in a html format. The
        default folder is the log folder of RobotFramework, if you want change see the `Set Screenshot Folder`.

        The Screenshot is printed in a iframe log, with the values of height=410 and width=670, you
        can change this values passing them from the keyword.

        Examples:
        | Take Screenshot |
        | Take Screenshot | height=500 | width=700 |
        """
        # NOTE(review): `format` shadows the builtin; kept for keyword-argument compatibility.
        filename_prefix = 'screenshot'
        extension = 'html'
        # Timestamp suffix makes each screenshot filename unique.
        filename_sufix = time.strftime("%Y%m%d%H%M%S")
        filepath = os.path.join(self.imgfolder, '%s_%s.%s' % (filename_prefix, filename_sufix, extension))
        fullpath = os.path.join(self.output_folder, filepath)
        self.mf.save_screen(fullpath)
        if format.lower() == "jpg":
            # Optional conversion; requires the third-party imgkit/wkhtmltox tooling.
            self._convert_html_to_jpg(fullpath, format)
            logger.write(f'Screenshot saved in location {self.output_folder}')
        else:
            logger.write(f'screenshot saved in location: {filepath}')
        # Embed the html screenshot in the Robot Framework log as an iframe.
        logger.write('<iframe src="%s" height="%s" width="%s"></iframe>' % (filepath.replace("\\", "/"), height, width), level='INFO', html=True)
    def wait_field_detected(self):
        """Wait until the screen is ready, the cursor has been positioned
        on a modifiable field, and the keyboard is unlocked.

        Sometimes the server will "unlock" the keyboard but the screen
        will not yet be ready. In that case, an attempt to read or write to the
        screen will result in a 'E' keyboard status because we tried to read from
        a screen that is not yet ready.

        Using this method tells the client to wait until a field is
        detected and the cursor has been positioned on it.
        """
        # Delegates the wait entirely to the emulator.
        self.mf.wait_for_field()
def delete_char(self, ypos=None, xpos=None):
"""Delete character under cursor. If you want to delete a character that is in
another position, simply pass the coordinates "ypos"/"xpos".
Co-ordinates are 1 based, as listed in the status area of the
terminal.
Examples:
| Delete Char |
| Delete Char | ypos=9 | xpos=25 |
"""
if ypos is not None and xpos is not None:
self.mf.move_to(int(ypos), int(xpos))
self.mf.exec_command(b'Delete')
def delete_field(self, ypos=None, xpos=None):
"""Delete a entire contents in field at current cursor location and positions
cursor at beginning of field. If you want to delete a field that is in
another position, simply pass the coordinates "ypos"/"xpos" of any part of the field.
Co-ordinates are 1 based, as listed in the status area of the
terminal.
Examples:
| Delete Field |
| Delete Field | ypos=12 | xpos=6 |
"""
if ypos is not None and xpos is not None:
self.mf.move_to(int(ypos), int(xpos))
self.mf.exec_command(b'DeleteField')
    def send_enter(self):
        """Send a Enter to the screen.
        """
        self.mf.send_enter()
        # Give the screen time to redraw; see `Change Wait Time`.
        time.sleep(self.wait)
    def move_next_field(self):
        """Move the cursor to the next input field. Equivalent to pressing the Tab key.
        """
        self.mf.exec_command(b'Tab')
    def move_previous_field(self):
        """Move the cursor to the previous input field. Equivalent to pressing the Shift+Tab keys.
        """
        self.mf.exec_command(b'BackTab')
def send_PF(self, PF):
"""Send a Program Function to the screen.
Example:
| Send PF | 3 |
"""
self.mf.exec_command(('PF('+str(PF)+')').encode("utf-8"))
time.sleep(self.wait)
def write(self, txt):
"""Send a string to the screen at the current cursor location *and a Enter.*
Example:
| Write | something |
"""
self._write(txt, enter='1')
def write_bare(self, txt):
"""Send only the string to the screen at the current cursor location.
Example:
| Write Bare | something |
"""
self._write(txt)
def write_in_position(self, txt, ypos, xpos):
"""Send a string to the screen at screen co-ordinates "ypos"/"xpos" and a Enter.
Co-ordinates are 1 based, as listed in the status area of the
terminal.
Example:
| Write in Position | something | 9 | 11 |
"""
self._write(txt, ypos=ypos, xpos=xpos, enter='1')
def write_bare_in_position(self, txt, ypos, xpos):
"""Send only the string to the screen at screen co-ordinates "ypos"/"xpos".
Co-ordinates are 1 based, as listed in the status area of the
terminal.
Example:
| Write Bare in Position | something | 9 | 11 |
"""
self._write(txt, ypos=ypos, xpos=xpos)
def _write(self, txt, ypos=None, xpos=None, enter='0'):
txt = txt.encode('utf-8')
if ypos is not None and xpos is not None:
self._check_limits(int(ypos), int(xpos))
self.mf.move_to(int(ypos), int(xpos))
if not isinstance(txt, (list, tuple)): txt = [txt]
[self.mf.send_string(el) for el in txt if el != '']
time.sleep(self.wait_write)
for i in range(int(enter)):
self.mf.send_enter()
time.sleep(self.wait)
def wait_until_string(self, txt, timeout=5):
"""Wait until a string exists on the mainframe screen to perform the next step. If the string not appear on
5 seconds the keyword will raise a exception. You can define a different timeout.
Example:
| Wait Until String | something |
| Wait Until String | something | timeout=10 |
"""
max_time = time.ctime(int(time.time())+int(timeout))
while time.ctime(int(time.time())) < max_time:
result = self._search_string(str(txt))
if result:
return txt
raise Exception('String "' + txt + '" not found in ' + str(timeout) + ' seconds')
def _search_string(self, string, ignore_case=False):
"""Search if a string exists on the mainframe screen and return True or False.
"""
def __read_screen(string, ignore_case):
for ypos in range(24):
line = self.mf.string_get(ypos+1, 1, 80)
if ignore_case: line = line.lower()
if string in line:
return True
return False
status = __read_screen(string, ignore_case)
return status
def page_should_contain_string(self, txt, ignore_case=False, error_message=None):
"""Search if a given string exists on the mainframe screen.
The search is case sensitive, if you want ignore this you can pass the argument: ignore_case=${True}
and you can edit the raise exception message with error_message.
Example:
| Page Should Contain String | something |
| Page Should Contain String | someTHING | ignore_case=${True} |
| Page Should Contain String | something | error_message=New error message |
"""
message = 'The string "' + txt + '" was not found'
if error_message: message = error_message
if ignore_case: txt = str(txt).lower()
result = self._search_string(txt, ignore_case)
if not result: raise Exception(message)
logger.info('The string "' + txt + '" was found')
def page_should_not_contain_string(self, txt, ignore_case=False, error_message=None):
"""Search if a given string NOT exists on the mainframe screen.
The search is case sensitive, if you want ignore this you can pass the argument: ignore_case=${True}
and you can edit the raise exception message with error_message.
Example:
| Page Should Not Contain String | something |
| Page Should Not Contain String | someTHING | ignore_case=${True} |
| Page Should Not Contain String | something | error_message=New error message |
"""
message = 'The string "' + txt + '" was found'
if error_message: message = error_message
if ignore_case: txt = str(txt).lower()
result = self._search_string(txt, ignore_case)
if result: raise Exception(message)
def page_should_contain_any_string(self, list_string, ignore_case=False, error_message=None):
"""Search if one of the strings in a given list exists on the mainframe screen.
The search is case sensitive, if you want ignore this you can pass the argument: ignore_case=${True}
and you can edit the raise exception message with error_message.
Example:
| Page Should Contain Any String | ${list_of_string} |
| Page Should Contain Any String | ${list_of_string} | ignore_case=${True} |
| Page Should Contain Any String | ${list_of_string} | error_message=New error message |
"""
message = 'The strings "' + str(list_string) + '" was not found'
if error_message: message = error_message
if ignore_case: list_string = [item.lower() for item in list_string]
for string in list_string:
result = self._search_string(string, ignore_case)
if result: break
if not result: raise Exception(message)
    def page_should_not_contain_any_string(self, list_string, ignore_case=False, error_message=None):
        """Fails if one or more of the strings in a given list exists on the mainframe screen. if one or more of the
        string are found, the keyword will raise a exception.

        The search is case sensitive, if you want ignore this you can pass the argument: ignore_case=${True}
        and you can edit the raise exception message with error_message.

        Example:
        | Page Should Not Contain Any Strings | ${list_of_string} |
        | Page Should Not Contain Any Strings | ${list_of_string} | ignore_case=${True} |
        | Page Should Not Contain Any Strings | ${list_of_string} | error_message=New error message |
        """
        # should_match=False: raise as soon as any listed string IS on screen.
        self._compare_all_list_with_screen_text(list_string, ignore_case, error_message, should_match=False)
    def page_should_contain_all_strings(self, list_string, ignore_case=False, error_message=None):
        """Search if all of the strings in a given list exists on the mainframe screen.

        The search is case sensitive, if you want ignore this you can pass the argument: ignore_case=${True}
        and you can edit the raise exception message with error_message.

        Example:
        | Page Should Contain All Strings | ${list_of_string} |
        | Page Should Contain All Strings | ${list_of_string} | ignore_case=${True} |
        | Page Should Contain All Strings | ${list_of_string} | error_message=New error message |
        """
        # should_match=True: raise as soon as any listed string is NOT on screen.
        self._compare_all_list_with_screen_text(list_string, ignore_case, error_message, should_match=True)
def page_should_not_contain_all_strings(self, list_string, ignore_case=False, error_message=None):
"""Fails if one of the strings in a given list exists on the mainframe screen. if one of the string
are found, the keyword will raise a exception.
The search is case sensitive, if you want ignore this you can pass the argument: ignore_case=${True}
and you can edit the raise exception message with error_message.
Example:
| Page Should Not Contain All Strings | ${list_of_string} |
| Page Should Not Contain All Strings | ${list_of_string} | ignore_case=${True} |
| Page Should Not Contain All Strings | ${list_of_string} | error_message=New error message |
"""
message = error_message
if ignore_case: list_string = [item.lower() for item in list_string]
for string in list_string:
result = self._search_string(string, ignore_case)
if result:
if message is None:
message = 'The string "' + string + '" was found'
raise Exception(message)
def page_should_contain_string_x_times(self, txt, number, ignore_case=False, error_message=None):
"""Search if the entered string appears the desired number of times on the mainframe screen.
The search is case sensitive, if you want ignore this you can pass the argument: ignore_case=${True} and you
can edit the raise exception message with error_message.
Example:
| Page Should Contain String X Times | something | 3 |
| Page Should Contain String X Times | someTHING | 3 | ignore_case=${True} |
| Page Should Contain String X Times | something | 3 | error_message=New error message |
"""
message = error_message
number = int(number)
all_screen = self._read_all_screen()
if ignore_case:
txt = str(txt).lower()
all_screen = str(all_screen).lower()
number_of_times = all_screen.count(txt)
if number_of_times != number:
if message is None:
message = 'The string "' + txt + '" was not found "' + str(number) + '" times, it appears "' \
+ str(number_of_times) + '" times'
raise Exception(message)
logger.info('The string "' + txt + '" was found "' + str(number) + '" times')
def page_should_match_regex(self, regex_pattern):
"""Fails if string does not match pattern as a regular expression. Regular expression check is
implemented using the Python [https://docs.python.org/2/library/re.html|re module]. Python's
regular expression syntax is derived from Perl, and it is thus also very similar to the syntax used,
for example, in Java, Ruby and .NET.
Backslash is an escape character in the test data, and possible backslashes in the pattern must
thus be escaped with another backslash (e.g. \\\d\\\w+).
"""
page_text = self._read_all_screen()
if not re.findall(regex_pattern, page_text, re.MULTILINE):
raise Exception('No matches found for "' + regex_pattern + '" pattern')
def page_should_not_match_regex(self, regex_pattern):
"""Fails if string does match pattern as a regular expression. Regular expression check is
implemented using the Python [https://docs.python.org/2/library/re.html|re module]. Python's
regular expression syntax is derived from Perl, and it is thus also very similar to the syntax used,
for example, in Java, Ruby and .NET.
Backslash is an escape character in the test data, and possible backslashes in the pattern must
thus be escaped with another backslash (e.g. \\\d\\\w+).
"""
page_text = self._read_all_screen()
if re.findall(regex_pattern, page_text, re.MULTILINE):
raise Exception('There are matches found for "' + regex_pattern + '" pattern')
    def page_should_contain_match(self, txt, ignore_case=False, error_message=None):
        """Fails unless the given string matches the given pattern.

        Pattern matching is similar as matching files in a shell, and it is case-sensitive by default.
        In the pattern, * matches to anything and ? matches to any single character.

        Note that the entire screen is only considered a string for this keyword, so if you want to search
        for the string "something" and it is somewhere other than at the beginning or end of the screen it
        should be reported as follows: **something**

        If you want to ignore case you can pass the argument: ignore_case=${True} and you
        can edit the raise exception message with error_message.

        Example:
        | Page Should Contain Match | **something** |
        | Page Should Contain Match | **so???hing** |
        | Page Should Contain Match | **someTHING** | ignore_case=${True} |
        | Page Should Contain Match | **something** | error_message=New error message |
        """
        message = error_message
        all_screen = self._read_all_screen()
        if ignore_case:
            txt = txt.lower()
            all_screen = all_screen.lower()
        # Matcher itself stays case/space sensitive; ignore_case is implemented
        # by lowering both the pattern and the screen text above.
        matcher = Matcher(txt, caseless=False, spaceless=False)
        result = matcher.match(all_screen)
        if not result:
            if message is None:
                message = 'No matches found for "' + txt + '" pattern'
            raise Exception(message)
    def page_should_not_contain_match(self, txt, ignore_case=False, error_message=None):
        """Fails if the given string matches the given pattern.

        Pattern matching is similar as matching files in a shell, and it is case-sensitive by default.
        In the pattern, * matches to anything and ? matches to any single character.

        Note that the entire screen is only considered a string for this keyword, so if you want to search
        for the string "something" and it is somewhere other than at the beginning or end of the screen it
        should be reported as follows: **something**

        If you want to ignore case you can pass the argument: ignore_case=${True} and you
        can edit the raise exception message with error_message.

        Example:
        | Page Should Not Contain Match | **something** |
        | Page Should Not Contain Match | **so???hing** |
        | Page Should Not Contain Match | **someTHING** | ignore_case=${True} |
        | Page Should Not Contain Match | **something** | error_message=New error message |
        """
        message = error_message
        all_screen = self._read_all_screen()
        if ignore_case:
            txt = txt.lower()
            all_screen = all_screen.lower()
        # Matcher itself stays case/space sensitive; ignore_case is implemented
        # by lowering both the pattern and the screen text above.
        matcher = Matcher(txt, caseless=False, spaceless=False)
        result = matcher.match(all_screen)
        if result:
            if message is None:
                message = 'There are matches found for "' + txt + '" pattern'
            raise Exception(message)
def _read_all_screen(self):
"""Read all the mainframe screen and return in a single string.
"""
full_text = ''
for ypos in range(24):
line = self.mf.string_get(ypos + 1, 1, 80)
for char in line:
if char:
full_text += char
return full_text
def _read_screen_lines(self):
"""Read all the mainframe screen and return screen lines as a list.
"""
screen_lines = {}
for row in range(24):
numb = int(row+1)
line = self.mf.string_get(numb, 1, 80)
screen_lines[numb] = line
return screen_lines
def _compare_all_list_with_screen_text(self, list_string, ignore_case, message, should_match):
if ignore_case: list_string = [item.lower() for item in list_string]
for string in list_string:
result = self._search_string(string, ignore_case)
if not should_match and result:
if message is None:
message = 'The string "' + string + '" was found'
raise Exception(message)
elif should_match and not result:
if message is None:
message = 'The string "' + string + '" was not found'
raise Exception(message)
def _convert_html_to_jpg(self, input_file, new_format="jpg"):
"""Converts html file generated originally by wc3270 to jpg image
"""
try:
import imgkit
except ModuleNotFoundError:
raise Exception("Please install 'imgkit' module and wkhtmltox executable: https://wkhtmltopdf.org/")
imgkit.from_file(input_file, ''.join([input_file.rstrip(".html"),".",new_format]))
    def get_current_cursor_position(self, return_type="str"):
        """Gets current cursor position and return as a list of either integers or strings.

        Robot framework create lists as a strings, but sometimes it's useful to get integers
        """
        # The emulator decides the element type based on ret_type.
        return self.mf.get_cursor_position(ret_type=return_type)
@staticmethod
def _check_limits(ypos, xpos):
"""Checks if the user has passed some coordinate y / x greater than that existing in the mainframe
"""
if int(ypos) > 24:
raise Exception('You have exceeded the y-axis limit of the mainframe screen')
if int(xpos) > 80:
raise Exception('You have exceeded the x-axis limit of the mainframe screen') | /robotframework_mainframe3270_extended-1.2.2-py3-none-any.whl/ExtendedMainframe3270/x3270.py | 0.581778 | 0.288181 | x3270.py | pypi |
import os
from datetime import timedelta
from typing import Any
from robot.api import logger
from robot.api.deco import keyword
from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError
from robot.utils import ConnectionCache
from robotlibcore import DynamicCore
from Mainframe3270.keywords import (
AssertionKeywords,
CommandKeywords,
ConnectionKeywords,
ReadWriteKeywords,
ScreenshotKeywords,
WaitAndTimeoutKeywords,
)
from Mainframe3270.py3270 import Emulator
from Mainframe3270.utils import convert_timeout
from Mainframe3270.version import VERSION
class Mainframe3270(DynamicCore):
    r"""
    Mainframe3270 is a library for Robot Framework based on the [https://pypi.org/project/py3270/|py3270 project],
    a Python interface to x3270, an IBM 3270 terminal emulator. It provides an API to a x3270 or s3270 subprocess.

    = Installation =

    To use this library, you must have the [http://x3270.bgp.nu/download.html|x3270 project] installed and included in your PATH.
    On Windows, you can install wc3270 and add "C:\Program Files\wc3270" to your PATH in the Environment Variables.

    = Example =

    | *** Settings ***
    | Library    Mainframe3270
    |
    | *** Test Cases ***
    | Example
    |     Open Connection    Hostname    LUname
    |     Change Wait Time    0.4 seconds
    |     Change Wait Time After Write    0.4 seconds
    |     Set Screenshot Folder    C:\\Temp\\IMG
    |     ${value}    Read    3    10    17
    |     Page Should Contain String    ENTER APPLICATION
    |     Wait Field Detected
    |     Write Bare    applicationname
    |     Send Enter
    |     Take Screenshot
    |     Close Connection

    = Concurrent Connections =

    The library allows you to have multiple sessions open at the same time. Each session opened by `Open Connection` or
    `Open Connection From Session File` will return an index that can be used to reference it
    when switching between connections using the `Switch Connection` keyword. The indices start from 1 and are incremented
    by each newly opened connection. Calling `Close All Connections` will reset the index counter to 1.

    Additionally, you can provide aliases to your sessions when opening a connection, and switch the connection
    using that alias instead of the index.

    It is worth noting that the connection that was opened last is always the current connection.

    | *** Test Cases ***
    | Concurrent Sessions
    |     ${index_1}    Open Connection    Hostname    # this is the current connection
    |     Write Bare    First String
    |     ${index_2}    Open Connection    Hostname    alias=second    # 'second' is now the current connection
    |     Write Bare    Second String
    |     Switch Connection    ${index_1}    # switching the connection using the index
    |     Page Should Contain String    First String
    |     Switch Connection    second    # switching the connection using the alias
    |     Page Should Contain String    Second String
    |     [Teardown]    Close All Connections

    = Changing the emulator model (experimental) =

    By default, the library uses the emulator model 2, which is 24 rows by 80 columns.
    You can, however, change the model globally when `importing` the library with the `model` argument
    set to the model of your choice.

    The basic models are 2, 3, 4, and 5. These models differ in their screen size as illustrated in this table:

    | *3270 Model* | *Rows* | *Columns* |
    | 2 | 24 | 80 |
    | 3 | 32 | 80 |
    | 4 | 43 | 80 |
    | 5 | 27 | 132 |

    They can be combined with the 3278 (monochrome green-screen) or 3279 (color) prefix, e.g. 3278-2 or 3279-2.
    In addition to that, there is a -E suffix that indicates support for the [https://x3270.miraheze.org/wiki/3270_data_stream_protocol#extended|x3270 extended data stream].
    You can find more information on emulator models on the [https://x3270.miraheze.org/wiki/3270_models|x3270 wiki].

    In addition to setting the model globally, you can also set the model on the individual emulator basis by providing the model arguments to the `Open Connection`
    or `Open Connection From Session File` keywords.

    Here is an example for setting the emulator in the Open Connection keyword:

    | Open Connection    pub400.com    extra_args=["-xrm", "*model: 4"]

    And this is how you would set the emulator model in the Open Connection From Session File keyword:

    | Open Connection From Session File    /path/to/session/file

    Where the content of the session file would be

    | *hostname: pub400.com
    | *model: 4

    Note that this is an experimental feature, so not all models might work as expected.
    """

    ROBOT_LIBRARY_SCOPE = "TEST SUITE"
    ROBOT_LIBRARY_VERSION = VERSION
    def __init__(
        self,
        visible: bool = True,
        timeout: timedelta = timedelta(seconds=30),
        wait_time: timedelta = timedelta(milliseconds=500),
        wait_time_after_write: timedelta = timedelta(seconds=0),
        img_folder: str = ".",
        run_on_failure_keyword: str = "Take Screenshot",
        model: str = "2",
    ) -> None:
        """
        By default the emulator visibility is set to visible=True.
        In this case test cases are executed using wc3270 (Windows) or x3270 (Linux/MacOSX).
        You can change this by setting visible=False.
        Then test cases are run using ws3720 (Windows) or s3270 (Linux/MacOS).
        This is useful when test cases are run in a CI/CD-pipeline and there is no need for a graphical user interface.

        Timeout, waits and screenshot folder are set on library import as shown above.
        However, they can be changed during runtime. To modify the ``wait_time``, see `Change Wait Time`,
        to modify the ``img_folder``, see `Set Screenshot Folder`,
        and to modify the ``timeout``, see the `Change Timeout` keyword. Timeouts support all available
        Robot Framework [https://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#time-format|time formats].

        By default, Mainframe3270 will take a screenshot on failure.
        You can overwrite this to run any other keyword by setting the ``run_on_failure_keyword`` option.
        If you pass ``None`` to this argument, no keyword will be run.
        To change the ``run_on_failure_keyword`` during runtime, see `Register Run On Failure Keyword`.
        """
        self.visible = visible
        # Normalize all Robot time formats to plain seconds.
        self.timeout = convert_timeout(timeout)
        self.wait_time = convert_timeout(wait_time)
        self.wait_time_after_write = convert_timeout(wait_time_after_write)
        self.img_folder = img_folder
        # Guards against re-entrancy while the on-failure keyword itself runs.
        self._running_on_failure_keyword = False
        self.register_run_on_failure_keyword(run_on_failure_keyword)
        self.model = model
        # Holds all open Emulator sessions; `mf` exposes the current one.
        self.cache = ConnectionCache()
        # When generating the library documentation with libdoc, BuiltIn.get_variable_value throws
        # a RobotNotRunningError. Therefore, we catch it here to be able to generate the documentation.
        try:
            self.output_folder = BuiltIn().get_variable_value("${OUTPUT DIR}")
        except RobotNotRunningError:
            self.output_folder = os.getcwd()
        # Keyword implementations are split into components; DynamicCore
        # aggregates them into this library's keyword namespace.
        libraries = [
            AssertionKeywords(self),
            CommandKeywords(self),
            ConnectionKeywords(self),
            ReadWriteKeywords(self),
            ScreenshotKeywords(self),
            WaitAndTimeoutKeywords(self),
        ]
        DynamicCore.__init__(self, libraries)
    @property
    def mf(self) -> Emulator:
        # The Emulator behind the currently selected connection (see ConnectionCache).
        return self.cache.current
@keyword("Register Run On Failure Keyword")
def register_run_on_failure_keyword(self, keyword: str) -> None:
"""
This keyword lets you change the keyword that runs on failure during test execution.
The default is `Take Screenshot`, which is set on library import.
You can set ``None`` to this keyword, if you do not want to run any keyword on failure.
Example:
| Register Run On Failure Keyword | None | # no keyword is run on failure |
| Register Run On Failure Keyword | Custom Keyword | # Custom Keyword is run on failure |
"""
if keyword.lower() == "none":
self.run_on_failure_keyword = None
else:
self.run_on_failure_keyword = keyword
    def run_keyword(self, name: str, args: list, kwargs: dict) -> Any:
        # DynamicCore entry point: run the keyword and, on any error, trigger
        # the registered on-failure keyword (e.g. Take Screenshot) before re-raising.
        try:
            return DynamicCore.run_keyword(self, name, args, kwargs)
        except Exception:
            self.run_on_failure()
            raise
def run_on_failure(self) -> None:
if self._running_on_failure_keyword or not self.run_on_failure_keyword:
return
try:
self._running_on_failure_keyword = True
BuiltIn().run_keyword(self.run_on_failure_keyword)
except Exception as error:
logger.warn(f"Keyword '{self.run_on_failure_keyword}' could not be run on failure: {error}")
finally:
self._running_on_failure_keyword = False | /robotframework-mainframe3270-4.0.tar.gz/robotframework-mainframe3270-4.0/Mainframe3270/__init__.py | 0.803791 | 0.393909 | __init__.py | pypi |
import time
from typing import Any, Optional
from robot.api.deco import keyword
from Mainframe3270.librarycomponent import LibraryComponent
class ReadWriteKeywords(LibraryComponent):
    @keyword("Read")
    def read(self, ypos: int, xpos: int, length: int) -> str:
        """Get a string of ``length`` at screen coordinates ``ypos`` / ``xpos``.
        Coordinates are 1 based, as listed in the status area of the terminal.
        Example for read a string in the position y=8 / x=10 of a length 15:
        | ${value} | Read | 8 | 10 | 15 |
        """
        return self.mf.string_get(ypos, xpos, length)

    @keyword("Read All Screen")
    def read_all_screen(self) -> str:
        """Read the current screen and returns all content in one string.
        This is useful if your automation scripts should take different routes depending
        on a message shown on the screen.
        Example:
        | ${screen} | Read All Screen |
        | IF | 'certain text' in '''${screen}''' |
        | | Do Something |
        | ELSE | |
        | | Do Something Else |
        | END | |
        """
        return self.mf.read_all_screen()

    @keyword("Write")
    def write(self, txt: str) -> None:
        """Send a string *and Enter* to the screen at the current cursor location.
        Example:
        | Write | something |
        """
        self._write(txt, enter=True)

    @keyword("Write Bare")
    def write_bare(self, txt: str) -> None:
        """Send only the string to the screen at the current cursor location.
        Example:
        | Write Bare | something |
        """
        self._write(txt)

    @keyword("Write In Position")
    def write_in_position(self, txt: str, ypos: int, xpos: int) -> None:
        """Send a string *and Enter* to the screen at screen coordinates ``ypos`` / ``xpos``.
        Coordinates are 1 based, as listed in the status area of the
        terminal.
        Example:
        | Write in Position | something | 9 | 11 |
        """
        self._write(txt, ypos, xpos, enter=True)

    @keyword("Write Bare In Position")
    def write_bare_in_position(self, txt: str, ypos: int, xpos: int):
        """Send only the string to the screen at screen coordinates ``ypos`` / ``xpos``.
        Coordinates are 1 based, as listed in the status area of the
        terminal.
        Example:
        | Write Bare in Position | something | 9 | 11 |
        """
        self._write(txt, ypos, xpos)

    def _write(
        self,
        txt: Any,
        ypos: Optional[int] = None,
        xpos: Optional[int] = None,
        enter: bool = False,
    ) -> None:
        """Shared implementation for the ``Write*`` keywords.

        Sends ``txt`` at the current cursor position, or at ``ypos`` / ``xpos``
        when both are given; waits the configured "wait time after write";
        optionally presses Enter followed by the configured wait time.
        """
        # unicode_escape produces an escaped byte string for the emulator.
        txt = txt.encode("unicode_escape")
        if ypos and xpos:
            self.mf.send_string(txt, ypos, xpos)
        else:
            self.mf.send_string(txt)
        time.sleep(self.wait_time_after_write)
        if enter:
            self.mf.send_enter()
            time.sleep(self.wait_time)
import time
from datetime import timedelta
from robot.api.deco import keyword
from robot.utils import secs_to_timestr
from Mainframe3270.librarycomponent import LibraryComponent
from Mainframe3270.utils import convert_timeout
class WaitAndTimeoutKeywords(LibraryComponent):
    @keyword("Change Timeout")
    def change_timeout(self, seconds: timedelta) -> None:
        """
        Change the timeout for connection in seconds.
        Example:
        | Change Timeout | 3 seconds |
        """
        self.timeout = convert_timeout(seconds)

    @keyword("Change Wait Time")
    def change_wait_time(self, wait_time: timedelta) -> None:
        """To give time for the mainframe screen to be "drawn" and receive the next commands, a "wait time" has been
        created, which by default is set to 0.5 seconds. This is a sleep applied AFTER the following keywords:
        - `Execute Command`
        - `Send Enter`
        - `Send PF`
        - `Write`
        - `Write in position`
        If you want to change this value, just use this keyword passing the time in seconds.
        Example:
        | Change Wait Time | 0.5 |
        | Change Wait Time | 200 milliseconds |
        | Change Wait Time | 0:00:01.500 |
        """
        self.wait_time = convert_timeout(wait_time)

    @keyword("Change Wait Time After Write")
    def change_wait_time_after_write(self, wait_time_after_write: timedelta) -> None:
        """To give the user time to see what is happening inside the mainframe, a "wait time after write" has
        been created, which by default is set to 0 seconds. This is a sleep applied AFTER sending a string in these
        keywords:
        - `Write`
        - `Write Bare`
        - `Write in position`
        - `Write Bare in position`
        If you want to change this value, just use this keyword passing the time in seconds.
        Note: This keyword is useful for debug purpose
        Example:
        | Change Wait Time After Write | 1 |
        | Change Wait Time After Write | 0.5 seconds |
        | Change Wait Time After Write | 0:00:02 |
        """
        self.wait_time_after_write = convert_timeout(wait_time_after_write)

    @keyword("Wait Field Detected")
    def wait_field_detected(self) -> None:
        """Wait until the screen is ready, the cursor has been positioned
        on a modifiable field, and the keyboard is unlocked.
        Sometimes the server will "unlock" the keyboard but the screen
        will not yet be ready. In that case, an attempt to read or write to the
        screen will result in a 'E' keyboard status because we tried to read from
        a screen that is not ready yet.
        Using this method tells the client to wait until a field is
        detected and the cursor has been positioned on it.
        """
        self.mf.wait_for_field()

    @keyword("Wait Until String")
    def wait_until_string(self, txt: str, timeout: timedelta = timedelta(seconds=5)) -> str:
        """Wait until a string exists on the mainframe screen to perform the next step. If the string does not appear
        in 5 seconds, the keyword will raise an exception. You can define a different timeout.
        Example:
        | Wait Until String | something |
        | Wait Until String | something | 10 |
        | Wait Until String | something | 15 s |
        | Wait Until String | something | 0:00:15 |
        """
        timeout = convert_timeout(timeout)
        max_time = time.time() + timeout  # type: ignore
        while time.time() < max_time:
            result = self.mf.search_string(str(txt))
            if result:
                return txt
            # Back off briefly between polls instead of busy-spinning the CPU.
            time.sleep(0.05)
        raise Exception(f'String "{txt}" not found in {secs_to_timestr(timeout)}')
import time
from typing import Optional
from robot.api.deco import keyword
from Mainframe3270.librarycomponent import LibraryComponent
class CommandKeywords(LibraryComponent):
    @keyword("Execute Command")
    def execute_command(self, cmd: str) -> None:
        """Execute a [http://x3270.bgp.nu/wc3270-man.html#Actions|x3270 command].
        Example:
        | Execute Command | Enter |
        | Execute Command | Home |
        | Execute Command | Tab |
        | Execute Command | PF(1) |
        """
        self.mf.exec_command(cmd.encode("utf-8"))
        time.sleep(self.wait_time)

    @keyword("Delete Char")
    def delete_char(self, ypos: Optional[int] = None, xpos: Optional[int] = None) -> None:
        """Delete the character under the cursor. If you want to delete a character that is in
        another position, simply pass the coordinates ``ypos`` / ``xpos``.
        Coordinates are 1 based, as listed in the status area of the terminal.
        Example:
        | Delete Char |
        | Delete Char | ypos=9 | xpos=25 |
        """
        # Move first when a target position is given; otherwise act at the cursor.
        if ypos and xpos:
            self.mf.move_to(ypos, xpos)
        self.mf.exec_command(b"Delete")

    @keyword("Delete Field")
    def delete_field(self, ypos: Optional[int] = None, xpos: Optional[int] = None) -> None:
        """Delete the entire content of a field at the current cursor location and positions
        the cursor at beginning of field. If you want to delete a field that is in
        another position, simply pass the coordinates ``ypos`` / ``xpos`` of any part in the field.
        Coordinates are 1 based, as listed in the status area of the terminal.
        Example:
        | Delete Field |
        | Delete Field | ypos=12 | xpos=6 |
        """
        if ypos and xpos:
            self.mf.move_to(ypos, xpos)
        self.mf.delete_field()

    @keyword("Send Enter")
    def send_enter(self) -> None:
        """
        Send an Enter to the screen.
        """
        self.mf.send_enter()
        time.sleep(self.wait_time)

    @keyword("Move Next Field")
    def move_next_field(self) -> None:
        """
        Move the cursor to the next input field. Equivalent to pressing the Tab key.
        """
        self.mf.exec_command(b"Tab")

    @keyword("Move Previous Field")
    def move_previous_field(self) -> None:
        """
        Move the cursor to the previous input field. Equivalent to pressing the Shift+Tab keys.
        """
        self.mf.exec_command(b"BackTab")

    @keyword("Send PF")
    def send_pf(self, pf: str) -> None:
        """Send a Program Function to the screen.
        Example:
        | Send PF | 3 |
        """
        self.mf.exec_command("PF({0})".format(pf).encode("utf-8"))
        time.sleep(self.wait_time)
import os
import re
import shlex
from os import name as os_name
from typing import List, Optional, Union
from robot.api import logger
from robot.api.deco import keyword
from Mainframe3270.librarycomponent import LibraryComponent
from Mainframe3270.py3270 import Emulator
class ConnectionKeywords(LibraryComponent):
    @keyword("Open Connection")
    def open_connection(
        self,
        host: str,
        LU: Optional[str] = None,
        port: int = 23,
        extra_args: Optional[Union[List[str], os.PathLike]] = None,
        alias: Optional[str] = None,
    ) -> int:
        """Create a connection to an IBM3270 mainframe with the default port 23.
        To establish a connection, only the hostname is required.
        Optional parameters include logical unit name (LU) and port.
        Additional configuration data can be provided through the `extra_args` parameter.
        `extra_args` accepts either a list or a path
        to a file containing [https://x3270.miraheze.org/wiki/Category:Command-line_options|x3270 command line options].
        Entries in the argfile can be on one line or multiple lines. Lines starting with "#" are considered comments.
        Arguments containing whitespace must be enclosed in single or double quotes.
        | # example_argfile_oneline.txt
        | -accepthostname myhost.com
        | # example_argfile_multiline.txt
        | -xrm "wc3270.acceptHostname: myhost.com"
        | # this is a comment
        | -charset french
        | -port 992
        Please ensure that the arguments provided are available for your specific x3270 application and version.
        Refer to the [https://x3270.miraheze.org/wiki/Wc3270/Command-line_options|wc3270 command line options]
        for a subset of available options.
        Note: If you specify the port with the `-port` command-line option in `extra_args`
        (or use the -xrm resource command for it), it will take precedence over the `port` argument provided
        in the `Open Connection` keyword.
        This keyword returns the index of the opened connection, which can be used to reference the connection
        when switching between connections using the `Switch Connection` keyword. For more information on opening
        and switching between multiple connections, please refer to the `Concurrent Connections` section.
        Example:
        | Open Connection | Hostname |
        | Open Connection | Hostname | LU=LUname |
        | Open Connection | Hostname | port=992 |
        | @{extra_args} | Create List | -accepthostname | myhost.com | -cafile | ${CURDIR}/cafile.crt |
        | Append To List | ${extra_args} | -port | 992 |
        | Open Connection | Hostname | extra_args=${extra_args} |
        | Open Connection | Hostname | extra_args=${CURDIR}/argfile.txt |
        | Open Connection | Hostname | alias=my_first_connection |
        """
        extra_args = self._process_args(extra_args)
        # A model specified via extra_args takes precedence over the library-level model.
        model = self._get_model_from_list_or_file(extra_args)
        connection = Emulator(self.visible, self.timeout, extra_args, model or self.model)
        # "LU@host" is the connect string form used to request a specific logical unit.
        host_string = f"{LU}@{host}" if LU else host
        if self._port_in_extra_args(extra_args):
            if port != 23:
                logger.warn(
                    "The connection port has been specified both in the `port` argument and in `extra_args`. "
                    "The port specified in `extra_args` will take precedence over the `port` argument. "
                    "To avoid this warning, you can either remove the port command-line option from `extra_args`, "
                    "or leave the `port` argument at its default value of 23."
                )
            connection.connect(host_string)
        else:
            connection.connect(f"{host_string}:{port}")
        return self.cache.register(connection, alias)
@staticmethod
def _process_args(args) -> list:
processed_args = []
if not args:
return []
elif isinstance(args, list):
processed_args = args
elif isinstance(args, os.PathLike) or isinstance(args, str):
with open(args) as file:
for line in file:
if line.lstrip().startswith("#"):
continue
for arg in shlex.split(line):
processed_args.append(arg)
return processed_args
    @staticmethod
    def _get_model_from_list_or_file(list_or_file):
        """Extract the last 3270 model designation (e.g. "3279-4-E") given via a
        ``model`` option/resource in ``list_or_file``; return None when absent.

        ``list_or_file`` may be a list of command-line arguments or a path to a file.
        """
        # NOTE(review): the leading "[wcxs3270.*]+" is a character *class*, not the
        # emulator-name alternation it appears to spell out -- it matches any run of
        # those characters before "model:". Presumably good enough in practice; verify.
        pattern = re.compile(r"[wcxs3270.*]+model:\s*([327892345E-]+)")
        match = None
        if isinstance(list_or_file, list):
            match = pattern.findall(str(list_or_file))
        elif isinstance(list_or_file, os.PathLike) or isinstance(list_or_file, str):
            with open(list_or_file) as file:
                match = pattern.findall(file.read())
        # The last occurrence wins.
        return None if not match else match[-1]
@staticmethod
def _port_in_extra_args(args) -> bool:
if not args:
return False
pattern = re.compile(r"[wcxs3270.*-]+port[:]{0,1}")
if pattern.search(str(args)):
return True
return False
    @keyword("Open Connection From Session File")
    def open_connection_from_session_file(self, session_file: os.PathLike, alias: Optional[str] = None) -> int:
        """Create a connection to an IBM3270 mainframe
        using a [https://x3270.miraheze.org/wiki/Session_file|session file].
        The session file contains [https://x3270.miraheze.org/wiki/Category:Resources|resources (settings)]
        for a specific host session.
        The only mandatory setting required to establish the connection
        is the [https://x3270.miraheze.org/wiki/Hostname_resource|hostname resource].
        This keyword is an alternative to `Open Connection`. Please note that the Robot-Framework-Mainframe-3270-Library
        currently only supports model "2". Specifying any other model will result in a failure.
        For more information on session file syntax and detailed examples, please
        consult the [https://x3270.miraheze.org/wiki/Session_file|x3270 wiki].
        This keyword returns the index of the opened connection, which can be used to reference the connection
        when switching between connections using the `Switch Connection` keyword. For more information on opening and
        switching between multiple connections, please refer to the `Concurrent Connections` section.
        Example:
        | Open Connection From Session File | ${CURDIR}/session.wc3270 |
        where the content of `session.wc3270` is:
        | wc3270.hostname: myhost.com:23
        | wc3270.model: 2
        """
        self._check_session_file_extension(session_file)
        self._check_contains_hostname(session_file)
        model = self._get_model_from_list_or_file(session_file)
        if os_name == "nt" and self.visible:
            # wc3270 (Windows, visible) takes the session file as the connect target.
            connection = Emulator(self.visible, self.timeout, model=model or self.model)
            connection.connect(str(session_file))
        else:
            # The headless/posix emulators receive the session file as a command-line argument.
            connection = Emulator(self.visible, self.timeout, [str(session_file)], model or self.model)
        return self.cache.register(connection, alias)
def _check_session_file_extension(self, session_file):
file_extension = str(session_file).rsplit(".")[-1]
expected_extensions = {
("nt", True): "wc3270",
("nt", False): "ws3270",
("posix", True): "x3270",
("posix", False): "s3270",
}
expected_extension = expected_extensions.get((os_name, self.visible))
if file_extension != expected_extension:
raise ValueError(
f"Based on the emulator that you are using, "
f'the session file extension has to be ".{expected_extension}", '
f'but it was ".{file_extension}"'
)
    @staticmethod
    def _check_contains_hostname(session_file):
        """Raise ValueError when the session file does not define a hostname resource."""
        with open(session_file) as file:
            # A simple substring check is sufficient: any "<emulator>.hostname:" line matches.
            if "hostname:" not in file.read():
                raise ValueError(
                    "Your session file needs to specify the hostname resource "
                    "to set up the connection. "
                    "An example for wc3270 looks like this: \n"
                    "wc3270.hostname: myhost.com\n"
                )
    @keyword("Switch Connection")
    def switch_connection(self, alias_or_index: Union[str, int]):
        """Switch the current connection to the one identified by index or alias. Indices are returned from
        and aliases can be optionally provided to the `Open Connection` and `Open Connection From Session File`
        keywords.
        For more information on opening and switching between multiple connections,
        please refer to the `Concurrent Connections` section.
        Examples:
        | Open Connection | Hostname | alias=first |
        | Open Connection | Hostname | alias=second | # second is now the current connection |
        | Switch Connection | first | | # first is now the current connection |
        """
        # Indices/aliases are managed by the cache populated in `Open Connection`.
        self.cache.switch(alias_or_index)
    @keyword("Close Connection")
    def close_connection(self) -> None:
        """
        Close the current connection.
        """
        # NOTE: the connection is terminated but not removed from the cache here;
        # `Close All Connections` is what resets the index counter.
        self.mf.terminate()
@keyword("Close All Connections")
def close_all_connections(self) -> None:
"""
Close all currently opened connections and reset the index counter to 1.
"""
self.cache.close_all("terminate") | /robotframework-mainframe3270-4.0.tar.gz/robotframework-mainframe3270-4.0/Mainframe3270/keywords/connection.py | 0.79799 | 0.314735 | connection.py | pypi |
import asyncio
from mitmproxy import options
from mitmproxy.tools import dump
from robot.api.deco import library, not_keyword
from robot.api import logger
from .version import VERSION
from .async_loop_thread import AsyncLoopThread
from .request_logger import RequestLogger
@library(scope='SUITE', version=VERSION, auto_keywords=True)
class MitmLibrary:
"""MitmLibrary is a library that implements the mitmproxy package into
robotframework. Mitmproxy can be used to listen, intercept and manipulate network
traffic. This enables us to manipulate our traffic on request level, without needing
to build stubs or mocks.
= Why use Mitm? =
Mitm allows manipulation on single browser instance, by using a proxy. It does not
require you to set up stubs or mocks that might influence the entire application at
once, also resulting in stubbed/mocked behaviour while manual testing.
Examples where Mitm is useful:
- When running in parallel, if you do not want your other instances to be influenced.
    - Manipulate the response of a request to see how the front end handles it for an integrated service that is always up.
- Or if stubs or mocks are not available (yet).
- Or if their behaviour is not sufficient.
= Mitm Certificates =
To test with SSL verification, you will need to set up the certificates related to
mitm. Follow the guide on the
[https://docs.mitmproxy.org/stable/concepts-certificates/|Mitm website]
"""
@not_keyword
def __init__(self):
self.proxy_master = ""
self.request_logger = ""
self.loop_handler = AsyncLoopThread()
self.loop_handler.start()
    async def start_proxy(self, listen_host='0.0.0.0', listen_port=8080,
                          certificates_directory=None, ssl_insecure=False):
        """Starts a proxy at the given host and port. Default host is ``0.0.0.0``
        (all interfaces).
        It is possible to add ssl-verification by loading the mitm certificates.
        See the `Mitm Certificates` section for more information."""
        opts = options.Options(listen_host=listen_host, listen_port=listen_port,
                               confdir=certificates_directory, ssl_insecure=ssl_insecure)
        # DumpMaster is mitmproxy's headless engine; terminal log and flow dumping
        # are disabled since output is handled via the RequestLogger addon.
        self.proxy_master = dump.DumpMaster(
            opts,
            with_termlog=False,
            with_dumper=False,
        )
        self.request_logger = RequestLogger(self.proxy_master)
        self.proxy_master.addons.add(self.request_logger)
        # Schedule the proxy on the library's event-loop thread so this keyword
        # returns while the proxy keeps serving in the background.
        asyncio.run_coroutine_threadsafe(self.proxy_master.run(),
                                         self.loop_handler.loop)
    async def stop_proxy(self):
        """Stops the proxy."""
        # shutdown() terminates the DumpMaster.run() coroutine started in `start_proxy`.
        self.proxy_master.shutdown()
    def add_to_blocklist(self, url):
        """Adds a (partial) url to the list of blocked urls. If the url is found in any
        part of the pretty_url of the host it will be blocked."""
        # Blocklist state lives in the RequestLogger addon.
        self.request_logger.add_to_blocklist(url)
    def add_custom_response(self, alias, url, overwrite_headers=None,
                            overwrite_body=None, status_code=200):
        """Adds a custom response based on a (partial) url to the list of custom
        responses. If the (partial) url is found in any part of the pretty_url of
        the request, its response will be changed."""
        self.request_logger.add_custom_response_item(alias, url, overwrite_headers,
                                                     overwrite_body, status_code)
def add_response_delay(self, alias, url, delay):
self.request_logger.add_response_delay_item(alias,url,delay)
    def add_custom_response_status_code(self, alias, url, status_code=200):
        """Adds a custom response status_code to each request where the url contains
        the (partial) url of the custom status_code.
        Often used status codes:
        - 200. Success
        - 401. Unauthorized
        - 403. Forbidden
        - 404. Not found
        - 418. I'm a Teapot
        - 500. Internal Server error
        For more information, visit: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
        """
        # NOTE(review): this keyword has NO implementation -- it never registers the
        # status code with the RequestLogger, so calling it silently does nothing.
        # Compare `remove_custom_status_code`, which calls
        # `self.request_logger.remove_custom_status(alias)`; the matching "add" call
        # (presumably something like `add_custom_status(alias, url, status_code)`)
        # is missing here. Verify against RequestLogger's API and implement.
    def clear_all_proxy_items(self):
        """Removes all custom responses, blocked urls, etc. Basically, this acts as
        restarting the proxy, without actually restarting the proxy."""
        # All block/custom-response/delay state is owned by the RequestLogger addon.
        self.request_logger.clear_all_proxy_items()
def log_blocked_urls(self):
"""Logs the current list of items that will result in a block, if the url is
found in the pretty_url of a host."""
block_items = ", ".join(self.request_logger.block_list)
logger.info(f"URLs containing any of the following in their url will "
f"be blocked: {block_items}.")
def log_delayed_responses(self):
delayed_items = ", ".join([response.url for response in self.request_logger.response_delays_list])
logger.info(f"URLs containing any of the following in their url will "
f"be delayed: {delayed_items}.")
def log_custom_response_items(self):
"""Logs the current list of urls that will result in a custom response, if the
url is found in the pretty_url of a host.
Will also log the custom response items themselves."""
custom_responses = ", ".join([response.url for response in self.request_logger.custom_response_list])
logger.info(f"The following custom responses are currently loaded: "
f"{custom_responses}.")
for response in self.request_logger.custom_response_list:
logger.info(f"{response}")
    def log_custom_status_items(self):
        """Logs the current list of urls that will get a custom response status code,
        if the url is found in the pretty_url of a host.
        Will also log the custom status items themselves."""
        custom_responses = ", ".join(self.request_logger.custom_response_status)
        logger.info(f"The following custom responses are currently loaded: "
                    f"{custom_responses}.")
        for response in self.request_logger.custom_response_status:
            logger.info(f"{response}")
    def remove_url_from_blocklist(self, url):
        """Removes a (partial) url from the list of blocked urls."""
        self.request_logger.remove_from_blocklist(url)
    def remove_custom_response(self, alias):
        """Removes a custom response from the list, based on its alias."""
        self.request_logger.remove_custom_response_item(alias)
def remove_custom_status_code(self, alias):
"""Removes a custom status_code from the list."""
self.request_logger.remove_custom_status(alias) | /robotframework-mitmlibrary-0.1.1.tar.gz/robotframework-mitmlibrary-0.1.1/MitmLibrary/__init__.py | 0.762778 | 0.249139 | __init__.py | pypi |
import os
from keywords import *
from version import VERSION
from utils import LibraryListener
from robot.libraries.BuiltIn import BuiltIn
__version__ = VERSION
class AppiumLibrary(
    _LoggingKeywords,
    _RunOnFailureKeywords,
    _ElementKeywords,
    _ScreenshotKeywords,
    _ApplicationManagementKeywords,
    _WaitingKeywords,
    _TouchKeywords,
    _KeyeventKeywords,
    _AndroidUtilsKeywords,
    _MobileKeywords,
):
    """AppiumLibrary is a App testing library for Robot Framework.
    *Locating elements*
    All keywords in AppiumLibrary that need to find an element on the app
    take an argument, `locator`. By default, when a locator value is provided,
    it is matched against the key attributes of the particular element type.
    For example, `id` and `name` are key attributes to all elements, and
    locating elements is easy using just the `id` as a `locator`. For example:
    ``Click Element my_element``
    Appium additionally supports some of the _Mobile JSON Wire Protocol_
    (https://code.google.com/p/selenium/source/browse/spec-draft.md?repo=mobile) locator strategies
    It is also possible to specify the approach AppiumLibrary should take
    to find an element by specifying a lookup strategy with a locator
    prefix. Supported strategies are:
    | *Strategy* | *Example* | *Description* |
    | identifier | Click Element `|` identifier=my_element | Matches by @id or @name attribute |
    | id | Click Element `|` id=my_element | Matches by @id attribute |
    | name | Click Element `|` name=my_element | Matches by @name attribute |
    | xpath | Click Element `|` xpath=//UIATableView/UIATableCell/UIAButton | Matches with arbitrary XPath |
    | link | Click Element `|` link=My Link | Webview only Matches anchor elements by their link text |
    | partial link | Click Element `|` partial link=My Link | Webview only Matches anchor elements by their partial link text |
    | class | Click Element `|` class=UIAPickerWheel | Matches by class |
    | accessibility_id | Click Element `|` accessibility_id=t | Accessibility options utilize. |
    | android | Click Element `|` android=new UiSelector().description('Apps') | Matches by Android UI Automator |
    | ios | Click Element `|` ios=.buttons().withName('Apps') | Matches by iOS UI Automation |
    | css | Click Element `|` css=.green_button | Matches by css in webview |
    """
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LIBRARY_VERSION = VERSION

    def __init__(self, timeout=5, run_on_failure='Capture Page Screenshot', mobile_gif='False'):
        """AppiumLibrary can be imported with optional arguments.
        `timeout` is the default timeout used to wait for all waiting actions.
        It can be later set with `Set Appium Timeout`.
        `run_on_failure` specifies the name of a keyword (from any available
        libraries) to execute when a AppiumLibrary keyword fails. By default
        `Capture Page Screenshot` will be used to take a screenshot of the current page.
        Using the value `No Operation` will disable this feature altogether. See
        `Register Keyword To Run On Failure` keyword for more information about this
        functionality.
        `mobile_gif` Enable/Disable gif generation for each Test Case, Defalut setting is False/FALSE
        Examples:
        | Library | AppiumLibrary | 10 | # Sets default timeout to 10 seconds |
        | Library | AppiumLibrary | timeout=10 | run_on_failure=No Operation | # Sets default timeout to 10 seconds and does nothing on failure |
        | Library | AppiumLibrary | timeout=10 | mobile_gif=TRUE | # Sets default timeout to 10 seconds and enable gif file generation for each case |
        """
        # Initialise every keyword-group mixin explicitly; they do not
        # cooperate via super().
        for base in AppiumLibrary.__bases__:
            base.__init__(self)
        self.set_appium_timeout(timeout)
        self.register_keyword_to_run_on_failure(run_on_failure)
        self.Mobile_Set_Gif_Flag(mobile_gif)
        # NOTE(review): `== True` assumes Mobile_Set_Gif_Flag stores a real bool in
        # `_mobile_gen_gif`; verify before simplifying to a plain truthiness check.
        if self._mobile_gen_gif == True:
            self.ROBOT_LIBRARY_LISTENER = LibraryListener()
from appium.webdriver.common.touch_action import TouchAction
from AppiumLibrary.locators import ElementFinder
from keywordgroup import KeywordGroup
class _TouchKeywords(KeywordGroup):
    def __init__(self):
        self._element_finder = ElementFinder()

    # Public, element lookups
    def zoom(self, locator, percent="200%", steps=1):
        """
        Zooms in on an element a certain amount.
        """
        driver = self._current_application()
        element = self._element_find(locator, True, True)
        driver.zoom(element=element, percent=percent, steps=steps)

    def pinch(self, locator, percent="200%", steps=1):
        """
        Pinch in on an element a certain amount.
        """
        driver = self._current_application()
        element = self._element_find(locator, True, True)
        driver.pinch(element=element, percent=percent, steps=steps)

    def swipe(self, start_x, start_y, end_x, end_y, duration=1000):
        """
        Swipe from one point to another point, for an optional duration.
        """
        driver = self._current_application()
        driver.swipe(start_x, start_y, end_x, end_y, duration)

    def scroll(self, start_locator, end_locator):
        """
        Scrolls from one element to another
        Key attributes for arbitrary elements are `id` and `name`. See
        `introduction` for details about locating elements.
        """
        el1 = self._element_find(start_locator, True, True)
        el2 = self._element_find(end_locator, True, True)
        driver = self._current_application()
        driver.scroll(el1, el2)

    def scroll_to(self, locator):
        """Scrolls to element"""
        driver = self._current_application()
        element = self._element_find(locator, True, True)
        driver.execute_script("mobile: scrollTo", {"element": element.id})

    def long_press(self, locator, index=1):
        """ Long press the element
        :param locator: locate method
        :param index: select the n th element
        """
        driver = self._current_application()
        element = self._get_selected_element(locator, index)
        long_press = TouchAction(driver).long_press(element)
        long_press.perform()

    def tap(self, locator):
        """ Tap on element """
        driver = self._current_application()
        el = self._element_find(locator, True, True)
        action = TouchAction(driver)
        action.tap(el).perform()

    def click_a_point(self, x=0, y=0):
        """ Click on a point"""
        self._info("Clicking on a point (%s,%s)." % (x, y))
        driver = self._current_application()
        action = TouchAction(driver)
        try:
            # Press and then release so the tap actually completes; a bare
            # press would leave the touch held down.
            action.press(x=float(x), y=float(y)).release().perform()
        except Exception:
            # Raise explicitly instead of `assert False` (which is stripped under
            # `python -O`), and avoid a bare `except:` that would also swallow
            # KeyboardInterrupt/SystemExit.
            raise AssertionError("Can't click on a point at (%s,%s)" % (x, y))
import os
import robot
from keywordgroup import KeywordGroup
class _ScreenshotKeywords(KeywordGroup):
    def __init__(self):
        # Independent counters for the two auto-naming schemes.
        self._screenshot_index = 0
        self._gif_index = 0

    # Public
    def capture_page_screenshot(self, filename=None):
        """Takes a screenshot of the current page and embeds it into the log.
        `filename` argument specifies the name of the file to write the
        screenshot into. If no `filename` is given, the screenshot is saved into file
        `appium-screenshot-<counter>.png` under the directory where
        the Robot Framework log file is written into. The `filename` is
        also considered relative to the same directory, if it is not
        given in absolute format.
        `css` can be used to modify how the screenshot is taken. By default
        the bakground color is changed to avoid possible problems with
        background leaking when the page layout is somehow broken.
        """
        path, link = self._get_screenshot_paths(filename)
        self._save_screenshot(path)
        # Image is shown on its own row and thus prev row is closed on purpose
        self._html('</td></tr><tr><td colspan="3"><a href="%s">'
                   '<img src="%s" width="800px"></a>' % (link, link))

    def capture_page_screenshot_without_html_log(self, filename=None):
        """Takes a screenshot of the current page and >do not< embeds it into the log.
        `filename` argument specifies the name of the file to write the
        screenshot into. If no `filename` is given, the screenshot is saved into file
        `mobile-gif-<counter>.png` under the directory where
        the Robot Framework log file is written into. The `filename` is
        also considered relative to the same directory, if it is not
        given in absolute format.
        """
        path, link = self._get_gif_screenshot_paths(filename)
        self._save_screenshot(path)

    # Private
    def _save_screenshot(self, path):
        """Write the current screen to ``path`` using whichever API the driver offers."""
        if hasattr(self._current_application(), 'get_screenshot_as_file'):
            self._current_application().get_screenshot_as_file(path)
        else:
            self._current_application().save_screenshot(path)

    def _get_screenshot_paths(self, filename):
        """Resolve ``filename`` (or the next auto-generated screenshot name) to
        an absolute path plus a log-relative link."""
        if not filename:
            self._screenshot_index += 1
            filename = 'appium-screenshot-%d.png' % self._screenshot_index
        return self._resolve_log_path(filename)

    def _get_gif_screenshot_paths(self, filename):
        """Like `_get_screenshot_paths`, but numbers files for gif-frame captures."""
        if not filename:
            self._gif_index += 1
            filename = 'mobile-gif-%d.png' % self._gif_index
        return self._resolve_log_path(filename)

    def _resolve_log_path(self, filename):
        """Join ``filename`` onto the Robot log directory; return (path, relative link)."""
        filename = filename.replace('/', os.sep)
        logdir = self._get_log_dir()
        path = os.path.join(logdir, filename)
        link = robot.utils.get_link_path(path, logdir)
        return path, link
import base64
from keywordgroup import KeywordGroup
from appium.webdriver.connectiontype import ConnectionType
class _AndroidUtilsKeywords(KeywordGroup):
    # Public
    def get_network_connection_status(self):
        """Returns an integer bitmask specifying the network connection type.
        Android only.
        See `set network connection status` for more details.
        """
        driver = self._current_application()
        return driver.network_connection

    def set_network_connection_status(self, connectionStatus):
        """Sets the network connection Status.
        Android only.
        Possible values:
        | =Value= | =Alias= | =Data= | =Wifi= | =Airplane Mode= |
        | 0 | (None) | 0 | 0 | 0 |
        | 1 | (Airplane Mode) | 0 | 0 | 1 |
        | 2 | (Wifi only) | 0 | 1 | 0 |
        | 4 | (Data only) | 1 | 0 | 0 |
        | 6 | (All network on) | 1 | 1 | 0 |
        """
        driver = self._current_application()
        return driver.set_network_connection(int(connectionStatus))

    def pull_file(self, path, decode=False):
        """Retrieves the file at `path` and return it's content.
        Android only.
        - _path_ - the path to the file on the device
        - _decode_ - True/False decode the data (base64) before returning it (default=False)
        """
        driver = self._current_application()
        # Appium returns the file content base64-encoded.
        theFile = driver.pull_file(path)
        if decode:
            theFile = base64.b64decode(theFile)
        return theFile

    def pull_folder(self, path, decode=False):
        """Retrieves a folder at `path`. Returns the folder's contents zipped.
        Android only.
        - _path_ - the path to the folder on the device
        - _decode_ - True/False decode the data (base64) before returning it (default=False)
        """
        driver = self._current_application()
        theFolder = driver.pull_folder(path)
        if decode:
            theFolder = base64.b64decode(theFolder)
        return theFolder

    def push_file(self, path, data, encode=False):
        """Puts the data in the file specified as `path`.
        Android only.
        - _path_ - the path on the device
        - _data_ - data to be written to the file
        - _encode_ - True/False encode the data as base64 before writing it to the file (default=False)
        """
        driver = self._current_application()
        if encode:
            data = base64.b64encode(data)
        driver.push_file(path, data)
from robot.libraries import BuiltIn
from keywordgroup import KeywordGroup
# Shared BuiltIn library instance used to execute the registered
# run-on-failure keyword.
BUILTIN = BuiltIn.BuiltIn()
class _RunOnFailureKeywords(KeywordGroup):
    """Keywords for configuring a keyword to run automatically on failure."""

    def __init__(self):
        # Name of the keyword to run when a library keyword fails,
        # or None when the feature is disabled.
        self._run_on_failure_keyword = None
        # Re-entrancy guard: prevents infinite recursion if the
        # run-on-failure keyword itself fails.
        self._running_on_failure_routine = False

    # Public
    def register_keyword_to_run_on_failure(self, keyword):
        """Sets the keyword to execute when a AppiumLibrary keyword fails.

        `keyword_name` is the name of a keyword (from any available
        libraries) that will be executed if a AppiumLibrary keyword fails.
        It is not possible to use a keyword that requires arguments.
        Using the value "Nothing" will disable this feature altogether.

        The initial keyword to use is set in `importing`, and the
        keyword that is used by default is `Capture Page Screenshot`.
        Taking a screenshot when something failed is a very useful
        feature, but notice that it can slow down the execution.

        This keyword returns the name of the previously registered
        failure keyword. It can be used to restore the original
        value later.

        Example:
        | Register Keyword To Run On Failure  | Log Source | # Run `Log Source` on failure. |
        | ${previous kw}= | Register Keyword To Run On Failure  | Nothing    | # Disables run-on-failure functionality and stores the previous kw name in a variable. |
        | Register Keyword To Run On Failure  | ${previous kw} | # Restore to the previous keyword. |

        This run-on-failure functionality only works when running tests on Python/Jython 2.4
        or newer and it does not work on IronPython at all.
        """
        old_keyword = self._run_on_failure_keyword
        old_keyword_text = old_keyword if old_keyword is not None else "No keyword"

        # "Nothing" (case-insensitive) disables the feature.
        new_keyword = keyword if keyword.strip().lower() != "nothing" else None
        new_keyword_text = new_keyword if new_keyword is not None else "No keyword"

        self._run_on_failure_keyword = new_keyword
        self._info('%s will be run on failure.' % new_keyword_text)

        return old_keyword_text

    # Private
    def _run_on_failure(self):
        # Execute the registered keyword, guarding against recursive
        # invocation when the failure keyword itself fails.
        if self._run_on_failure_keyword is None:
            return
        if self._running_on_failure_routine:
            return
        self._running_on_failure_routine = True
        try:
            BUILTIN.run_keyword(self._run_on_failure_keyword)
        # Fixed: `except Exception, err` is Python-2-only syntax and a
        # SyntaxError on Python 3; the `as` form works on Python 2.6+ too.
        except Exception as err:
            self._run_on_failure_error(err)
        finally:
            self._running_on_failure_routine = False

    def _run_on_failure_error(self, err):
        # Report the failure via _warn when available, otherwise re-raise.
        err = "Keyword '%s' could not be run on failure: %s" % (self._run_on_failure_keyword, err)
        if hasattr(self, '_warn'):
            self._warn(err)
            return
        raise Exception(err)
import time
import robot
from keywordgroup import KeywordGroup
class _WaitingKeywords(KeywordGroup):

    def wait_until_page_contains(self, text, timeout=None, error=None):
        """Waits until `text` appears on the current page.

        Fails if `timeout` expires before the text appears. See
        `introduction` for more information about `timeout` and its
        default value. `error` can be used to override the default
        error message.

        See also `Wait Until Page Does Not Contain`,
        `Wait Until Page Contains Element`,
        `Wait Until Page Does Not Contain Element` and
        BuiltIn keyword `Wait Until Keyword Succeeds`.
        """
        message = error or "Text '%s' did not appear in <TIMEOUT>" % text
        self._wait_until(timeout, message, self._is_text_present, text)

    def wait_until_page_does_not_contain(self, text, timeout=None, error=None):
        """Waits until `text` disappears from the current page.

        Fails if `timeout` expires before the `text` disappears. See
        `introduction` for more information about `timeout` and its
        default value. `error` can be used to override the default
        error message.

        See also `Wait Until Page Contains`,
        `Wait Until Page Contains Element`,
        `Wait Until Page Does Not Contain Element` and
        BuiltIn keyword `Wait Until Keyword Succeeds`.
        """
        def still_present():
            if self._is_text_present(text):
                return error or "Text '%s' did not disappear in %s" % (text, self._format_timeout(timeout))
            return None
        self._wait_until_no_error(timeout, still_present)

    def wait_until_page_contains_element(self, locator, timeout=None, error=None):
        """Waits until the element specified with `locator` appears on the current page.

        Fails if `timeout` expires before the element appears. See
        `introduction` for more information about `timeout` and its
        default value. `error` can be used to override the default
        error message.

        See also `Wait Until Page Contains`,
        `Wait Until Page Does Not Contain`,
        `Wait Until Page Does Not Contain Element`
        and BuiltIn keyword `Wait Until Keyword Succeeds`.
        """
        message = error or "Element '%s' did not appear in <TIMEOUT>" % locator
        self._wait_until(timeout, message, self._is_element_present, locator)

    def wait_until_page_does_not_contain_element(self, locator, timeout=None, error=None):
        """Waits until the element specified with `locator` disappears from the current page.

        Fails if `timeout` expires before the element disappears. See
        `introduction` for more information about `timeout` and its
        default value. `error` can be used to override the default
        error message.

        See also `Wait Until Page Contains`,
        `Wait Until Page Does Not Contain`,
        `Wait Until Page Contains Element` and
        BuiltIn keyword `Wait Until Keyword Succeeds`.
        """
        def still_present():
            if self._is_element_present(locator):
                return error or "Element '%s' did not disappear in %s" % (locator, self._format_timeout(timeout))
            return None
        self._wait_until_no_error(timeout, still_present)

    # Private

    def _wait_until(self, timeout, error, function, *args):
        # Poll `function` until it is truthy; `error` may contain the
        # <TIMEOUT> placeholder, replaced with the effective timeout.
        message = error.replace('<TIMEOUT>', self._format_timeout(timeout))

        def check():
            return None if function(*args) else message
        self._wait_until_no_error(timeout, check)

    def _wait_until_no_error(self, timeout, wait_func, *args):
        # Poll `wait_func` every 0.2 s; it returns None on success or an
        # error message. On timeout, log the page source and fail.
        seconds = robot.utils.timestr_to_secs(timeout) if timeout is not None else self._timeout_in_secs
        deadline = time.time() + seconds
        while True:
            failure = wait_func(*args)
            if not failure:
                return
            if time.time() > deadline:
                self.log_source()
                raise AssertionError(failure)
            time.sleep(0.2)

    def _format_timeout(self, timeout):
        # Normalize `timeout` (time string or None) into a readable string.
        seconds = robot.utils.timestr_to_secs(timeout) if timeout is not None else self._timeout_in_secs
        return robot.utils.secs_to_timestr(seconds)
import requests
import json
from urllib.parse import urljoin
from robot.api import logger
from .version import VERSION
# Library version string, published through ROBOT_LIBRARY_VERSION below.
__version__ = VERSION
class MockServerLibrary(object):
    """Robot Framework library for interacting with [http://www.mock-server.com|MockServer]

    The purpose of this library is to provide a keyword-based API
    towards MockServer to be used in robot tests. The project is hosted in
    [https://github.com/etsi-cti-admin/robotframework-mockserver|GitHub],
    and packages are released to PyPI.

    = Installation =

    | pip install robotframework-mockserver

    = Importing =

    The library does not currently support any import arguments, so use the
    following setting to take the library into use:

    | Library | MockServerLibrary |
    """
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LIBRARY_VERSION = __version__

    def create_mock_session(self, base_url):
        """Creates an HTTP session towards mockserver.

        `base_url` is the full url (including port, if applicable) of the mockserver,
        e.g. http://localhost:1080.
        """
        # Fixed: message previously referred to "robotframework-wiremock libary".
        logger.debug("robotframework-mockserver library version: {}".format(__version__))
        self.base_url = base_url
        self.session = requests.Session()

    def create_mock_request_matcher(self, method, path, body_type='JSON', body=None, exact=True):
        """Creates a mock request matcher to be used by mockserver.

        Returns the request matcher in a dictionary format.

        `method` is the HTTP method of the mocked endpoint

        `path` is the url of the mocked endpoint, e.g. /api

        `body_type` is the type of the request body, e.g. JSON

        `body` is a dictionary or string of the json attribute(s) to match

        `exact` is a boolean value which specifies whether the body should match fully (=true),
        or if only specified fields should match (=false)
        """
        req = {}
        req['method'] = method
        req['path'] = path

        if body_type == 'JSON' and body:
            match_type = 'STRICT' if exact else 'ONLY_MATCHING_FIELDS'
            # Fixed: a string body is already JSON; dumping it again would
            # double-encode it into a JSON string literal, so the matcher
            # would never match the intended object. Mirrors the
            # JSON_SCHEMA branch below.
            json_string = body if isinstance(body, str) else json.dumps(body)
            req['body'] = {'type': body_type, 'json': json_string, 'matchType': match_type}

        if body_type == 'JSON_SCHEMA' and body:
            json_string = body if isinstance(body, str) else json.dumps(body)
            req['body'] = {'type': body_type, 'jsonSchema': json_string}

        return req

    def create_mock_response(self, status_code, headers=None, body_type='JSON', body=None):
        """Creates a mock response to be used by mockserver.

        Returns the response in a dictionary format.

        `status_code` is the HTTP status code of the response

        `headers` is a dictionary of headers to be added to the response
        (multiple values for one header are given comma-separated)

        `body_type` is the type of the response body, e.g. JSON

        `body` is a dictionary of JSON attribute(s) to be added to the response body
        """
        rsp = {}
        rsp['statusCode'] = int(status_code)

        if headers:
            rsp['headers'] = []
            for key, value in headers.items():
                header = {'name': key, 'values': value.split(",")}
                rsp['headers'].append(header)
                logger.debug("Add header - header: {}".format(header))

        if body_type == 'JSON' and body:
            rsp['body'] = json.dumps(body)

        return rsp

    def create_mock_http_forward(self, path, delay=1, unit='SECONDS'):
        """Creates a mock http override forward to be used by mockserver.

        Returns the http forward in a dictionary format.

        `path` is the new url where to forward the request

        `delay` is the delay of the forward action

        `unit` is the unit of the delay time (default "SECONDS")
        """
        fwd = {}
        fwd['httpRequest'] = {'path': path}
        fwd['delay'] = {'timeUnit': unit, 'value': delay}

        return fwd

    def create_mock_expectation_with_http_forward(self, request, forward, count=1, unlimited=True):
        """Creates a mock expectation with request and forward action to be used by mockserver.

        `request` is a mock request matcher in a dictionary format.

        `forward` is a mock forward in a dictionary format.

        `count` is the number of expected requests

        `unlimited` is a boolean value which, if enabled, allows unspecified number of
        requests to reply to
        """
        data = {}
        data['httpRequest'] = request
        data['httpOverrideForwardedRequest'] = forward
        data['times'] = {'remainingTimes': int(count), 'unlimited': unlimited}

        self.create_mock_expectation_with_data(data)

    def create_mock_expectation(self, request, response, count=1, unlimited=True):
        """Creates a mock expectation to be used by mockserver.

        `request` is a mock request matcher in a dictionary format.

        `response` is a mock response in a dictionary format.

        `count` is the number of expected requests

        `unlimited` is a boolean value which, if enabled, allows unspecified number of
        requests to reply to
        """
        data = {}
        data['httpRequest'] = request
        data['httpResponse'] = response
        data['times'] = {'remainingTimes': int(count), 'unlimited': unlimited}

        self.create_mock_expectation_with_data(data)

    def create_default_mock_expectation(self, method, path, response_code=200,
                                        response_headers=None, body_type='JSON',
                                        response_body=None):
        """Creates a default expectation to be used by mockserver.

        `method` is the HTTP method of the mocked endpoint

        `path` is the url of the mocked endpoint, e.g. /api

        `response_code` is the HTTP status code of the response

        `response_headers` is a dictionary of headers to be added to the response

        `body_type` is the type of the response body, e.g. JSON

        `response_body` is a dictionary of JSON attribute(s) to be added to the response body
        """
        req = self.create_mock_request_matcher(method, path, exact=False)
        rsp = self.create_mock_response(response_code, response_headers, body_type, response_body)
        self.create_mock_expectation(req, rsp, unlimited=True)

    def create_mock_expectation_with_data(self, data):
        """Creates a mock expectation with defined data to be used by mockserver.

        `data` is a dictionary or JSON string with mock data. Please see
        [https://app.swaggerhub.com/apis/jamesdbloom/mock-server_api|MockServer documentation]
        for the detailed API reference.
        """
        self._send_request("/expectation", data)

    def verify_mock_expectation(self, request, count=1, exact=True):
        """Verifies that the mockserver has received a specific request.

        `request` is a request expectation created using the keyword `Create Mock Request Matcher`

        `count` is the minimum expected number of requests

        `exact` specifies whether the expected count should match the actual received count
        """
        data = {}
        data['httpRequest'] = request
        if exact:
            data['times'] = {'atLeast': int(count), 'atMost': int(count)}
        else:
            data['times'] = {'atLeast': int(count)}

        self.verify_mock_expectation_with_data(data)

    def verify_mock_expectation_with_data(self, data):
        """Verifies a mock expectation with specified data.

        `data` is a dictionary or JSON string with mock data. Please see
        [https://app.swaggerhub.com/apis/jamesdbloom/mock-server_api|MockServer documentation]
        for the detailed API reference.
        """
        self._send_request("/verify", data)

    def verify_mock_sequence(self, requests):
        """Verifies that the mockserver has received a specific ordered request sequence.

        `requests` is a list of request expectations created using the keyword
        `Create Mock Request Matcher`
        """
        body = {}
        body["httpRequests"] = requests
        data = json.dumps(body)
        self._send_request("/verifySequence", data)

    def retrieve_requests(self, path):
        """Retrieves requests from the mockserver.

        `path` is the url of the endpoint for which to retrieve requests, e.g. /api
        """
        body = {}
        body['path'] = path
        data = json.dumps(body)
        return self._send_request("/retrieve", data)

    def retrieve_expectations(self, path):
        """Retrieves expectations from the mockserver.

        `path` is the url of the endpoint for which to retrieve expectations, e.g. /api
        """
        body = {}
        body['path'] = path
        data = json.dumps(body)
        return self._send_request("/retrieve?type=active_expectations", data)

    def clear_requests(self, path):
        """Clears expectations and requests for a specific endpoint from the mockserver.

        `path` is the url of the endpoint for which to clean expectations and requests, e.g. /api
        """
        body = {}
        body['path'] = path
        data = json.dumps(body)
        self._send_request("/clear", data)

    def reset_all_requests(self):
        """Clears all expectations and received requests from the mockserver.
        """
        self._send_request("/reset")

    def dump_to_log(self):
        """Dumps logs at the mockserver. Currently a no-op.
        """
        # self._send_request("/dumpToLog")
        pass

    def _send_request(self, path, data=None):
        # Serialize dict payloads to JSON; strings pass through untouched.
        if isinstance(data, dict):
            data_dump = json.dumps(data)
        else:
            data_dump = data

        url = urljoin(self.base_url, path)
        logger.debug("url: {}, data: {}".format(url, data_dump))
        rsp = self.session.put(url, data=data_dump, timeout=5.0)

        if rsp.status_code >= 400:
            raise AssertionError("Mock server failed with {}: {}".format(rsp.status_code, rsp.text))

        return rsp
import requests
import json
from urllib.parse import urljoin
from robot.api import logger
from .version import VERSION
# Library version string, published through ROBOT_LIBRARY_VERSION below.
__version__ = VERSION
class MockServerLibrary(object):
    """Robot Framework keyword library for [http://www.mock-server.com|MockServer].

    Provides keywords for registering expectations on, verifying requests
    against and cleaning up a running MockServer instance. The project is
    hosted in
    [https://github.com/frankvanderkuur/robotframework-mockserverlibrary|GitHub],
    and packages are released to PyPI.

    = Installation =

    | pip install robotframework-mockserverlibrary

    = Importing =

    No import arguments are currently supported:

    | Library | MockServerLibrary |
    """
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LIBRARY_VERSION = __version__

    def create_mock_session(self, base_url):
        """Opens an HTTP session towards the mockserver at `base_url`
        (the full url including port, e.g. http://localhost:1080).
        """
        logger.debug("robotframework-wiremock libary version: {}".format(__version__))
        self.base_url = base_url
        self.session = requests.Session()

    def create_mock_request_matcher(self, method, path, body_type='JSON', body=None, exact=True):
        """Returns a request matcher dictionary for the mockserver.

        `method` is the HTTP method and `path` the url of the mocked
        endpoint (e.g. /api). `body` may be a dictionary or a ready-made
        JSON string; `body_type` selects JSON or JSON_SCHEMA matching and
        `exact` toggles strict body comparison for JSON bodies.
        """
        matcher = {'method': method, 'path': path}
        json_string = body if isinstance(body, str) else json.dumps(body)

        if body and body_type == 'JSON':
            strictness = 'STRICT' if exact else 'ONLY_MATCHING_FIELDS'
            matcher['body'] = {'type': body_type, 'json': json_string, 'matchType': strictness}
        if body and body_type == 'JSON_SCHEMA':
            matcher['body'] = {'type': body_type, 'jsonSchema': json_string}

        return matcher

    def create_mock_response(self, status_code, headers=None, body_type='JSON', body=None):
        """Returns a response dictionary for the mockserver.

        `status_code` is the HTTP status code; `headers` is a dictionary
        whose values may hold multiple comma-separated entries; `body` may
        be a dictionary of JSON attributes or a ready-made string when
        `body_type` is JSON.
        """
        response = {'statusCode': int(status_code)}

        if headers:
            response['headers'] = []
            for name, joined_values in headers.items():
                entry = {'name': name, 'values': joined_values.split(",")}
                response['headers'].append(entry)
                logger.debug("Add header - header: {}".format(entry))

        if body_type == 'JSON' and body:
            response['body'] = json.dumps(body) if isinstance(body, dict) else body

        return response

    def create_mock_http_forward(self, path, delay=1, unit='SECONDS'):
        """Returns an http override forward dictionary targeting `path`
        with the given `delay` (`unit` defaults to "SECONDS").
        """
        return {
            'httpRequest': {'path': path},
            'delay': {'timeUnit': unit, 'value': delay},
        }

    def create_mock_expectation_with_http_forward(self, request, forward, count=1, unlimited=True):
        """Registers an expectation pairing `request` (matcher dictionary)
        with the `forward` action; `count`/`unlimited` control how many
        requests it answers.
        """
        self.create_mock_expectation_with_data({
            'httpRequest': request,
            'httpOverrideForwardedRequest': forward,
            'times': {'remainingTimes': int(count), 'unlimited': unlimited},
        })

    def create_mock_expectation(self, request, response, id="", count=1, unlimited=True):
        """Registers an expectation pairing `request` with `response`
        (both dictionaries). `id` optionally tags the expectation for later
        verification/clearing; `count`/`unlimited` control how many
        requests it answers.
        """
        expectation = {'httpRequest': request, 'httpResponse': response}
        if id != "":
            expectation['id'] = id
        expectation['times'] = {'remainingTimes': int(count), 'unlimited': unlimited}
        self.create_mock_expectation_with_data(expectation)

    def create_default_mock_expectation(self, method, path, response_code=200,
                                        response_headers=None, body_type='JSON',
                                        response_body=None):
        """Registers a non-strict expectation for `method` `path`,
        answering with `response_code`, `response_headers` and
        `response_body` (of `body_type`).
        """
        matcher = self.create_mock_request_matcher(method, path, exact=False)
        response = self.create_mock_response(response_code, response_headers, body_type,
                                             response_body)
        self.create_mock_expectation(matcher, response, unlimited=True)

    def create_mock_expectation_with_data(self, data):
        """Registers a raw expectation; `data` is a dictionary or JSON
        string following the
        [https://app.swaggerhub.com/apis/jamesdbloom/mock-server_api|MockServer API].
        """
        self._send_request("/expectation", data)

    def verify_mock_expectation(self, request, count=1, exact=True):
        """Verifies that `request` (from `Create Mock Request Matcher`) was
        received at least `count` times, or exactly `count` times when
        `exact` is true.
        """
        verification = {'httpRequest': request}
        if exact:
            verification['times'] = {'atLeast': int(count), 'atMost': int(count)}
        else:
            verification['times'] = {'atLeast': int(count)}
        self.verify_mock_expectation_with_data(verification)

    def verify_mock_expectation_by_id(self, id, count=1, exact=True):
        """Verifies that the expectation tagged `id` (set when creating it)
        matched at least `count` requests, or exactly `count` when `exact`
        is true.
        """
        verification = {'expectationId': {'id': id}}
        if exact:
            verification['times'] = {'atLeast': int(count), 'atMost': int(count)}
        else:
            verification['times'] = {'atLeast': int(count)}
        self.verify_mock_expectation_with_data(verification)

    def verify_mock_expectation_with_data(self, data):
        """Runs a raw verification; `data` is a dictionary or JSON string
        following the
        [https://app.swaggerhub.com/apis/jamesdbloom/mock-server_api|MockServer API].
        """
        self._send_request("/verify", data)

    def verify_mock_sequence(self, requests):
        """Verifies that the given list of request matchers (from
        `Create Mock Request Matcher`) was received in exactly that order.
        """
        self._send_request("/verifySequence", json.dumps({"httpRequests": requests}))

    def retrieve_requests(self, path):
        """Returns the requests the mockserver recorded for endpoint `path`
        (e.g. /api).
        """
        return self._send_request("/retrieve", json.dumps({'path': path}))

    def retrieve_expectations(self, path):
        """Returns the active expectations registered for endpoint `path`
        (e.g. /api).
        """
        return self._send_request("/retrieve?type=active_expectations", json.dumps({'path': path}))

    def clear_requests(self, path):
        """Clears expectations and recorded requests for endpoint `path`
        (e.g. /api).
        """
        self._send_request("/clear", json.dumps({'path': path}))

    def clear_requests_by_id(self, id, type="all"):
        """Clears information matching expectation `id`; `type` selects
        what to clear (all, log or expectation).
        """
        data = json.dumps({'id': id})
        if type.lower() not in ('all', 'log', 'expectation'):
            raise RuntimeError("Type must be one of these values: all, log or expectation")
        try:
            self._send_request("/clear?type=" + type.lower(), data)
        except Exception:
            raise Warning("Clearing expectation with id " + id + " was unseccesfull!")

    def reset_all_requests(self):
        """Clears every expectation and recorded request from the mockserver.
        """
        self._send_request("/reset")

    def dump_to_log(self):
        """Dumps logs at the mockserver. Currently a no-op.
        """
        # self._send_request("/dumpToLog")
        pass

    def _send_request(self, path, data=None):
        # Serialize dict payloads to JSON; strings pass through untouched.
        payload = json.dumps(data) if isinstance(data, dict) else data
        url = urljoin(self.base_url, path)
        logger.debug("url: {}, data: {}".format(url, payload))
        rsp = self.session.put(url, data=payload, timeout=5.0)
        if rsp.status_code >= 400:
            raise AssertionError("Mock server failed with {}: {}".format(rsp.status_code, rsp.text))
        return rsp
from robot.libraries.BuiltIn import BuiltIn
import logging
class MongoConnectionManager(object):
    """
    Connection Manager handles the connection & disconnection to the database.
    """

    def __init__(self):
        """Initializes the manager with no active database connection."""
        self._builtin = BuiltIn()
        self._dbconnection = None  # pymongo client, set by `connect_to_mongodb`

    def connect_to_mongodb(self, dbHost='localhost', dbPort=27017, dbMaxPoolSize=10, dbNetworkTimeout=None,
                           dbDocClass=dict, dbTZAware=False):
        """
        Loads pymongo and opens a MongoClient connection with the given
        parameters.

        Example usage:
        | # To connect to foo.bar.org's MongoDB service on port 27017 |
        | Connect To MongoDB | foo.bar.org | ${27017} |
        | # Or for an authenticated connection |
        | Connect To MongoDB | admin:admin@foo.bar.org | ${27017} |
        """
        import pymongo
        port_number = int(dbPort)
        logging.debug(
            "| Connect To MondoDB | dbHost | dbPort | dbMaxPoolSize | dbNetworktimeout | dbDocClass | dbTZAware |")
        logging.debug(
            "| Connect To MondoDB | %s | %s | %s | %s | %s | %s |" % (dbHost, port_number, dbMaxPoolSize,
                                                                      dbNetworkTimeout, dbDocClass, dbTZAware))
        self._dbconnection = pymongo.MongoClient(host=dbHost, port=port_number,
                                                 socketTimeoutMS=dbNetworkTimeout,
                                                 document_class=dbDocClass, tz_aware=dbTZAware,
                                                 maxPoolSize=dbMaxPoolSize)

    def disconnect_from_mongodb(self):
        """
        Closes the current connection to the MongoDB server.

        For example:
        | Disconnect From MongoDB | # disconnects from current connection to the MongoDB server |
        """
        logging.debug("| Disconnect From MongoDB |")
        self._dbconnection.close()
from robot.libraries.BuiltIn import BuiltIn
import logging
class MongoConnectionManager(object):
    """
    Connection Manager handles the connection & disconnection to the database.
    """

    def __init__(self):
        """Initializes the manager with no active database connection."""
        self._builtin = BuiltIn()
        self._dbconnection = None  # pymongo client, set by `connect_to_mongodb`

    def connect_to_mongodb(self, dbHost='localhost', dbPort=27017, dbMaxPoolSize=10, dbNetworkTimeout=None,
                           dbDocClass=dict, dbTZAware=False):
        """
        Loads pymongo and opens a MongoClient connection with the given
        parameters.

        Example usage:
        | # To connect to foo.bar.org's MongoDB service on port 27017 |
        | Connect To MongoDB | foo.bar.org | ${27017} |
        | # Or for an authenticated connection |
        | Connect To MongoDB | admin:admin@foo.bar.org | ${27017} |
        """
        import pymongo
        port_number = int(dbPort)
        logging.debug(
            "| Connect To MondoDB | dbHost | dbPort | dbMaxPoolSize | dbNetworktimeout | dbDocClass | dbTZAware |")
        logging.debug(
            "| Connect To MondoDB | %s | %s | %s | %s | %s | %s |" % (dbHost, port_number, dbMaxPoolSize,
                                                                      dbNetworkTimeout, dbDocClass, dbTZAware))
        self._dbconnection = pymongo.MongoClient(host=dbHost, port=port_number,
                                                 socketTimeoutMS=dbNetworkTimeout,
                                                 document_class=dbDocClass, tz_aware=dbTZAware,
                                                 maxPoolSize=dbMaxPoolSize)

    def disconnect_from_mongodb(self):
        """
        Closes the current connection to the MongoDB server.

        For example:
        | Disconnect From MongoDB | # disconnects from current connection to the MongoDB server |
        """
        logging.debug("| Disconnect From MongoDB |")
        self._dbconnection.close()
from robot.libraries.BuiltIn import BuiltIn
import logging
class MongoConnectionManager(object):
    """
    Connection Manager handles the connection & disconnection to the database.
    """

    def __init__(self):
        """Initializes the manager with no active database connection."""
        self._builtin = BuiltIn()
        self._dbconnection = None  # pymongo client, set by `connect_to_mongodb`

    def connect_to_mongodb(self, dbHost='localhost', dbPort=27017, dbMaxPoolSize=10, dbNetworkTimeout=None,
                           dbDocClass=dict, dbTZAware=False):
        """
        Loads pymongo and opens a MongoClient connection with the given
        parameters.

        Example usage:
        | # To connect to foo.bar.org's MongoDB service on port 27017 |
        | Connect To MongoDB | foo.bar.org | ${27017} |
        | # Or for an authenticated connection |
        | Connect To MongoDB | admin:admin@foo.bar.org | ${27017} |
        """
        import pymongo
        port_number = int(dbPort)
        logging.debug(
            "| Connect To MondoDB | dbHost | dbPort | dbMaxPoolSize | dbNetworktimeout | dbDocClass | dbTZAware |")
        logging.debug(
            "| Connect To MondoDB | %s | %s | %s | %s | %s | %s |" % (dbHost, port_number, dbMaxPoolSize,
                                                                      dbNetworkTimeout, dbDocClass, dbTZAware))
        self._dbconnection = pymongo.MongoClient(host=dbHost, port=port_number,
                                                 socketTimeoutMS=dbNetworkTimeout,
                                                 document_class=dbDocClass, tz_aware=dbTZAware,
                                                 maxPoolSize=dbMaxPoolSize)

    def disconnect_from_mongodb(self):
        """
        Closes the current connection to the MongoDB server.

        For example:
        | Disconnect From MongoDB | # disconnects from current connection to the MongoDB server |
        """
        logging.debug("| Disconnect From MongoDB |")
        self._dbconnection.close()
from robot.libraries.BuiltIn import BuiltIn
import logging
class MongoConnectionManager(object):
    """Connection Manager handles the connection & disconnection to the database."""

    def __init__(self):
        """Initializes _dbconnection to None."""
        self._dbconnection = None
        self._builtin = BuiltIn()

    def connect_to_mongodb(self, dbHost='localhost', dbPort=27017, dbMaxPoolSize=10, dbNetworkTimeout=None,
                           dbDocClass=dict, dbTZAware=False):
        """
        Loads pymongo and connects to the MongoDB host using parameters submitted.
        Example usage:
        | # To connect to foo.bar.org's MongoDB service on port 27017 |
        | Connect To MongoDB | foo.bar.org | ${27017} |
        | # Or for an authenticated connection |
        | Connect To MongoDB | admin:admin@foo.bar.org | ${27017} |
        """
        # pymongo is resolved at call time rather than at module import time.
        pymongo_module = __import__('pymongo')
        dbPort = int(dbPort)
        logging.debug(
            "| Connect To MondoDB | dbHost | dbPort | dbMaxPoolSize | dbNetworktimeout | dbDocClass | dbTZAware |")
        logging.debug(
            "| Connect To MondoDB | %s | %s | %s | %s | %s | %s |" % (dbHost, dbPort, dbMaxPoolSize,
                                                                     dbNetworkTimeout, dbDocClass, dbTZAware))
        self._dbconnection = pymongo_module.MongoClient(
            host=dbHost,
            port=dbPort,
            socketTimeoutMS=dbNetworkTimeout,
            document_class=dbDocClass,
            tz_aware=dbTZAware,
            maxPoolSize=dbMaxPoolSize,
        )

    def disconnect_from_mongodb(self):
        """
        Disconnects from the MongoDB server.
        For example:
        | Disconnect From MongoDB | # disconnects from current connection to the MongoDB server |
        """
        logging.debug("| Disconnect From MongoDB |")
        self._dbconnection.close()
from robot.libraries.BuiltIn import BuiltIn
class MongoConnectionManager(object):
    """
    Connection Manager handles the connection & disconnection to the database.
    """
    # NOTE(review): this variant is Python 2 only (print statements below).
    def __init__(self):
        """
        Initializes _dbconnection to None.
        """
        self._dbconnection = None
        self._builtin = BuiltIn()
    def connect_to_mongodb(self, dbHost='localhost', dbPort=27017, dbMaxPoolSize=10, dbNetworkTimeout=None,
                           dbDocClass=dict, dbTZAware=False):
        """
        Loads pymongo and connects to the MongoDB host using parameters submitted.
        Example usage:
        | # To connect to foo.bar.org's MongoDB service on port 27017 |
        | Connect To MongoDB | foo.bar.org | ${27017} |
        | # Or for an authenticated connection |
        | Connect To MongoDB | admin:admin@foo.bar.org | ${27017} |
        """
        # pymongo is imported lazily, only when a connection is requested.
        dbapiModuleName = 'pymongo'
        db_api_2 = __import__(dbapiModuleName)
        dbPort = int(dbPort)
        #print "host is [ %s ]" % dbHost
        #print "port is [ %s ]" % dbPort
        #print "pool_size is [ %s ]" % dbPoolSize
        #print "timeout is [ %s ]" % dbTimeout
        #print "slave_okay is [ %s ]" % dbSlaveOkay
        #print "document_class is [ %s ]" % dbDocClass
        #print "tz_aware is [ %s ]" % dbTZAware
        print "| Connect To MondoDB | dbHost | dbPort | dbMaxPoolSize | dbNetworktimeout | dbDocClass | dbTZAware |"
        print "| Connect To MondoDB | %s | %s | %s | %s | %s | %s |" % (dbHost, dbPort, dbMaxPoolSize, dbNetworkTimeout,
                                                                       dbDocClass, dbTZAware)
        self._dbconnection = db_api_2.MongoClient(host=dbHost, port=dbPort, socketTimeoutMS=dbNetworkTimeout,
                                                  document_class=dbDocClass, tz_aware=dbTZAware,
                                                  maxPoolSize=dbMaxPoolSize)
    def disconnect_from_mongodb(self):
        """
        Disconnects from the MongoDB server.
        For example:
        | Disconnect From MongoDB | # disconnects from current connection to the MongoDB server |
        """
        print "| Disconnect From MongoDB |"
        self._dbconnection.close()
import netaddr
class RobotFrameworkNetAddr:
    '''Wrapper functions to access a selection of the python netaddr library from robot framework.

    Most of the functionality from the netaddr IPNetwork, IPAddress and EUI classes is
    implemented, along with a couple of extra keywords to provide more ease of use in
    robot framework.

    Unless stated otherwise, every keyword takes the address as its first argument and
    forwards any extra ``kwargs`` to the corresponding netaddr constructor
    (``IPNetwork``, ``IPAddress`` or ``EUI``) -- see the netaddr docs for the options.
    '''

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    # ------------------------------------------------------------------ #
    # netaddr.IPNetwork keywords (addr is ip/mask or ip/prefixlen,       #
    # e.g. 10.0.0.0/24 or 10.0.0.0/255.255.255.0).                       #
    # ------------------------------------------------------------------ #

    @staticmethod
    def ipnetwork_broadcast(addr, **kwargs):
        '''Returns the broadcast address of the given subnet.'''
        return netaddr.IPNetwork(addr, **kwargs).broadcast

    @staticmethod
    def ipnetwork_cidr(addr, **kwargs):
        '''Returns the network in cidr format.'''
        return netaddr.IPNetwork(addr, **kwargs).cidr

    @staticmethod
    def ipnetwork_hostmask(addr, **kwargs):
        '''Returns the hostmask of the given subnet.'''
        return netaddr.IPNetwork(addr, **kwargs).hostmask

    @staticmethod
    def ipnetwork_info(addr, **kwargs):
        '''Returns a dict with info about the network.'''
        return netaddr.IPNetwork(addr, **kwargs).info

    @staticmethod
    def ipnetwork_ip(addr, **kwargs):
        '''Returns the ip part of the IPNetwork object.'''
        return netaddr.IPNetwork(addr, **kwargs).ip

    @staticmethod
    def ipnetwork_is_link_local(addr, **kwargs):
        '''Checks if the address is a link local address.'''
        return netaddr.IPNetwork(addr, **kwargs).is_link_local()

    @staticmethod
    def ipnetwork_is_loopback(addr, **kwargs):
        '''Checks if the address is a loopback address.'''
        return netaddr.IPNetwork(addr, **kwargs).is_loopback()

    @staticmethod
    def ipnetwork_is_multicast(addr, **kwargs):
        '''Checks if the address is a multicast address.'''
        return netaddr.IPNetwork(addr, **kwargs).is_multicast()

    @staticmethod
    def ipnetwork_is_private(addr, **kwargs):
        '''Checks if the address is a private address.'''
        return netaddr.IPNetwork(addr, **kwargs).is_private()

    @staticmethod
    def ipnetwork_is_reserved(addr, **kwargs):
        '''Checks if the address is a reserved address.'''
        return netaddr.IPNetwork(addr, **kwargs).is_reserved()

    @staticmethod
    def ipnetwork_is_unicast(addr, **kwargs):
        '''Checks if the address is a unicast address.'''
        return netaddr.IPNetwork(addr, **kwargs).is_unicast()

    @staticmethod
    def ipnetwork_netmask(addr, **kwargs):
        '''Returns the subnetmask of the subnet.'''
        return netaddr.IPNetwork(addr, **kwargs).netmask

    @staticmethod
    def ipnetwork_network(addr, **kwargs):
        '''Returns the network address of the subnet.'''
        return netaddr.IPNetwork(addr, **kwargs).network

    @staticmethod
    def ipnetwork_prefixlen(addr, **kwargs):
        '''Returns the prefix length of the subnet.'''
        return netaddr.IPNetwork(addr, **kwargs).prefixlen

    @staticmethod
    def ipnetwork_size(addr, **kwargs):
        '''Returns the number of addresses in the subnet.'''
        return netaddr.IPNetwork(addr, **kwargs).size

    @staticmethod
    def ipnetwork_version(addr, **kwargs):
        '''Returns the IP version of the network (4 or 6).'''
        return netaddr.IPNetwork(addr, **kwargs).version

    @staticmethod
    def ipnetwork_is_network_addr(addr, **kwargs):
        '''Checks whether the ip part of ``addr`` is the network address of its subnet.'''
        net = netaddr.IPNetwork(addr, **kwargs)
        return net.ip == net.network

    @staticmethod
    def ipnetwork_is_valid_ipv4(addr, **kwargs):
        '''Checks if ``addr`` is a valid IPv4 network (``kwargs`` must not contain ``version``).'''
        try:
            netaddr.IPNetwork(addr, version=4, **kwargs)
        # Narrowed from a bare except: do not swallow SystemExit/KeyboardInterrupt.
        except Exception:
            return False
        return True

    @staticmethod
    def ipnetwork_is_valid_ipv6(addr, **kwargs):
        '''Checks if ``addr`` is a valid IPv6 network (``kwargs`` must not contain ``version``).'''
        try:
            # BUGFIX: kwargs were documented as forwarded but previously ignored
            # (the IPv4 twin above already forwarded them).
            netaddr.IPNetwork(addr, version=6, **kwargs)
        except Exception:
            return False
        return True

    @staticmethod
    def ipnetwork_previous(addr, **kwargs):
        '''Returns the subnet one lower than ``addr``, or False if it cannot be computed.'''
        try:
            return netaddr.IPNetwork(addr, **kwargs).previous()
        except Exception:
            return False

    @staticmethod
    def ipnetwork_next(addr, **kwargs):
        '''Returns the subnet one higher than ``addr``, or False if it cannot be computed.'''
        try:
            return netaddr.IPNetwork(addr, **kwargs).next()
        except Exception:
            return False

    @staticmethod
    def ipnetwork_in_network(addr, addr2, kwargs_addr1=None, kwargs_addr2=None):
        '''Checks if network ``addr`` is contained in network ``addr2``.

        ``kwargs_addr1``/``kwargs_addr2`` are optional dicts forwarded to the
        respective IPNetwork constructors.
        '''
        # None sentinels instead of mutable default dict arguments.
        net1 = netaddr.IPNetwork(addr, **(kwargs_addr1 or {}))
        net2 = netaddr.IPNetwork(addr2, **(kwargs_addr2 or {}))
        return net1 in net2

    # ------------------------------------------------------------------ #
    # netaddr.IPAddress keywords (addr is a plain IP, e.g. 10.0.0.1).    #
    # ------------------------------------------------------------------ #

    @staticmethod
    def ipaddress_bin(addr, **kwargs):
        '''Returns the IP address in binary.'''
        return netaddr.IPAddress(addr, **kwargs).bin

    @staticmethod
    def ipaddress_bits(addr, **kwargs):
        '''Returns the IP as bits grouped in octets (IPv4) or hextets (IPv6).'''
        return netaddr.IPAddress(addr, **kwargs).bits()

    @staticmethod
    def ipaddress_info(addr, **kwargs):
        '''Returns a dict with info about the IP address.'''
        return netaddr.IPAddress(addr, **kwargs).info

    @staticmethod
    def ipaddress_is_hostmask(addr, **kwargs):
        '''Checks if the IP address is a hostmask.'''
        return netaddr.IPAddress(addr, **kwargs).is_hostmask()

    @staticmethod
    def ipaddress_is_link_local(addr, **kwargs):
        '''Checks if the IP address is a link local address.'''
        return netaddr.IPAddress(addr, **kwargs).is_link_local()

    @staticmethod
    def ipaddress_is_loopback(addr, **kwargs):
        '''Checks if the IP address is a loopback address.'''
        return netaddr.IPAddress(addr, **kwargs).is_loopback()

    @staticmethod
    def ipaddress_is_multicast(addr, **kwargs):
        '''Checks if the IP address is a multicast address.'''
        return netaddr.IPAddress(addr, **kwargs).is_multicast()

    @staticmethod
    def ipaddress_is_netmask(addr, **kwargs):
        '''Checks if the IP address is a netmask.'''
        return netaddr.IPAddress(addr, **kwargs).is_netmask()

    @staticmethod
    def ipaddress_is_private(addr, **kwargs):
        '''Checks if the IP address is a private address.'''
        return netaddr.IPAddress(addr, **kwargs).is_private()

    @staticmethod
    def ipaddress_is_reserved(addr, **kwargs):
        '''Checks if the IP address is a reserved address.'''
        return netaddr.IPAddress(addr, **kwargs).is_reserved()

    @staticmethod
    def ipaddress_is_unicast(addr, **kwargs):
        '''Checks if the IP address is a unicast address.'''
        return netaddr.IPAddress(addr, **kwargs).is_unicast()

    @staticmethod
    def ipaddress_reverse_dns(addr, **kwargs):
        '''Returns a reverse dns notation of the IP address.'''
        return netaddr.IPAddress(addr, **kwargs).reverse_dns

    @staticmethod
    def ipaddress_version(addr, **kwargs):
        '''Returns the IP version of the IP address (4 or 6).'''
        return netaddr.IPAddress(addr, **kwargs).version

    @staticmethod
    def ipaddress_words(addr, **kwargs):
        '''Returns the octets (IPv4) or hextets (IPv6) as decimal values.'''
        return netaddr.IPAddress(addr, **kwargs).words

    @staticmethod
    def ipaddress_add(addr, amount, **kwargs):
        '''Adds ``amount`` (may be negative) to the IP address and returns the result.'''
        return netaddr.IPAddress(addr, **kwargs) + int(amount)

    @staticmethod
    def ipaddress_is_valid_ipv4(addr):
        '''Checks if ``addr`` is a valid IPv4 address (no extra kwargs accepted).'''
        return netaddr.valid_ipv4(addr)

    @staticmethod
    def ipaddress_is_valid_ipv6(addr):
        '''Checks if ``addr`` is a valid IPv6 address (no extra kwargs accepted).'''
        return netaddr.valid_ipv6(addr)

    @staticmethod
    def ipaddress_in_network(addr, netw):
        '''Checks if IP address ``addr`` is inside subnet ``netw`` (e.g. 10.0.0.0/24).'''
        return netaddr.IPAddress(addr) in netaddr.IPNetwork(netw)

    # ------------------------------------------------------------------ #
    # netaddr.EUI keywords (addr is a MAC, e.g. 00-01-02-AA-BB-CC).      #
    # ------------------------------------------------------------------ #

    @staticmethod
    def eui_bin(addr, **kwargs):
        '''Returns the MAC address in binary.'''
        return netaddr.EUI(addr, **kwargs).bin

    @staticmethod
    def eui_bits(addr, **kwargs):
        '''Returns the MAC address as grouped bits.'''
        return netaddr.EUI(addr, **kwargs).bits()

    @staticmethod
    def eui_ei(addr, **kwargs):
        '''Returns the MAC address' EI part.'''
        return netaddr.EUI(addr, **kwargs).ei

    @staticmethod
    def eui_eui64(addr, **kwargs):
        '''Returns the MAC address' EUI64.'''
        return netaddr.EUI(addr, **kwargs).eui64()

    @staticmethod
    def eui_iab(addr, **kwargs):
        '''Returns the MAC address' IAB (if available).'''
        return netaddr.EUI(addr, **kwargs).iab

    @staticmethod
    def eui_info(addr, **kwargs):
        '''Returns a dict with info on the MAC address.'''
        return netaddr.EUI(addr, **kwargs).info

    @staticmethod
    def eui_ipv6(addr, prefix, **kwargs):
        '''Returns an IPv6 address combining the given prefix (in hex, e.g.
        0xfc000000000000000000000000000000) with this MAC address.'''
        return netaddr.EUI(addr, **kwargs).ipv6(prefix)

    @staticmethod
    def eui_ipv6_link_local(addr, **kwargs):
        '''Returns an IPv6 link local address based on this MAC address.'''
        return netaddr.EUI(addr, **kwargs).ipv6_link_local()

    @staticmethod
    def eui_is_iab(addr, **kwargs):
        '''Checks if the MAC address is IAB.'''
        return netaddr.EUI(addr, **kwargs).is_iab()

    @staticmethod
    def eui_modified_eui64(addr, **kwargs):
        '''Returns the modified eui64 of this MAC address.'''
        return netaddr.EUI(addr, **kwargs).modified_eui64()

    @staticmethod
    def eui_oui(addr, **kwargs):
        '''Returns the OUI of this MAC address.'''
        return netaddr.EUI(addr, **kwargs).oui

    @staticmethod
    def eui_packed(addr, **kwargs):
        '''Returns the MAC address in packed format.'''
        return netaddr.EUI(addr, **kwargs).packed

    @staticmethod
    def eui_value(addr, **kwargs):
        '''Returns the value of the MAC address in decimal.'''
        return netaddr.EUI(addr, **kwargs).value

    @staticmethod
    def eui_version(addr, **kwargs):
        '''Returns the version of the MAC address.'''
        return netaddr.EUI(addr, **kwargs).version

    @staticmethod
    def eui_words(addr, **kwargs):
        '''Returns the MAC address as a set of decimal values.'''
        return netaddr.EUI(addr, **kwargs).words

    @staticmethod
    def eui_is_valid(addr):
        '''Checks if ``addr`` is a valid MAC address.'''
        return netaddr.valid_mac(addr)
import argparse
import sys
from typing import Optional, List
import typing
from robotframework_obfuscator.obfuscator import IOpts, IDoWrite
def add_arguments(parser):
    """Register the obfuscator's command line options on ``parser``."""
    parser.description = "RobotFramework Obfuscator"

    # (flag, options) pairs, registered in the order they should appear in --help.
    flag_specs = (
        (
            "--stable-names",
            dict(
                action="store_true",
                help="If passed, the names will always be the same among runs.",
            ),
        ),
        (
            "--version",
            dict(
                action="store_true",
                help="If passed, just prints the version to the standard output and exits.",
            ),
        ),
        (
            "--dest",
            dict(help="The directory where the contents should be written."),
        ),
        (
            "--skip-keyword",
            dict(
                action="append",
                help="A keyword name that should not be translated (may be specified multiple times).",
            ),
        ),
        (
            "--skip-var",
            dict(
                action="append",
                help="A variable name that should not be translated (may be specified multiple times).",
            ),
        ),
    )
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)

    parser.add_argument(
        "target",
        nargs="+",
        help="The directory/directories with the contents that should be obfuscated.",
    )
def main(args: Optional[List[str]] = None, do_write: Optional[IDoWrite] = None):
    """Command line entry point: parse options, then run the obfuscator.

    ``args`` defaults to ``sys.argv[1:]``; ``do_write`` optionally overrides
    how output files are persisted.
    """
    cmdline = sys.argv[1:] if args is None else args
    parser = argparse.ArgumentParser()
    add_arguments(parser)
    opts = typing.cast(IOpts, parser.parse_args(args=cmdline))

    if opts.version:
        # Just report the package version and stop.
        import robotframework_obfuscator

        sys.stdout.write(robotframework_obfuscator.__version__)
        sys.stdout.flush()
        return

    if not opts.dest:
        sys.stderr.write(
            "The --dest <directory> where the obfuscated version should be written to must be provided."
        )
        sys.exit(1)

    from robotframework_obfuscator.obfuscator import RobotFrameworkObfuscator

    obfuscator = RobotFrameworkObfuscator(opts, do_write=do_write)
    obfuscator.obfuscate()


if __name__ == "__main__":
    main()
from typing import List, Iterable, Union, Dict, Optional, Tuple
import pathlib
import sys
from os import scandir, makedirs
from robotframework_ls.impl.protocols import (
ICompletionContext,
)
from robotframework_obfuscator.name_generator import NameGenerator
from robotframework_ls.impl.text_utilities import normalize_robot_name
from robotframework_obfuscator.extract_var_name import get_inner_variable_name
class _Collector(object):
    """Gathers the keyword/variable names found while scanning one file and
    feeds them to the shared ``NameGenerator``."""

    def __init__(
        self,
        completion_context: ICompletionContext,
        name_generator: NameGenerator,
        relative_path: pathlib.Path,
    ) -> None:
        # `completion_context` and `relative_path` are read back later when
        # the obfuscated file is generated.
        self.relative_path = relative_path
        self.completion_context = completion_context
        self._name_generator = name_generator

    def on_keyword(self, keyword_node_name: str) -> None:
        """Register a keyword definition name with the name generator."""
        generator = self._name_generator
        generator.on_found_keyword(keyword_node_name)

    def on_variable(self, variable_name: str) -> None:
        """Register a variable name with the name generator."""
        generator = self._name_generator
        generator.on_found_variable(variable_name)
class IOpts(object):
    """Structural description of the parsed command line options.

    Matches the arguments registered by ``__main__.add_arguments``.
    """

    version: bool  # Should print --version?
    stable_names: bool  # Use stable names? (otherwise, each run will provide different names)
    dest: Optional[str]  # Dest directory
    target: List[str]  # Target directories to obfuscate
    skip_keyword: List[str]  # Keyword names that should not be obfuscated
    skip_var: List[str]  # Variable names that should not be obfuscated
class IDoWrite(object):
    """Callback protocol used to persist output files produced by the obfuscator."""

    def __call__(self, path: pathlib.Path, contents: Union[str, bytes]):
        # Implementations write `contents` (text or raw bytes) to `path`.
        pass
class RobotFrameworkObfuscator(object):
    """Copies the target directory tree(s) to ``opts.dest``, obfuscating
    keyword/variable names in ``.robot``/``.resource`` files along the way.

    Non-robot files are copied verbatim; ``.pyc`` files are skipped.
    """

    def __init__(self, opts: IOpts, do_write: Optional[IDoWrite]):
        """Validate the targets and set up the output-writing callback.

        Exits the process (via `_critical`) if any target path does not exist.
        ``do_write`` may override how files are persisted (useful for tests);
        by default files are written to disk, creating parent dirs as needed.
        """
        self._opts = opts
        paths = []
        for d in opts.target:
            p = pathlib.Path(d)
            if not p.exists():
                self._critical(f"Target: '{p}' does not exist.")
            paths.append(p.absolute())
        self._paths: List[pathlib.Path] = paths
        if do_write is None:
            # Tracks parent directories already created, so makedirs runs
            # at most once per directory.
            checked = set()

            def _do_write(path: pathlib.Path, contents: Union[str, bytes]):
                """Write text (as utf-8) or raw bytes to `path`."""
                if path.parent not in checked:
                    makedirs(path.parent, exist_ok=True)
                    checked.add(path.parent)
                if isinstance(contents, str):
                    path.write_text(contents, "utf-8")
                else:
                    path.write_bytes(contents)

            self._do_write = _do_write
        else:
            self._do_write = do_write

    def _critical(self, txt):
        """Print `txt` to stderr and terminate the process with exit code 1."""
        sys.stderr.write(txt)
        sys.stderr.write("\n")
        sys.stderr.flush()
        sys.exit(1)

    def _iter_dir(self, p: Union[pathlib.Path, str]) -> Iterable[str]:
        """Recursively yield the paths of all files (not dirs) under `p`."""
        for entry in scandir(p):
            if entry.is_dir():
                yield from self._iter_dir(entry.path)
            else:
                yield entry.path

    def _iter_files(self) -> Iterable[Tuple[pathlib.Path, pathlib.Path]]:
        """
        Provides the base directory and the target file.
        i.e.: yield(base_dir, filename)
        """
        p: pathlib.Path
        for p in self._paths:
            if p.is_dir():
                for entry in self._iter_dir(p):
                    yield p, pathlib.Path(entry)
            else:
                # A file passed directly as a target: its parent is the base.
                yield p.parent, p

    def obfuscate(self) -> None:
        """Run the two-phase obfuscation: collect names, then rewrite files."""
        # Heavy robotframework_ls imports are deferred to call time.
        from robotframework_ls.impl.completion_context import CompletionContext
        from robotframework_ls.impl.robot_workspace import RobotDocument
        from robocorp_ls_core import uris
        from robotframework_ls.impl import ast_utils
        from robotframework_obfuscator.ast_to_code import ast_to_code
        from robotframework_obfuscator.obfuscator_transformer import (
            ObfuscatorTransformer,
        )
        from robot.api.parsing import Token

        opts = self._opts
        file_to_collector: Dict[pathlib.Path, _Collector] = {}
        name_generator = NameGenerator(use_stable_names=opts.stable_names)
        # Normalized names the user asked us NOT to translate.
        skip_keyword_names = (
            set(normalize_robot_name(name) for name in opts.skip_keyword)
            if opts.skip_keyword
            else set()
        )
        skip_variable_names = (
            set(normalize_robot_name(name) for name in opts.skip_var)
            if opts.skip_var
            else set()
        )
        assert opts.dest
        dest_base = pathlib.Path(opts.dest).absolute()
        # The first step is collecting information on the keywords that are available.
        for base_dir, f in self._iter_files():
            relative_path = f.relative_to(base_dir)
            lower_name = f.name.lower()
            if not lower_name.endswith((".robot", ".resource")):
                if lower_name.endswith(".pyc"):
                    continue
                # Not a robot file and not excluded, copy the file as is.
                target = dest_base / relative_path
                try:
                    self._do_write(target, f.read_bytes())
                except Exception:
                    # NOTE(review): re-raising a generic Exception drops the
                    # original cause; consider `raise ... from e`.
                    raise Exception(f"Error when copying {f} to {target}.")
                continue
            # We need to collect keywords/variables first to know which keyword/variable names
            # we should translate (we shouldn't translate keyword/variable names defined
            # elsewhere).
            uri = uris.from_fs_path(str(f))
            doc = RobotDocument(uri)
            completion_context = CompletionContext(doc)
            collector = _Collector(completion_context, name_generator, relative_path)
            ast = doc.get_ast()
            for node_info in ast_utils.iter_all_nodes(ast):
                accept_token_types = [Token.VARIABLE, Token.ASSIGN]
                node = node_info.node
                if node.__class__.__name__ == "Keyword":
                    normalized_name = normalize_robot_name(node.name)
                    if normalized_name not in skip_keyword_names:
                        collector.on_keyword(node.name)
                elif node.__class__.__name__ == "Arguments":
                    # Argument tokens of `Arguments` nodes also declare variables.
                    accept_token_types.append(Token.ARGUMENT)
                # Best-effort: not all nodes expose `.tokens`; those are skipped.
                # NOTE(review): bare except is broad -- AttributeError would do.
                try:
                    tokens = node.tokens
                except:
                    pass
                else:
                    for t in tokens:
                        if "{" in t.value:
                            if t.type in accept_token_types:
                                name = get_inner_variable_name(t.value)
                                if name is not None:
                                    tokenized = list(t.tokenize_variables())
                                    assert (
                                        len(tokenized) == 1
                                    ), f"Did not expect variable or assign ({t.type} - {t.value}) to have multiple vars ({tokenized})."
                                    normalized_var_name = normalize_robot_name(name)
                                    if normalized_var_name not in skip_variable_names:
                                        collector.on_variable(t.value)
            file_to_collector[f] = collector
            # ast_utils.print_ast(ast)
        # Now that we have information on the keywords available, the 2nd step is
        # providing a new name for each keyword.
        # Note that at this point we'll change the ast directly.
        for f, collector in file_to_collector.items():
            ast = collector.completion_context.get_ast()
            ObfuscatorTransformer(name_generator).visit(ast)
            text = ast_to_code(ast)
            if opts.dest:  # always true here: asserted above
                target = pathlib.Path(opts.dest) / collector.relative_path
                self._do_write(target, text)
from robot.api.parsing import ModelTransformer, Token, KeywordCall
from robotframework_obfuscator.name_generator import NameGenerator
from robotframework_ls.impl.keywords_in_args import KEYWORD_NAME_TO_KEYWORD_INDEX
from robotframework_ls.impl.text_utilities import normalize_robot_name
from robotframework_obfuscator.extract_var_name import get_inner_variable_name
class ObfuscatorTransformer(ModelTransformer):
    """AST transformer that obfuscates a Robot Framework model in place.

    Keyword definitions/calls and variables that were collected beforehand are
    renamed through the supplied ``NameGenerator``.  Names the generator does
    not know (i.e. keywords coming from third-party libraries) are garbled
    instead, and — for simple calls — rewritten into an ``Evaluate`` +
    ``Run Keyword`` pair so the original keyword name never appears literally
    in the output.  Comments are stripped entirely.

    NOTE(review): the source this was recovered from had whitespace mangled;
    separator string literals below (``Token.SEPARATOR`` values) may originally
    have been multiple spaces — Robot requires 2+ — verify against upstream.
    """

    def __init__(self, name_generator: NameGenerator):
        # Single source of truth for keyword/variable renaming and garbling.
        self.name_generator = name_generator

    def rename_node(self, node, token):
        """Rename the keyword carried by ``token`` on ``node``.

        Returns either ``[node]`` or, for garbled third-party calls, a list of
        two statements (an ``Evaluate`` assignment plus the rewritten call);
        returning a list from a ModelTransformer visit replaces the node with
        those nodes in the parent body.
        """
        # Always strip comments / rename variables on this node first.
        self._update_common(node)
        if not token or not token.value:
            return node
        # Split off a possible "Library." prefix; only the last part is the
        # keyword name proper.
        splitted = token.value.split(".")
        old_name = splitted[-1]
        old_normalized_name = normalize_robot_name(old_name)
        new_name = self.name_generator.get_new_keyword_name(old_normalized_name)
        ret = [node]
        if new_name:
            # Keyword defined in the obfuscated sources: plain rename.
            splitted[-1] = new_name
            token.value = ".".join(splitted)
        else:
            # We can't use it directly, but let's at least garble it a bit...
            splitted[-1] = self.name_generator.generate_garbled_keyword_name(
                splitted[-1]
            )
            token.value = ".".join(splitted)
            if token.type == Token.KEYWORD:
                # Check if this was some 'Run Keyword' variant where we should also
                # translate a parameter.
                consider_keyword_at_index = KEYWORD_NAME_TO_KEYWORD_INDEX.get(
                    old_normalized_name
                )
                if consider_keyword_at_index is not None:
                    # Count ARGUMENT tokens until we reach the one holding the
                    # inner keyword name, then rename that argument too.
                    i_arg = 0
                    for arg in node.tokens:
                        if arg.type == token.ARGUMENT:
                            i_arg += 1
                            if i_arg == consider_keyword_at_index:
                                new_arg_name = self.name_generator.get_new_keyword_name(
                                    normalize_robot_name(arg.value)
                                )
                                if new_arg_name:
                                    arg.value = new_arg_name
                # It's a call from a third party library, so, we can't directly replace it.
                # Still, we can change some things to make the call a bit more obfuscated
                # by replacing it with an evaluation.
                if len(splitted) == 1:
                    tokens = list(node.tokens)
                    if len(tokens) >= 2:
                        sep_token = tokens[0]
                        name_token = tokens[1]
                        if (
                            sep_token.type == Token.SEPARATOR
                            and name_token.type == Token.KEYWORD
                        ):
                            # Build: ${tmp} =  Evaluate  ''.join([chr(..), ...])
                            # so the keyword name is reconstructed at runtime.
                            var_name = "${%s}" % self.name_generator.get_new_temp_name(
                                splitted[0]
                            )
                            eval_str = []
                            for c in splitted[0]:
                                eval_str.append("chr(%s)" % hex(ord(c)))
                            # Do it only for the simplest cases.
                            new_tokens = [
                                sep_token,
                                Token(Token.ASSIGN, var_name),
                                Token(Token.SEPARATOR, " "),
                                Token(
                                    Token.KEYWORD,
                                    self.name_generator.generate_garbled_keyword_name(
                                        "Evaluate"
                                    ),
                                ),
                                Token(Token.SEPARATOR, " "),
                                Token(
                                    Token.ARGUMENT, "''.join([%s])" % ",".join(eval_str)
                                ),
                                Token(Token.EOL, "\n"),
                            ]
                            # Rewrite the original call into:
                            #   Run Keyword  ${tmp}  <original args>
                            del tokens[1]
                            tokens.insert(
                                1,
                                Token(
                                    Token.KEYWORD,
                                    self.name_generator.generate_garbled_keyword_name(
                                        "Run Keyword"
                                    ),
                                ),
                            )
                            tokens.insert(2, Token(Token.SEPARATOR, " "))
                            tokens.insert(3, Token(Token.KEYWORD, var_name))
                            node.tokens = tuple(tokens)
                            # Emit the Evaluate assignment before the call.
                            ret = [KeywordCall(tokens=new_tokens), node]
        return ret

    def visit_KeywordName(self, node):
        # Keyword definition: rename through its KEYWORD_NAME token.
        return self.rename_node(node, node.get_token(Token.KEYWORD_NAME))

    def visit_KeywordCall(self, node):
        # Keyword usage: rename through its KEYWORD token.
        return self.rename_node(node, node.get_token(Token.KEYWORD))

    def generic_visit(self, node):
        # Nodes without a dedicated visitor still get comments stripped and
        # variables renamed before their children are visited.
        self._update_common(node)
        ret = ModelTransformer.generic_visit(self, node)
        return ret

    def _update_common(self, node):
        """Strip comments and rename ``${...}`` variables on ``node``'s tokens."""
        try:
            tokens = node.tokens
        except AttributeError:
            # Not a statement node (no tokens): nothing to do.
            return
        else:
            for token in tokens:
                # i.e.: Remove tokens.
                if token.type == token.COMMENT:
                    token.value = ""
                else:
                    # Cheap pre-check: only tokens that may contain a variable.
                    if "{" in token.value:
                        new_token_value = []
                        tokenized = list(token.tokenize_variables())
                        changed = False
                        for t in tokenized:
                            value = t.value
                            v = get_inner_variable_name(value)
                            if v is not None:
                                # Variable part: rename if it is one of ours.
                                new_name = self.name_generator.get_new_variable_name(
                                    value
                                )
                                if new_name:
                                    changed = True
                                    new_token_value.append(new_name)
                                else:
                                    new_token_value.append(value)
                            else:
                                # Plain-text part: keep as is.
                                new_token_value.append(value)
                        # Only rebuild the token value when something changed.
                        if changed:
token.value = "".join(new_token_value) | /robotframework-obfuscator-0.0.1.tar.gz/robotframework-obfuscator-0.0.1/robotframework_obfuscator/obfuscator_transformer.py | 0.62681 | 0.164483 | obfuscator_transformer.py | pypi |
from OCRLibrary.keywords.binary_image_transformation import ImageThresholdingKeywords, MorphologicalTransformationKeywords
from OCRLibrary.keywords.changing_colourspace_transformation import ChangingColourspaceKeywords
from OCRLibrary.keywords.content_location import ContentLocationKeywords
from OCRLibrary.keywords.content_validation import ContentValidationKeywords
from OCRLibrary.keywords.read_and_save_images import ReadImageKeywords, SaveImageKeywords
from OCRLibrary.keywords.smoothing_image_transformation import SmoothingImageKeywords
from OCRLibrary.version import VERSION
__version__ = VERSION
class OCRLibrary(ImageThresholdingKeywords,
                 MorphologicalTransformationKeywords,
                 ChangingColourspaceKeywords,
                 ContentLocationKeywords,
                 ContentValidationKeywords,
                 ReadImageKeywords,
                 SaveImageKeywords,
                 SmoothingImageKeywords):
    """
    OCRLibrary is an image reading and processing library for Robot Framework.

    The OCR component of OCRLibrary utilizes pytesseract, which is a python wrapper for Google's Tesseract OCR.
    Image processing is done through the opencv-python package.

    Please note that recognizing all characters and their locations from screenshots is not guaranteed. Although processing the
    image will increase the chances for the desired characters to be read or located.

    = Information On Image Transformations =
    [https://docs.opencv.org/4.5.2/d7/d4d/tutorial_py_thresholding.html | OpenCV Thresholding Documentation]
    [https://docs.opencv.org/4.5.2/d9/d61/tutorial_py_morphological_ops.html | OpenCV Morphological Transformation Documentation]
    [https://docs.opencv.org/4.5.2/df/d9d/tutorial_py_colorspaces.html | OpenCV Changing Colourspaces Documentation]
    [https://docs.opencv.org/4.5.2/d4/d13/tutorial_py_filtering.html | OpenCV Smoothing Image Documentation]

    == Using And Not Using OSTU ==
    Information on keywords that use the ``apply_otsu`` argument.
    List of current keywords that can use OTSU:
    ``Get Binary Image``, ``Get To Zero Image`` and, ``Get Trunc Image``

    === Using OTSU ===
    Enabling otsu (``apply_otsu = True``) for thresholding keywords determines the threshold value automatically.
    When otsu is enabled, the image processing keyword will return a tuple. Index 0 contains the optimal threshold
    value found by the ostu threshold, and index 1 has the binary image.
    For an example, please see the example ``Using Get To Zero Image`` [https://github.com/bendurston/robotframework-ocrlibrary/blob/main/examples/keyword_usage.robot | in the keyword usage file.]

    === Not Using OTSU ===
    When ``apply_otsu = False`` threshold values must be provided. For more detail about the thresholding arguments,
    please see the OpenCV thresholding documentation listed above.
    For an example, please see the example ``Using Get Trunc Image`` [https://github.com/bendurston/robotframework-ocrlibrary/blob/main/examples/keyword_usage.robot | in the keyword usage file.]

    == Keywords With Apply Prefix ==
    This information pertains to the keywords with the ``Apply`` prefix.

    === Kernel Size Argument ===
    There are a few minor differences with this argument for some keywords.
    ``Apply Median Filtering To Image`` takes a kernel size as an integer that is odd and greater than 0.
    ``Apply Gaussian Blur To Image`` takes a kernel size as a tuple/list where the values must be positive odd integers.
    The rest of the keywords take a kernel size as a tuple/list where the values must be positive.

    === Kernel Type Argument ===
    Keywords that require a ``kernel_type`` take the given kernel size and create a structured element. The integer provided as
    the kernel type will determine the shape of the structured element. 0 will be a rectangle, 1 will be an ellipse,
    and 2 will be a cross.

    === Iteration Argument ===
    Iteration is the number of times the transformation is performed on the image. The ``iteration`` can be any positive integer
    greater than 0.

    === Depth Argument ===
    Depth represents the desired depth of the destination image. When ``depth=-1`` the output image will have the same depth as the source.

    == Pytesseract Configuration Strings ==
    Please see [https://github.com/bendurston/robotframework-ocrlibrary#custom-configurations-for-reading-images |the OCRLibrary README.md] for an in depth explanation of the ``pyt_conf`` argument.
    Example:
    | ${img_path}= Capture Page Screenshot
    | ${processed_img}= Read Image ${img_path}
    | ${content}= Get Image Content ${processed_img} --psm 6 -c tessedit_char_whitelist=0123456789 eng
    Note: Only use one space between each configuration in the ``pyt_conf`` argument.

    == Masking Colours ==
    Users are able to mask (maintain) colours that exist within the provided upper and lower bounds. A BGR or HSV image can be
    used for either ``Mask Colour`` or ``Mask Colours``. Bounds can be either a list or a tuple, and each index must be of type int.
    Representation of BGR and HSV bounds respectively: (blue value, green value, red value), (hue value, saturation value, brightness value).
    For more detail about the masking colours, please see the OpenCV changing colourspaces documentation listed above.
    Please see the [https://github.com/bendurston/robotframework-ocrlibrary/blob/main/examples/keyword_usage.robot |keyword_usage.robot file] for an example of the Mask Colour or Mask Colours keywords.

    == Reading And Saving Images ==
    Please see the list of the following [https://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56 |formats that are supported] for image reading.
    Please see the [https://docs.opencv.org/master/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce |list of exceptions] for saving an image.
    """

    # One shared library instance per Robot execution.
    ROBOT_LIBRARY_SCOPE = "GLOBAL"
    ROBOT_LIBRARY_VERSION = VERSION

    def __init__(self):
        # The keyword mixins do not cooperate through super(), so invoke each
        # base initializer explicitly.
        for b in OCRLibrary.__bases__:
b.__init__(self) | /robotframework_ocrlibrary-2.0.0-py3-none-any.whl/OCRLibrary/__init__.py | 0.879923 | 0.529385 | __init__.py | pypi |
from ..utils.exceptions.exception_handler \
import (verify_valid_image)
from ..utils.imagereading.text_locating \
import (return_text_coordinates, return_multiple_text_coordinates,
return_text_bounds, return_multiple_text_bounds)
class ContentLocationKeywords:
    """Keywords for locating text content inside a processed image."""

    def locate_text_coordinates(self, processed_img, text, pyt_conf='--psm 6', lang='eng'):
        """Return the centre coordinates (x, y) of the first occurrence of ``text``.

        Returns ``None`` when the text is not found. When the text may occur
        more than once, use ``Locate Multiple Text Coordinates`` instead.
        See `Pytesseract Configuration Strings` for details about the
        ``pyt_conf`` and ``lang`` arguments.

        Please note: as of version 1.2.0 this keyword only finds the
        coordinates of a single word; it will not work for sentences.
        """
        verify_valid_image(processed_img)
        return return_text_coordinates(processed_img, text, pyt_conf, lang)

    def locate_multiple_text_coordinates(self, processed_img, text, pyt_conf='--psm 6', lang='eng'):
        """Return a list of centre coordinates for every occurrence of ``text``.

        Each list entry is an (x, y) tuple; ``None`` is returned when nothing
        matches. Also works when the text occurs only once.
        See `Pytesseract Configuration Strings` for details about the
        ``pyt_conf`` and ``lang`` arguments.

        Please note: as of version 1.2.0 this keyword only finds the
        coordinates of a single word; it will not work for sentences.
        """
        verify_valid_image(processed_img)
        return return_multiple_text_coordinates(processed_img, text, pyt_conf, lang)

    def locate_text_bounds(self, processed_img, text, pyt_conf='--psm 6', lang='eng'):
        """Return the bounding box of the first occurrence of ``text``.

        The result is a tuple, or ``None`` when nothing is found. When the
        text may occur more than once, use ``Locate Multiple Text Bounds``.

        Example:
        | ${result}= Locate Text Bounds ${processed_img} OK
        | ${x}= Set Variable ${result}[0]
        | ${y}= Set Variable ${result}[1]
        | ${w}= Set Variable ${result}[2]
        | ${h}= Set Variable ${result}[3]
        Bounds refer to the box around the word "OK".
        - x represents the bound furthest to the left.
        - y represents the top of the bound.
        - w represents the width of the bound.
        - h represents the height of the bound.

        See `Pytesseract Configuration Strings` for details about the
        ``pyt_conf`` and ``lang`` arguments.

        Please note: as of version 1.2.0 this keyword only finds the
        coordinates of a single word; it will not work for sentences.
        """
        verify_valid_image(processed_img)
        return return_text_bounds(processed_img, text, pyt_conf, lang)

    def locate_multiple_text_bounds(self, processed_img, text, pyt_conf='--psm 6', lang='eng'):
        """Return a list of bounding-box tuples, one per occurrence of ``text``.

        Returns ``None`` when nothing matches; also works for a single
        occurrence. See ``Locate Text Bounds`` for what each tuple index
        means, and `Pytesseract Configuration Strings` for the ``pyt_conf``
        and ``lang`` arguments.

        Please note: as of version 1.2.0 this keyword only finds the
        coordinates of a single word; it will not work for sentences.
        """
        verify_valid_image(processed_img)
        multiple_bounds = return_multiple_text_bounds(processed_img, text, pyt_conf, lang)
return multiple_bounds | /robotframework_ocrlibrary-2.0.0-py3-none-any.whl/OCRLibrary/keywords/content_location.py | 0.773045 | 0.535949 | content_location.py | pypi |
from ..utils.exceptions.exception_handler \
import (verify_valid_kernel_size, verify_valid_depth, raise_invalid_kernel_type, verify_valid_kernel_size_non_tuple,
verify_valid_image, verify_valid_kernel_size_only_odds)
from ..utils.helpers.robot_conversions \
import (convert_to_valid_kernel_size, convert_to_valid_int)
from ..utils.imageprocessing.image_processing_generic \
import (process_image_filtering_with_rect_kernel, process_image_filtering_with_ellipse_kernel, process_image_filtering_with_cross_kernel,
process_median_filtering, process_blurring_averaging, process_blurring_gaussian)
class SmoothingImageKeywords:
    """Image-smoothing (filtering and blurring) keywords.

    Reference: https://docs.opencv.org/4.5.2/d4/d13/tutorial_py_filtering.html
    """

    def apply_filter2D_to_image(self, processed_img, kernel_size, kernel_type=0, depth=-1):
        """Apply OpenCV's filter2D to the provided image.

        ``kernel_size`` must be a tuple/list of positive ints.

        Example:
        | ${img_path}= Capture Page Screenshot
        | ${processed_img}= Read Image ${img_path}
        | ${kernel_size}= Create List 1 1
        | ${filtered_img}= Apply Filter2D To Image ${processed_img} ${kernel_size}

        See `introduction` for details about using arguments.
        For more details about this transformation see the OpenCV smoothing images
        documentation in the `Information On Image Transformations` section of the introduction.
        """
        # Validate raw inputs first, then normalize them for the helpers.
        verify_valid_image(processed_img)
        verify_valid_kernel_size(kernel_size)
        verify_valid_depth(depth)
        depth = convert_to_valid_int(depth)
        kernel_size = convert_to_valid_kernel_size(kernel_size)
        kernel_type = convert_to_valid_int(kernel_type)
        # Structuring element shape: 0=rectangle, 1=ellipse, 2=cross.
        if kernel_type == 0:
            return process_image_filtering_with_rect_kernel(processed_img, kernel_size, depth)
        if kernel_type == 1:
            return process_image_filtering_with_ellipse_kernel(processed_img, kernel_size, depth)
        if kernel_type == 2:
            return process_image_filtering_with_cross_kernel(processed_img, kernel_size, depth)
        return raise_invalid_kernel_type(kernel_type)

    def apply_median_filtering_to_image(self, processed_img, kernel_size):
        """Apply the median filter to the provided image.

        ``kernel_size`` takes an integer that is odd and greater than 0 — not a tuple/list.

        See `introduction` for details about using arguments.
        For more details about this transformation see the OpenCV smoothing images
        documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size_non_tuple(kernel_size)
        return process_median_filtering(processed_img, convert_to_valid_int(kernel_size))

    def apply_averaging_blur_to_image(self, processed_img, kernel_size):
        """Apply the averaging blur to the provided image.

        ``kernel_size`` must be a tuple/list of positive ints.
        See ``Apply Filter2D To Image`` for an example of general usage and
        `introduction` for details about using arguments.
        For more details about this transformation see the OpenCV smoothing images
        documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size(kernel_size)
        return process_blurring_averaging(processed_img, convert_to_valid_kernel_size(kernel_size))

    def apply_gaussian_blur_to_image(self, processed_img, kernel_size):
        """Apply the gaussian blur to the provided image.

        ``kernel_size`` must be a tuple/list of positive, odd ints.
        See `introduction` for details about using arguments.
        For more details about this transformation see the OpenCV smoothing images
        documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size_only_odds(kernel_size)
        kernel_size = convert_to_valid_kernel_size(kernel_size)
return process_blurring_gaussian(processed_img, kernel_size) | /robotframework_ocrlibrary-2.0.0-py3-none-any.whl/OCRLibrary/keywords/smoothing_image_transformation.py | 0.901547 | 0.569194 | smoothing_image_transformation.py | pypi |
from ..utils.exceptions.exception_handler import \
(verify_valid_kernel_size, verify_valid_iteration, raise_invalid_kernel_type, verify_valid_image,
verify_valid_image_path, verify_valid_threshold_values)
from ..utils.helpers.robot_conversions import \
(convert_to_valid_kernel_size)
from ..utils.imageprocessing.image_processing_gray import \
(process_to_binary_image, process_to_binary_otsu_image, process_to_tozero_image,
process_to_tozero_otsu_image, process_to_trunc_image, process_to_trunc_otsu_image,
process_erosion_with_rect_kernel, process_erosion_with_ellipse_kernel, process_erosion_with_cross_kernel,
process_dilation_with_rect_kernel, process_dilation_with_ellipse_kernel, process_dilation_with_cross_kernel,
process_opening_with_rect_kernel, process_opening_with_ellipse_kernel, process_opening_with_cross_kernel,
process_closing_with_rect_kernel, process_closing_with_ellipse_kernel, process_closing_with_cross_kernel,
process_gradient_with_rect_kernel, process_gradient_with_ellipse_kernel, process_gradient_with_cross_kernel,
process_tophat_with_rect_kernel, process_tophat_with_ellipse_kernel, process_tophat_with_cross_kernel,
process_blackhat_with_rect_kernel, process_blackhat_with_ellipse_kernel, process_blackhat_with_cross_kernel)
class ImageThresholdingKeywords:
    """Grayscale thresholding keywords.

    Reference: https://docs.opencv.org/4.5.2/d7/d4d/tutorial_py_thresholding.html
    """

    def get_binary_image(self, img_path, apply_otsu=False, inverse=False, max_threshold=255, threshold=127):
        """Convert the image at ``img_path`` to a binary image.

        See `introduction` for details about using arguments.
        For more details about this transformation see the OpenCV image thresholding
        documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image_path(img_path)
        verify_valid_threshold_values(threshold, max_threshold)
        # With otsu the threshold value is determined automatically; ``threshold``
        # is only used by the manual variant.
        if apply_otsu:
            return process_to_binary_otsu_image(img_path, inverse, max_threshold)
        return process_to_binary_image(img_path, inverse, threshold, max_threshold)

    def get_to_zero_image(self, img_path, apply_otsu=False, inverse=False, max_threshold=255, threshold=127):
        """Convert the image at ``img_path`` to a tozero image.

        All values considered black (if ``inverse`` is False) will be set to black,
        the rest of the image will remain in gray scale. If ``inverse`` is true, the
        values considered to be white will be set to black, the rest of the image
        will remain in gray scale.

        See `introduction` for details about using the arguments.
        For more details about this transformation see the OpenCV image thresholding
        documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image_path(img_path)
        verify_valid_threshold_values(threshold, max_threshold)
        if apply_otsu:
            return process_to_tozero_otsu_image(img_path, inverse, max_threshold)
        return process_to_tozero_image(img_path, inverse, threshold, max_threshold)

    def get_trunc_image(self, img_path, apply_otsu=False, max_threshold=255, threshold=127):
        """Convert the image to gray scale and apply truncation thresholding.

        Values considered to be white will be set to white; the rest of the
        image will remain gray scale.

        See `introduction` for details about using the arguments.
        For more details about this transformation see the OpenCV image thresholding
        documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image_path(img_path)
        verify_valid_threshold_values(threshold, max_threshold)
        if apply_otsu:
            return process_to_trunc_otsu_image(img_path, max_threshold)
        return process_to_trunc_image(img_path, threshold, max_threshold)
class MorphologicalTransformationKeywords:
    """Morphological transformation keywords for binary images.

    Reference: https://docs.opencv.org/4.5.2/d9/d61/tutorial_py_morphological_ops.html

    All keywords share the same shape: validate the inputs, normalize the
    kernel size, then dispatch on ``kernel_type`` (0=rectangle, 1=ellipse,
    2=cross structuring element). ``iteration`` is validated for every
    keyword but is only forwarded to the underlying helpers for erosion and
    dilation — the remaining transformations call helpers that take no
    iteration argument.
    """

    def apply_erosion_to_image(self, processed_img, kernel_size, kernel_type=0, iteration=1):
        """
        Applies the erosion morphological transformation to a binary image. Kernel size must be a tuple/list of positive ints.
        Example:
        | ${img_path}= Capture Page Screenshot
        | ${processed_img}= Get Binary Image ${img_path}
        | ${kernel_size}= Create List 1 1
        | ${eroded_img}= Apply Erosion To Image ${processed_img} ${kernel_size}
        See `introduction` for details about using the arguments.
        For more details about this transformation see the OpenCV morphological transformation documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size(kernel_size)
        verify_valid_iteration(iteration)
        kernel_size = convert_to_valid_kernel_size(kernel_size)
        if kernel_type == 0:
            transformed_image = process_erosion_with_rect_kernel(processed_img, kernel_size, iteration)
        elif kernel_type == 1:
            transformed_image = process_erosion_with_ellipse_kernel(processed_img, kernel_size, iteration)
        elif kernel_type == 2:
            transformed_image = process_erosion_with_cross_kernel(processed_img, kernel_size, iteration)
        else:
            return raise_invalid_kernel_type(kernel_type)
        return transformed_image

    def apply_dilation_to_image(self, processed_img, kernel_size, kernel_type=0, iteration=1):
        """
        Applies the dilation morphological transformation to a binary image. Kernel size must be a tuple/list of positive ints.
        See ``Apply Erosion To Image`` for example of general usage.
        See `introduction` for details about using the arguments.
        For more details about this transformation see the OpenCV morphological transformation documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size(kernel_size)
        verify_valid_iteration(iteration)
        kernel_size = convert_to_valid_kernel_size(kernel_size)
        if kernel_type == 0:
            transformed_image = process_dilation_with_rect_kernel(processed_img, kernel_size, iteration)
        elif kernel_type == 1:
            transformed_image = process_dilation_with_ellipse_kernel(processed_img, kernel_size, iteration)
        elif kernel_type == 2:
            transformed_image = process_dilation_with_cross_kernel(processed_img, kernel_size, iteration)
        else:
            return raise_invalid_kernel_type(kernel_type)
        return transformed_image

    def apply_opening_to_image(self, processed_img, kernel_size, kernel_type=0, iteration=1):
        """
        Applies the opening morphological transformation to a binary image. Kernel size must be a tuple/list of positive ints.
        See ``Apply Erosion To Image`` for example of general usage.
        See `introduction` for details about using the arguments.
        For more details about this transformation see the OpenCV morphological transformation documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size(kernel_size)
        # ``iteration`` is validated but not forwarded: the opening helpers
        # take no iteration argument.
        verify_valid_iteration(iteration)
        kernel_size = convert_to_valid_kernel_size(kernel_size)
        if kernel_type == 0:
            transformed_image = process_opening_with_rect_kernel(processed_img, kernel_size)
        elif kernel_type == 1:
            transformed_image = process_opening_with_ellipse_kernel(processed_img, kernel_size)
        elif kernel_type == 2:
            transformed_image = process_opening_with_cross_kernel(processed_img, kernel_size)
        else:
            return raise_invalid_kernel_type(kernel_type)
        return transformed_image

    def apply_closing_to_image(self, processed_img, kernel_size, kernel_type=0, iteration=1):
        """
        Applies the closing morphological transformation to a binary image. Kernel size must be a tuple/list of positive ints.
        See ``Apply Erosion To Image`` for example of general usage.
        See `introduction` for details about using the arguments.
        For more details about this transformation see the OpenCV morphological transformation documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size(kernel_size)
        # ``iteration`` is validated but not forwarded (see class docstring).
        verify_valid_iteration(iteration)
        kernel_size = convert_to_valid_kernel_size(kernel_size)
        if kernel_type == 0:
            transformed_image = process_closing_with_rect_kernel(processed_img, kernel_size)
        elif kernel_type == 1:
            transformed_image = process_closing_with_ellipse_kernel(processed_img, kernel_size)
        elif kernel_type == 2:
            transformed_image = process_closing_with_cross_kernel(processed_img, kernel_size)
        else:
            return raise_invalid_kernel_type(kernel_type)
        return transformed_image

    def apply_gradient_to_image(self, processed_img, kernel_size, kernel_type=0, iteration=1):
        """
        Applies the gradient morphological transformation to a binary image. Kernel size must be a tuple/list of positive ints.
        See ``Apply Erosion To Image`` for example of general usage.
        See `introduction` for details about using the arguments.
        For more details about this transformation see the OpenCV morphological transformation documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size(kernel_size)
        # ``iteration`` is validated but not forwarded (see class docstring).
        verify_valid_iteration(iteration)
        kernel_size = convert_to_valid_kernel_size(kernel_size)
        if kernel_type == 0:
            transformed_image = process_gradient_with_rect_kernel(processed_img, kernel_size)
        elif kernel_type == 1:
            transformed_image = process_gradient_with_ellipse_kernel(processed_img, kernel_size)
        elif kernel_type == 2:
            transformed_image = process_gradient_with_cross_kernel(processed_img, kernel_size)
        else:
            return raise_invalid_kernel_type(kernel_type)
        return transformed_image

    def apply_top_hat_to_image(self, processed_img, kernel_size, kernel_type=0, iteration=1):
        """
        Applies the top hat morphological transformation to a binary image. Kernel size must be a tuple/list of positive ints.
        See ``Apply Erosion To Image`` for example of general usage.
        See `introduction` for details about using the arguments.
        For more details about this transformation see the OpenCV morphological transformation documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size(kernel_size)
        # ``iteration`` is validated but not forwarded (see class docstring).
        verify_valid_iteration(iteration)
        kernel_size = convert_to_valid_kernel_size(kernel_size)
        if kernel_type == 0:
            transformed_image = process_tophat_with_rect_kernel(processed_img, kernel_size)
        elif kernel_type == 1:
            transformed_image = process_tophat_with_ellipse_kernel(processed_img, kernel_size)
        elif kernel_type == 2:
            transformed_image = process_tophat_with_cross_kernel(processed_img, kernel_size)
        else:
            return raise_invalid_kernel_type(kernel_type)
        return transformed_image

    def apply_black_hat_to_image(self, processed_img, kernel_size, kernel_type=0, iteration=1):
        """
        Applies the black hat morphological transformation to a binary image. Kernel size must be a tuple/list of positive ints.
        See ``Apply Erosion To Image`` for example of general usage.
        See `introduction` for details about using the arguments.
        For more details about this transformation see the OpenCV morphological transformation documentation in the `Information On Image Transformations` section of the introduction.
        """
        verify_valid_image(processed_img)
        verify_valid_kernel_size(kernel_size)
        # ``iteration`` is validated but not forwarded (see class docstring).
        verify_valid_iteration(iteration)
        kernel_size = convert_to_valid_kernel_size(kernel_size)
        if kernel_type == 0:
            transformed_image = process_blackhat_with_rect_kernel(processed_img, kernel_size)
        elif kernel_type == 1:
            transformed_image = process_blackhat_with_ellipse_kernel(processed_img, kernel_size)
        elif kernel_type == 2:
            transformed_image = process_blackhat_with_cross_kernel(processed_img, kernel_size)
        else:
            # Fix: 'return' was missing here, unlike every sibling method; without
            # it, control would fall through to 'return transformed_image' with
            # the name unbound if the helper ever returned instead of raising.
            return raise_invalid_kernel_type(kernel_type)
return transformed_image | /robotframework_ocrlibrary-2.0.0-py3-none-any.whl/OCRLibrary/keywords/binary_image_transformation.py | 0.872402 | 0.609916 | binary_image_transformation.py | pypi |
from ..utils.exceptions.exception_handler import \
(verify_valid_image, verify_valid_colour_bounds)
from ..utils.helpers.robot_conversions import \
(convert_to_valid_colour_bounds)
from ..utils.imageprocessing.image_processing_colour import \
(process_to_gray_scale, process_colour_image_to_hsv, mask_colour_bgr_or_hsv, mask_colours_bgr_or_hsv)
class ChangingColourspaceKeywords:
    """
    ChangingColourspaceKeywords Class
    Reference: https://docs.opencv.org/4.5.2/df/d9d/tutorial_py_colorspaces.html
    """

    def convert_image_to_gray_scale(self, processed_img):
        """
        Converts any image read to gray scale.
        Example:
        | ${img_path}= Capture Page Screenshot
        | ${processed_img}= Read Image ${img_path}
        | ${gray_scale_image}= Convert Image To Gray Scale ${processed_img}
        """
        verify_valid_image(processed_img)
        return process_to_gray_scale(processed_img)

    def convert_image_to_HSV(self, processed_img):
        """
        Converts any image read as bgr into hsv colour scheme.
        Example:
        | ${img_path}= Capture Page Screenshot
        | ${processed_img}= Read Image ${img_path}
        | ${hsv_img}= Convert Image To HSV ${processed_img}
        """
        verify_valid_image(processed_img)
        return process_colour_image_to_hsv(processed_img)

    def mask_colour(self, processed_img, lower_bound_colour, upper_bound_colour):
        """
        Mask all colours in an image that are not within the provided bounds. Masked colours become black.
        Example of masking all colours but red in a BGR image:
        | ${img_path}= Capture Page Screenshot
        | ${processed_img}= Read Image ${img_path}
        | ${lower}= Create List 0 0 200
        | ${upper}= Create List 0 0 255
        | ${masked_img}= Mask Colour ${processed_img} ${lower} ${upper}
        For more details about this transformation see the OpenCV changing colourspaces documentation in the `Information On Image Transformations` section of the introduction.
        """
        # Validate the image first, then normalize the bounds (Robot passes
        # list items as strings) before validating them.
        verify_valid_image(processed_img)
        colours = convert_to_valid_colour_bounds(lower_bound_colour, upper_bound_colour)
        lower_bound_colour = colours[0]
        upper_bound_colour = colours[1]
        verify_valid_colour_bounds(lower_bound_colour, upper_bound_colour)
        return mask_colour_bgr_or_hsv(processed_img, lower_bound_colour, upper_bound_colour)

    def mask_colours(self, processed_img, lower_bound_colour1, upper_bound_colour1, lower_bound_colour2, upper_bound_colour2):
        """
        Mask all colours in an image that are not within the two provided bounds. Masked colours become black.
        Example of masking all colours but red and blue in a BGR image:
        | ${img_path}= Capture Page Screenshot
        | ${processed_img}= Read Image ${img_path}
        | ${lower1}= Create List 0 0 200
        | ${upper1}= Create List 0 0 255
        | ${lower2}= Create List 0 200 0
        | ${upper2}= Create List 0 255 0
        | ${masked_img}= Mask Colours ${processed_img} ${lower1} ${upper1} ${lower2} ${upper2}
        For more details about this transformation see the OpenCV changing colourspaces documentation in the `Information On Image Transformations` section of the introduction.
        """
        # NOTE(review): unlike Mask Colour, the bounds are converted before the
        # image is validated here — confirm whether this ordering is intended.
        colours = convert_to_valid_colour_bounds(lower_bound_colour1, upper_bound_colour1, lower_bound_colour2, upper_bound_colour2)
        lower_bound_colour1 = colours[0]
        upper_bound_colour1 = colours[1]
        lower_bound_colour2 = colours[2]
        upper_bound_colour2 = colours[3]
        verify_valid_image(processed_img)
        verify_valid_colour_bounds(lower_bound_colour1, upper_bound_colour1, lower_bound_colour2, upper_bound_colour2)
return mask_colours_bgr_or_hsv(processed_img, lower_bound_colour1, upper_bound_colour1, lower_bound_colour2, upper_bound_colour2) | /robotframework_ocrlibrary-2.0.0-py3-none-any.whl/OCRLibrary/keywords/changing_colourspace_transformation.py | 0.901894 | 0.411702 | changing_colourspace_transformation.py | pypi |
from pytesseract import image_to_data, Output
def return_text_coordinates(img, text, pyt_conf, lang):
    """
    Find the centre coordinates of the first occurrence of ``text`` in an image.

    Args:
        img - image to scan (any format pytesseract accepts).
        text - exact word to search for.
        pyt_conf - pytesseract configuration string.
        lang - OCR language to use.
    Returns:
        (x, y) centre of the word's bounding box, or None when not found.
    """
    data = image_to_data(img, output_type=Output.DICT, config=pyt_conf, lang=lang)
    # The DICT output holds parallel lists, one entry per detected box.
    words = zip(data['text'], data['left'], data['top'], data['width'], data['height'])
    for word, left, top, width, height in words:
        if word == text:
            # Centre point of the bounding box.
            return int(left) + int(width) / 2, int(top) + int(height) / 2
    return None
def return_multiple_text_coordinates(img, text, pyt_conf, lang):
    """
    Find the centre coordinates of every occurrence of ``text`` in an image.

    Args:
        img - image to scan (any format pytesseract accepts).
        text - exact word to search for.
        pyt_conf - pytesseract configuration string.
        lang - OCR language to use.
    Returns:
        List of (x, y) centres, or None when the text is not found.
    """
    data = image_to_data(img, output_type=Output.DICT, config=pyt_conf, lang=lang)
    # The DICT output holds parallel lists, one entry per detected box.
    list_of_coordinates = [
        (int(left) + int(width) / 2, int(top) + int(height) / 2)
        for word, left, top, width, height
        in zip(data['text'], data['left'], data['top'], data['width'], data['height'])
        if word == text
    ]
    # Preserve the original contract: an empty result is reported as None.
    return list_of_coordinates or None
def return_text_bounds(img, text, pyt_conf, lang):
    """
    Find the bounding box of the first occurrence of ``text`` in an image.

    Args:
        img - image to scan (any format pytesseract accepts).
        text - exact word to search for.
        pyt_conf - pytesseract configuration string.
        lang - OCR language to use.
    Returns:
        (left, top, width, height) of the word, or None when not found.
    """
    data = image_to_data(img, output_type=Output.DICT, config=pyt_conf, lang=lang)
    # The DICT output holds parallel lists, one entry per detected box.
    words = zip(data['text'], data['left'], data['top'], data['width'], data['height'])
    for word, left, top, width, height in words:
        if word == text:
            return int(left), int(top), int(width), int(height)
    return None
def return_multiple_text_bounds(img, text, pyt_conf, lang):
    """
    Find the bounding boxes of every occurrence of ``text`` in an image.

    Args:
        img - image to scan (any format pytesseract accepts).
        text - exact word to search for.
        pyt_conf - pytesseract configuration string.
        lang - OCR language to use.
    Returns:
        List of (left, top, width, height) tuples, or None when not found.
    """
    data = image_to_data(img, output_type=Output.DICT, config=pyt_conf, lang=lang)
    # The DICT output holds parallel lists, one entry per detected box.
    list_of_box_bounds = [
        (int(left), int(top), int(width), int(height))
        for word, left, top, width, height
        in zip(data['text'], data['left'], data['top'], data['width'], data['height'])
        if word == text
    ]
    # Preserve the original contract: an empty result is reported as None.
    return list_of_box_bounds or None
import numpy
import cv2
from OCRLibrary.utils.exceptions.exceptions \
import (InvalidKernelSize, InvalidKernelType, InvalidIteration, ContentNotFound, InvalidImageArgument,
InvalidColourBoundArguments, InvalidImagePath, InvalidThresholdValue, InvalidDepthArgument)
def verify_content(expected_content, actual_content):
    """
    Check that the expected content occurs within the actual content.

    Returns True on success, otherwise raises ContentNotFound.
    """
    if expected_content in actual_content:
        return True
    raise ContentNotFound(f"The expected content: {expected_content} was not found in the actual content: {actual_content}")
def verify_valid_kernel_size(kernel_size):
    """
    Verify that the kernel size is a tuple/list whose first two entries are
    positive numbers (numeric strings such as "3" are tolerated).

    Returns True when valid, otherwise raises InvalidKernelSize.
    """
    if isinstance(kernel_size, (tuple, list)):
        # A value that is > 0 is necessarily non-zero, so the original's
        # extra nested truthiness check was redundant and has been dropped.
        if int(float(kernel_size[0])) > 0 and int(float(kernel_size[1])) > 0:
            return True
    raise InvalidKernelSize("The kernel size argument provided is invalid. Please provide a size that is a positive number of type int, and the kernel_size is of type tuple or list.")
def verify_valid_kernel_size_only_odds(kernel_size):
    """
    Verify that the kernel size is a tuple/list whose first two entries are
    positive odd numbers.

    Returns True when valid, otherwise raises InvalidKernelSize.
    """
    if isinstance(kernel_size, (tuple, list)):
        if int(float(kernel_size[0])) > 0 and int(float(kernel_size[1])) > 0:
            first = int(float(kernel_size[0]))
            second = int(float(kernel_size[1]))
            # Both dimensions must be odd.
            if first % 2 == 1 and second % 2 == 1:
                return True
    raise InvalidKernelSize("The kernel size argument provided is invalid. Please provide a size that is a positive odd number of type int, and the kernel_size is of type tuple or list.")
def verify_valid_kernel_size_non_tuple(kernel_size):
    """
    Verify that the kernel size is a single positive odd number.

    Returns True when valid, otherwise raises InvalidKernelSize.
    """
    if isinstance(kernel_size, (int, str, float)):
        # int(float(...)) also accepts numeric strings such as "3.0",
        # matching the tolerance of the tuple-based kernel validators above
        # (plain int("3.0") raised ValueError).
        value = int(float(kernel_size))
        if value > 0 and value % 2 == 1:
            return True
    raise InvalidKernelSize("The kernel size argument provided is invalid. Please provide a size that is a positive odd number of type int.")
def raise_invalid_kernel_type(kernel_type):
    """
    Unconditionally raise InvalidKernelType for the given unsupported type.
    """
    message = f"The provided kernel type: {kernel_type} is invalid. Please provide a type that is either 0, 1 or 2."
    raise InvalidKernelType(message)
def verify_valid_iteration(iteration):
    """
    Verify that the iteration count is an integer greater than zero.

    Returns True when valid, otherwise raises InvalidIteration.
    """
    # Non-integers are rejected first; then the value itself is checked.
    if not isinstance(iteration, int):
        raise InvalidIteration(f"The provided iteration: {iteration} is invalid. Iteration must be an integer.")
    if iteration <= 0:
        raise InvalidIteration(f"The provided iteration: {iteration} is invalid. Please select and integer that is greater than or equal to 1.")
    return True
def verify_valid_image(processed_img):
    """
    Verify that the argument is an OpenCV-processed image, i.e. a
    numpy.ndarray as returned by the image processing keywords.

    Returns True when valid, otherwise raises InvalidImageArgument.
    """
    if not isinstance(processed_img, numpy.ndarray):
        raise InvalidImageArgument("The image argument provided is invalid. Please give an image that has been returned from any of the image processing keywords.")
    return True
def verify_valid_colour_bounds(*arg):
    """
    Verify that every supplied BGR/HSV bound has three integer channels,
    each between 0 and 255 inclusive.

    Returns True when all bounds are valid, otherwise raises
    InvalidColourBoundArguments.
    """
    # Iterate the bounds directly instead of indexing with range(len(...)).
    for bound in arg:
        channels = (bound[0], bound[1], bound[2])
        if not all(isinstance(channel, int) for channel in channels):
            raise InvalidColourBoundArguments("The bound(s) provided are invalid. Please provide an int between 0 and 255.")
        if any(channel < 0 or channel > 255 for channel in channels):
            raise InvalidColourBoundArguments("The bound(s) provided are invalid. Please give values that are ints between 0 and 255.")
    return True
def verify_valid_image_path(filename, read=True):
    """
    Verify that OpenCV can decode (read=True) or encode (read=False) the
    given image file.

    Returns True when the file can be handled, otherwise raises
    InvalidImagePath.
    """
    # Pick the OpenCV capability probe matching the requested direction.
    checker = cv2.haveImageReader if read else cv2.haveImageWriter
    if checker(filename):
        return True
    if read:
        raise InvalidImagePath("The image path provided is invalid. Please insure the path is correct or the file format is supported.")
    raise InvalidImagePath("The provided filename cannot be encoded by OpenCV. Please insure your desired file format is supported.")
def verify_valid_threshold_values(threshold, max_threshold):
    """
    Verify that both threshold values are numeric (int or float).

    Returns True when valid, otherwise raises InvalidThresholdValue.
    """
    both_numeric = isinstance(threshold, (int, float)) and isinstance(max_threshold, (int, float))
    if not both_numeric:
        raise InvalidThresholdValue(f"Either threshold value {threshold} or {max_threshold} are invalid. Please insure the thresholds are either of type int or float.")
    return True
def verify_valid_depth(depth):
    """
    Verify that the depth represents a negative integer.

    Returns True when valid, otherwise raises InvalidDepthArgument.
    """
    if isinstance(depth, (int, str, float)):
        # int(float(...)) also accepts numeric strings such as "-2.0",
        # consistent with the kernel-size validators above (plain
        # int("-2.0") raised ValueError).
        if int(float(depth)) < 0:
            return True
    raise InvalidDepthArgument("The depth value provided is invalid. Please provide a negative integer.")
class Error(Exception):
    """
    Base class for all custom exceptions raised by this library.

    Lets callers catch every library-specific error with a single
    ``except Error`` clause.
    """
class InvalidKernelSize(Error):
    """
    Purpose:
        Exception raised when the provided kernel size is invalid.
    Attributes:
        message - explanation of the error.
    """
    def __init__(self, message):
        # Forward to Exception so str(e) and tracebacks display the message
        # (the original override left e.args empty).
        super().__init__(message)
        self.message = message
class InvalidKernelType(Error):
    """
    Purpose:
        Exception raised when the provided kernel type is invalid (i.e. not rectangle, ellipse, or cross).
    Attributes:
        message - explanation of the error.
    """
    def __init__(self, message):
        # Forward to Exception so str(e) and tracebacks display the message.
        super().__init__(message)
        self.message = message
class InvalidIteration(Error):
    """
    Purpose:
        Exception raised when the provided iteration is invalid.
    Attributes:
        message - explanation of the error.
    """
    def __init__(self, message):
        # Forward to Exception so str(e) and tracebacks display the message.
        super().__init__(message)
        self.message = message
class ContentNotFound(Error):
    """
    Purpose:
        Exception raised when the desired content is not found within the image.
    Attributes:
        message - explanation of the error.
    """
    def __init__(self, message):
        # Forward to Exception so str(e) and tracebacks display the message.
        super().__init__(message)
        self.message = message
class InvalidImageArgument(Error):
    """
    Purpose:
        Exception raised when an image has been given to the function that has not been processed by opencv.
    Attributes:
        message - explanation of the error.
    """
    def __init__(self, message):
        # Forward to Exception so str(e) and tracebacks display the message.
        super().__init__(message)
        self.message = message
class InvalidColourBoundArguments(Error):
    """
    Purpose:
        Exception raised when incorrect bounds are given when masking a BGR image.
    Attributes:
        message - explanation of the error.
    """
    def __init__(self, message):
        # Forward to Exception so str(e) and tracebacks display the message.
        super().__init__(message)
        self.message = message
class InvalidImagePath(Error):
    """
    Purpose:
        Exception raised when OpenCV is unable to decode the provided image.
    Attributes:
        message - explanation of the error.
    """
    def __init__(self, message):
        # Forward to Exception so str(e) and tracebacks display the message.
        super().__init__(message)
        self.message = message
class InvalidThresholdValue(Error):
    """
    Purpose:
        Exception raised when invalid threshold values are supplied.
    Attributes:
        message - explanation of the error.
    """
    def __init__(self, message):
        # Forward to Exception so str(e) and tracebacks display the message.
        super().__init__(message)
        self.message = message
class InvalidDepthArgument(Error):
    """
    Purpose:
        Exception raised when invalid depth values are supplied.
    Attributes:
        message - explanation of the error.
    """
    def __init__(self, message):
        # Forward to Exception so str(e) and tracebacks display the message.
        super().__init__(message)
        self.message = message
from OCRLibrary.utils.imageprocessing.imagetransformation.changing_colourspaces \
import (convert_bgr_to_gray, convert_bgr_to_hsv, mask_single_colour, mask_double_colour)
def process_to_gray_scale(img):
    """
    Purpose:
        Converts a read image to gray scale.
    Args:
        img - provided read image (result of cv2.imread()); the previous
              docstring wrongly documented a path argument.
    Returns:
        Gray scale image.
    """
    return convert_bgr_to_gray(img)
def process_colour_image_to_hsv(img):
    """
    Purpose:
        Converts an image from BGR to HSV.
    Args:
        img - provided read image (result of cv2.imread()).
    Returns:
        The image in HSV.
    """
    hsv_img = convert_bgr_to_hsv(img)
    return hsv_img
def mask_colour_bgr_or_hsv(processed_image, lower_bound_colour, upper_bound_colour):
    """
    Purpose:
        Masks every colour that falls outside the bounds of the provided colour.
    Args:
        processed_image - provided read image (result of cv2.imread()).
        lower_bound_colour - lower bound of the colour to keep (BGR/HSV).
        upper_bound_colour - upper bound of the colour to keep (BGR/HSV).
    Returns:
        Image with all other colours masked.
    """
    masked = mask_single_colour(processed_image, lower_bound_colour, upper_bound_colour)
    return masked
def mask_colours_bgr_or_hsv(processed_image, lower_bound_colour1, upper_bound_colour1, lower_bound_colour2, upper_bound_colour2):
    """
    Purpose:
        Masks any colour that is not in the range of the bounds of the two provided colours.
    Args:
        processed_image - provided read image (result of cv2.imread()).
        lower_bound_colour1 - lower bound of the first colour to keep (BGR/HSV).
        upper_bound_colour1 - upper bound of the first colour to keep (BGR/HSV).
        lower_bound_colour2 - lower bound of the second colour to keep (BGR/HSV).
        upper_bound_colour2 - upper bound of the second colour to keep (BGR/HSV).
    Returns:
        Image with all other colours masked.
    """
    return mask_double_colour(processed_image, lower_bound_colour1, upper_bound_colour1, lower_bound_colour2, upper_bound_colour2)
from OCRLibrary.utils.imageprocessing.imagetransformation.structuring_element \
import get_rect_kernel, get_ellipse_kernel, get_cross_kernel
from OCRLibrary.utils.imageprocessing.imagetransformation.image_smoothing \
import image_filtering, blurring_averaging, blurring_gaussian, median_filtering
def process_image_filtering_with_rect_kernel(img, kernel_size, depth):
    """
    Purpose:
        Apply a 2D image filter with a rectangle kernel to an image.
    Args:
        img - the processed image.
        kernel_size - size of the kernel.
        depth - desired depth of the destination image.
    Returns:
        The filtered image.
    """
    return image_filtering(img, depth, get_rect_kernel(kernel_size))
def process_image_filtering_with_ellipse_kernel(img, kernel_size, depth):
    """
    Purpose:
        Apply a 2D image filter with an ellipse kernel to an image.
    Args:
        img - the processed image.
        kernel_size - size of the kernel.
        depth - desired depth of the destination image.
    Returns:
        The filtered image.
    """
    return image_filtering(img, depth, get_ellipse_kernel(kernel_size))
def process_image_filtering_with_cross_kernel(img, kernel_size, depth):
    """
    Purpose:
        Apply a 2D image filter with a cross kernel to an image.
    Args:
        img - the processed image.
        kernel_size - size of the kernel.
        depth - desired depth of the destination image.
    Returns:
        The filtered image.
    """
    return image_filtering(img, depth, get_cross_kernel(kernel_size))
def process_median_filtering(img, kernel_size):
    """
    Purpose:
        Apply a median image filter to an image.
    Args:
        img - the processed image.
        kernel_size - size of the kernel.
    Returns:
        The filtered image.
    """
    filtered = median_filtering(img, kernel_size)
    return filtered
def process_blurring_averaging(img, kernel_size):
    """
    Purpose:
        Apply an averaging blur filter to an image.
    Args:
        img - the processed image.
        kernel_size - size of the kernel.
    Returns:
        The filtered image.
    """
    blurred = blurring_averaging(img, kernel_size)
    return blurred
def process_blurring_gaussian(img, kernel_size):
    """
    Purpose:
        Apply a Gaussian blur filter to an image.
    Args:
        img - the processed image.
        kernel_size - size of the kernel.
    Returns:
        The filtered image.
    """
    return blurring_gaussian(img, kernel_size)
import cv2
from OCRLibrary.utils.imageprocessing.imagetransformation.changing_colourspaces import convert_bgr_to_gray
from OCRLibrary.utils.imageprocessing.imagetransformation.image_thresholding \
import (threshold_binary, threshold_binary_inv, threshold_trunc, threshold_tozero, threshold_tozero_inv,
threshold_binary_otsu, threshold_binary_inv_otsu, threshold_trunc_otsu, threshold_tozero_otsu, threshold_tozero_inv_otsu)
from OCRLibrary.utils.imageprocessing.imagetransformation.morphological_transformations \
import (morph_erosion, morph_dilation, morph_opening, morph_closing, morph_gradient,
morph_top_hat, morph_black_hat)
from OCRLibrary.utils.imageprocessing.imagetransformation.structuring_element \
import (get_rect_kernel, get_ellipse_kernel, get_cross_kernel)
### Image thresholding
def process_to_binary_image(img_path, inverse=False, threshold=127, max_threshold=255):
    """
    Purpose:
        Process an image to binary colours.
    Args:
        img_path - path to the image to process.
        inverse - if true an inverted binary thresholding will be applied (optional).
        threshold - threshold value used to classify the pixel values (optional).
        max_threshold - the max value to be given if a pixels value is more than the threshold value (optional).
    Returns:
        A binary image.
    """
    gray_img = convert_bgr_to_gray(cv2.imread(img_path))
    # Choose the plain or inverted variant up front.
    threshold_fn = threshold_binary_inv if inverse else threshold_binary
    return threshold_fn(gray_img, threshold, max_threshold)
def process_to_binary_otsu_image(img_path, inverse=False, max_threshold=255):
    """
    Purpose:
        Process an image to binary colours using binary Otsu thresholding.
    Args:
        img_path - path to the image to process.
        inverse - if true an inverted binary thresholding will be applied (optional).
        max_threshold - the max value to be given if a pixels value is more than the threshold value (optional).
    Returns:
        Tuple of (optimal threshold value found by Otsu, binary image).
    """
    gray_img = convert_bgr_to_gray(cv2.imread(img_path))
    # Choose the plain or inverted variant up front.
    threshold_fn = threshold_binary_inv_otsu if inverse else threshold_binary_otsu
    return threshold_fn(gray_img, max_threshold)
def process_to_tozero_image(img_path, inverse=False, threshold=177, max_threshold=255):
    """
    Purpose:
        Process an image tozero. Without inverse, pixels classed as black are
        forced to black and the rest stays grayscale; with inverse, pixels
        classed as white are forced to black instead.
    Args:
        img_path - path to the image to process.
        inverse - if true an inverted tozero thresholding will be applied (optional).
        threshold - threshold value used to classify the pixel values (optional).
        max_threshold - the max value to be given if a pixels value is more than the threshold value (optional).
    Returns:
        Tozero grayscale/binary image.
    """
    gray_img = convert_bgr_to_gray(cv2.imread(img_path))
    # Choose the plain or inverted variant up front.
    threshold_fn = threshold_tozero_inv if inverse else threshold_tozero
    return threshold_fn(gray_img, threshold, max_threshold)
def process_to_tozero_otsu_image(img_path, inverse=False, max_threshold=255):
    """
    Purpose:
        Process an image tozero using tozero Otsu thresholding.
    Args:
        img_path - path to the image to process.
        inverse - if true an inverted tozero thresholding will be applied (optional).
        max_threshold - the max value to be given if a pixels value is more than the threshold value (optional).
    Returns:
        Tuple of (optimal threshold value found by Otsu, tozero image).
    """
    gray_img = convert_bgr_to_gray(cv2.imread(img_path))
    # Choose the plain or inverted variant up front.
    threshold_fn = threshold_tozero_inv_otsu if inverse else threshold_tozero_otsu
    return threshold_fn(gray_img, max_threshold)
def process_to_trunc_image(img_path, threshold=177, max_threshold=255):
    """
    Purpose:
        Process an image to gray scale and apply a truncation threshold
        (pixels classed as white are forced to white, the rest stays grayscale).
    Args:
        img_path - path to the image to process.
        threshold - threshold value used to classify the pixel values (optional).
        max_threshold - the max value to be given if a pixels value is more than the threshold value (optional).
    Returns:
        Truncated binary/grayscale image.
    """
    gray_img = convert_bgr_to_gray(cv2.imread(img_path))
    return threshold_trunc(gray_img, threshold, max_threshold)
def process_to_trunc_otsu_image(img_path, max_threshold=255):
    """
    Purpose:
        Process an image to gray scale and apply truncation plus Otsu
        thresholding.
    Args:
        img_path - path to the image to process.
        max_threshold - the max value to be given if a pixels value is more than the threshold value (optional).
    Returns:
        Tuple of (optimal threshold value found by Otsu, truncated image).
    """
    gray_img = convert_bgr_to_gray(cv2.imread(img_path))
    return threshold_trunc_otsu(gray_img, max_threshold)
### Morphological transformations
def process_erosion_with_rect_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply an erosion morph to the image using a rectangle shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_erosion(img, get_rect_kernel(kernel_size), iteration)
def process_erosion_with_ellipse_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply an erosion morph to the image using an ellipse shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_erosion(img, get_ellipse_kernel(kernel_size), iteration)
def process_erosion_with_cross_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply an erosion morph to the image using a cross shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_erosion(img, get_cross_kernel(kernel_size), iteration)
def process_dilation_with_rect_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a dilation morph to the image using a rectangle shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_dilation(img, get_rect_kernel(kernel_size), iteration)
def process_dilation_with_ellipse_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a dilation morph to the image using an ellipse shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_dilation(img, get_ellipse_kernel(kernel_size), iteration)
def process_dilation_with_cross_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a dilation morph to the image using a cross shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_dilation(img, get_cross_kernel(kernel_size), iteration)
def process_opening_with_rect_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply an opening morph to the image using a rectangle shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_opening(img, get_rect_kernel(kernel_size), iteration)
def process_opening_with_ellipse_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply an opening morph to the image using an ellipse shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_opening(img, get_ellipse_kernel(kernel_size), iteration)
def process_opening_with_cross_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply an opening morph to the image using a cross shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_opening(img, get_cross_kernel(kernel_size), iteration)
def process_closing_with_rect_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a closing morph to the image using a rectangle shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_closing(img, get_rect_kernel(kernel_size), iteration)
def process_closing_with_ellipse_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a closing morph to the image using an ellipse shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_closing(img, get_ellipse_kernel(kernel_size), iteration)
def process_closing_with_cross_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a closing morph to the image using a cross shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_closing(img, get_cross_kernel(kernel_size), iteration)
def process_gradient_with_rect_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a gradient morph to the image using a rectangle shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_gradient(img, get_rect_kernel(kernel_size), iteration)
def process_gradient_with_ellipse_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a gradient morph to the image using an ellipse shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_gradient(img, get_ellipse_kernel(kernel_size), iteration)
def process_gradient_with_cross_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a gradient morph to the image using a cross shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_gradient(img, get_cross_kernel(kernel_size), iteration)
def process_tophat_with_rect_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a top-hat morph to the image using a rectangle shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_top_hat(img, get_rect_kernel(kernel_size), iteration)
def process_tophat_with_ellipse_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a top-hat morph to the image using an ellipse shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_top_hat(img, get_ellipse_kernel(kernel_size), iteration)
def process_tophat_with_cross_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a top-hat morph to the image using a cross shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_top_hat(img, get_cross_kernel(kernel_size), iteration)
def process_blackhat_with_rect_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a black-hat morph to the image using a rectangle shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_black_hat(img, get_rect_kernel(kernel_size), iteration)
def process_blackhat_with_ellipse_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a black-hat morph to the image using an ellipse shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_black_hat(img, get_ellipse_kernel(kernel_size), iteration)
def process_blackhat_with_cross_kernel(img, kernel_size, iteration=1):
    """
    Purpose:
        Apply a black-hat morph to the image using a cross shaped kernel.
    Args:
        img - binary image.
        kernel_size - the size of the kernel to use in morphological transformation.
        iteration - number of times the morph is performed (defaults to 1).
    Returns:
        Image with applied morphological transformation.
    """
    return morph_black_hat(img, get_cross_kernel(kernel_size), iteration)
import cv2
def threshold_binary(img, thresh, max_thresh):
    """
    Purpose:
        Apply a binary threshold to a grayscale image.
    Arguments:
        img - a gray scale image.
        thresh - threshold value used to classify the pixel values.
        max_thresh - value assigned to pixels exceeding the threshold.
    Returns:
        Thresholded image.
    """
    # cv2.threshold returns (retval, image); only the image is needed here.
    _, thresholded = cv2.threshold(img, thresh, max_thresh, cv2.THRESH_BINARY)
    return thresholded
def threshold_binary_inv(img, thresh, max_thresh):
    """
    Purpose:
        Apply an inverted binary threshold to a grayscale image.
    Arguments:
        img - a gray scale image.
        thresh - threshold value used to classify the pixel values.
        max_thresh - value assigned to pixels below the threshold.
    Returns:
        Thresholded image.
    """
    # cv2.threshold returns (retval, image); only the image is needed here.
    _, thresholded = cv2.threshold(img, thresh, max_thresh, cv2.THRESH_BINARY_INV)
    return thresholded
def threshold_trunc(img, thresh, max_thresh):
    """
    Purpose:
        Apply a truncated threshold to a grayscale image.
    Arguments:
        img - a gray scale image.
        thresh - threshold value used to classify the pixel values.
        max_thresh - maximum threshold value passed through to OpenCV.
    Returns:
        Thresholded image.
    """
    # cv2.threshold returns (retval, image); only the image is needed here.
    _, thresholded = cv2.threshold(img, thresh, max_thresh, cv2.THRESH_TRUNC)
    return thresholded
def threshold_tozero(img, thresh, max_thresh):
    """
    Purpose:
        Apply a to-zero threshold to a grayscale image.
    Arguments:
        img - a gray scale image.
        thresh - threshold value used to classify the pixel values.
        max_thresh - maximum threshold value passed through to OpenCV.
    Returns:
        Thresholded image.
    """
    # cv2.threshold returns (retval, image); only the image is needed here.
    _, thresholded = cv2.threshold(img, thresh, max_thresh, cv2.THRESH_TOZERO)
    return thresholded
def threshold_tozero_inv(img, thresh, max_thresh):
    """Apply an inverted to-zero threshold to a grayscale image.

    Args:
        img: A grayscale image.
        thresh: Threshold value used to classify the pixel values.
        max_thresh: Value assigned when a pixel value exceeds the threshold.

    Returns:
        The thresholded image.
    """
    _, thresholded = cv2.threshold(img, thresh, max_thresh, cv2.THRESH_TOZERO_INV)
    return thresholded
def threshold_binary_otsu(img, max_thresh):
    """Apply a binary threshold combined with Otsu's method to a grayscale image.

    Args:
        img: A grayscale image.
        max_thresh: Value assigned when a pixel value exceeds the threshold.

    Returns:
        Tuple where element 0 is the Otsu-optimized threshold value and
        element 1 is the thresholded image.
    """
    # THRESH_OTSU computes the threshold itself, so 0 is passed as `thresh`.
    mode = cv2.THRESH_BINARY | cv2.THRESH_OTSU
    return cv2.threshold(img, 0, max_thresh, mode)
def threshold_binary_inv_otsu(img, max_thresh):
    """Apply an inverted binary threshold combined with Otsu's method.

    Args:
        img: A grayscale image.
        max_thresh: Value assigned when a pixel value exceeds the threshold.

    Returns:
        Tuple where element 0 is the Otsu-optimized threshold value and
        element 1 is the thresholded image.
    """
    # THRESH_OTSU computes the threshold itself, so 0 is passed as `thresh`.
    mode = cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU
    return cv2.threshold(img, 0, max_thresh, mode)
def threshold_trunc_otsu(img, max_thresh):
    """Apply a truncating threshold combined with Otsu's method.

    Args:
        img: A grayscale image.
        max_thresh: Value assigned when a pixel value exceeds the threshold.

    Returns:
        Tuple where element 0 is the Otsu-optimized threshold value and
        element 1 is the thresholded image.
    """
    # THRESH_OTSU computes the threshold itself, so 0 is passed as `thresh`.
    mode = cv2.THRESH_TRUNC | cv2.THRESH_OTSU
    return cv2.threshold(img, 0, max_thresh, mode)
def threshold_tozero_otsu(img, max_thresh):
    """Apply a to-zero threshold combined with Otsu's method.

    Args:
        img: A grayscale image.
        max_thresh: Value assigned when a pixel value exceeds the threshold.

    Returns:
        Tuple where element 0 is the Otsu-optimized threshold value and
        element 1 is the thresholded image.
    """
    # THRESH_OTSU computes the threshold itself, so 0 is passed as `thresh`.
    mode = cv2.THRESH_TOZERO | cv2.THRESH_OTSU
    return cv2.threshold(img, 0, max_thresh, mode)
def threshold_tozero_inv_otsu(img, max_thresh):
    """
    Purpose:
        Apply inverted to-zero threshold combined with Otsu's method to a grayscale image.
    Arguments:
        img - a gray scale image.
        max_thresh - the max value to be given if a pixel's value is more than the threshold value.
    Returns:
        tuple[0] - Otsu-optimized threshold value.
        tuple[1] - thresholded image.
    """
    # thresh is passed as 0 because THRESH_OTSU computes the optimal value itself.
    return cv2.threshold(img, 0, max_thresh, cv2.THRESH_TOZERO_INV + cv2.THRESH_OTSU) | /robotframework_ocrlibrary-2.0.0-py3-none-any.whl/OCRLibrary/utils/imageprocessing/imagetransformation/image_thresholding.py | 0.918045 | 0.751032 | image_thresholding.py | pypi |
import cv2
import numpy as np
def convert_bgr_to_gray(img):
    """Convert an image from BGR to grayscale.

    Args:
        img: Provided read image (result of cv2.imread()).

    Returns:
        The image converted to grayscale.
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return grayscale
def convert_bgr_to_hsv(img):
    """Convert an image from BGR to HSV.

    Args:
        img: Provided read image (result of cv2.imread()).

    Returns:
        The image converted to HSV.
    """
    hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    return hsv_image
def mask_single_colour(img, lower_bound_colour, upper_bound_colour):
    """Mask every colour that is outside the bounds of the provided colour range.

    Args:
        img: Provided read image (result of cv2.imread()).
        lower_bound_colour: Lower bound of the colour to keep, in BGR format.
        upper_bound_colour: Upper bound of the colour to keep, in BGR format.

    Returns:
        Image with all out-of-range colours masked.
    """
    lower = np.array(lower_bound_colour)
    upper = np.array(upper_bound_colour)
    keep_mask = cv2.inRange(img, lower, upper)
    return cv2.bitwise_and(img, img, mask=keep_mask)
def mask_double_colour(img, lower_bound_colour1, upper_bound_colour1, lower_bound_colour2, upper_bound_colour2):
    """
    Purpose:
        Masks any colour that is not in the range of the bounds of the two provided colours.
    Args:
        img - provided read image (result of cv2.imread()).
        lower_bound_colour1 - the lower bound of the first colour to not mask in BGR format.
        upper_bound_colour1 - the upper bound of the first colour to not mask in BGR format.
        lower_bound_colour2 - the lower bound of the second colour to not mask in BGR format.
        upper_bound_colour2 - the upper bound of the second colour to not mask in BGR format.
    Returns:
        result - image with colours masked.
    """
    lower_bound_colour1 = np.array(lower_bound_colour1)
    upper_bound_colour1 = np.array(upper_bound_colour1)
    lower_bound_colour2 = np.array(lower_bound_colour2)
    upper_bound_colour2 = np.array(upper_bound_colour2)
    # Build one in-range mask per colour, then union them so pixels matching
    # either range are kept.
    mask1 = cv2.inRange(img, lower_bound_colour1, upper_bound_colour1)
    mask2 = cv2.inRange(img, lower_bound_colour2, upper_bound_colour2)
    mask = cv2.bitwise_or(mask1, mask2)
    result = cv2.bitwise_and(img, img, mask=mask)
    return result | /robotframework_ocrlibrary-2.0.0-py3-none-any.whl/OCRLibrary/utils/imageprocessing/imagetransformation/changing_colourspaces.py | 0.928676 | 0.589421 | changing_colourspaces.py | pypi |
import struct
class VolumeDump(object):
    """Helper class to create and check volume dumps."""

    DUMPBEGINMAGIC = 0xB3A11322
    DUMPENDMAGIC = 0x3A214B6E
    DUMPVERSION = 1
    D_DUMPHEADER = 1
    D_VOLUMEHEADER = 2
    D_VNODE = 3
    D_DUMPEND = 4

    @staticmethod
    def check_header(filename):
        """Verify filename is a dump file.

        Raises:
            AssertionError: if the file is too short, or the leading
                tag/magic/version do not match a volume dump header.
        """
        size = struct.calcsize("!BLL")
        # Context manager guarantees the handle is closed even if read()
        # raises (the original leaked the handle in that case).
        with open(filename, "rb") as file:
            packed = file.read(size)
        if len(packed) != size:
            raise AssertionError("Not a dump file: file is too short.")
        (tag, magic, version) = struct.unpack("!BLL", packed)
        if tag != VolumeDump.D_DUMPHEADER:
            raise AssertionError("Not a dump file: wrong tag")
        if magic != VolumeDump.DUMPBEGINMAGIC:
            raise AssertionError("Not a dump file: wrong magic")
        if version != VolumeDump.DUMPVERSION:
            raise AssertionError("Not a dump file: wrong version")

    def __init__(self, filename):
        """Create a new volume dump file and write the dump header record."""
        self.file = open(filename, "wb")
        self.write(self.D_DUMPHEADER, "LL", self.DUMPBEGINMAGIC, self.DUMPVERSION)

    def write(self, tag, fmt, *args):
        """Write a tag byte followed by values (network byte order) to the dump file."""
        packed = struct.pack("!B" + fmt, tag, *args)
        self.file.write(packed)

    def close(self):
        """Write the end of dump tag and close the dump file."""
        self.write(self.D_DUMPEND, "L", self.DUMPENDMAGIC)  # vos requires the end tag
        self.file.close()
        self.file = None
class _DumpKeywords(object):
    """Volume dump keywords."""

    volid = 536870999  # random, but valid, volume id

    def _create_empty_dump(self, filename):
        """Create the smallest possible valid dump file."""
        dump = VolumeDump(filename)
        # 'v' tag carries the volume id.
        dump.write(ord('v'), "L", self.volid)
        # 't' tag: three fields (2, 0, 0) — presumably a timestamp/count record;
        # TODO confirm against the AFS dump format specification.
        dump.write(ord('t'), "HLL", 2, 0, 0)
        dump.write(VolumeDump.D_VOLUMEHEADER, "")
        dump.close()

    def _create_dump_with_bogus_acl(self, filename):
        """Create a minimal dump file with bogus ACL record.

        The bogus ACL would crash the volume server before gerrit 11702."""
        size, version, total, positive, negative = (0, 0, 0, 1000, 0)  # positive is out of range.
        dump = VolumeDump(filename)
        dump.write(ord('v'), "L", self.volid)
        dump.write(ord('t'), "HLL", 2, 0, 0)
        dump.write(VolumeDump.D_VOLUMEHEADER, "")
        # Vnode record followed by the intentionally malformed 'A' (ACL) record.
        dump.write(VolumeDump.D_VNODE, "LL", 3, 999)
        dump.write(ord('A'), "LLLLL", size, version, total, positive, negative)
        dump.close()

    def should_be_a_dump_file(self, filename):
        """Fails if filename is not an AFS dump file."""
        VolumeDump.check_header(filename)

    def create_dump(self, filename, size='small', contains=''):
        """
        Generate a volume dump file.

        `contains='bogus-acl'` takes precedence over `size`.
        """
        if contains == 'bogus-acl':
            self._create_dump_with_bogus_acl(filename)
        elif size == 'empty':
            self._create_empty_dump(filename)
        elif size == 'small':
            self._create_empty_dump(filename)  # todo: create a dump file
        else:
            raise ValueError('unsupported size arg: %s' % (size)) | /robotframework-openafslibrary-0.8.1-py3-none-any.whl/OpenAFSLibrary/keywords/dump.py | 0.59408 | 0.313092 | dump.py | pypi |
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from DataDriver import DataDriver
from requests.auth import AuthBase
from requests.cookies import RequestsCookieJar as CookieJar
from robot.api.deco import library
from OpenApiDriver.openapi_executors import OpenApiExecutors, ValidationLevel
from OpenApiDriver.openapi_reader import OpenApiReader
@library(scope="TEST SUITE", doc_format="ROBOT")
class OpenApiDriver(OpenApiExecutors, DataDriver):
"""
Visit the [https://github.com/MarketSquare/robotframework-openapidriver | library page]
for an introduction and examples.
"""
def __init__( # pylint: disable=too-many-arguments, too-many-locals, dangerous-default-value
self,
source: str,
origin: str = "",
base_path: str = "",
included_paths: Optional[Iterable[str]] = None,
ignored_paths: Optional[Iterable[str]] = None,
ignored_responses: Optional[Iterable[int]] = None,
ignored_testcases: Optional[Iterable[Tuple[str, str, int]]] = None,
response_validation: ValidationLevel = ValidationLevel.WARN,
disable_server_validation: bool = True,
mappings_path: Union[str, Path] = "",
invalid_property_default_response: int = 422,
default_id_property_name: str = "id",
faker_locale: Optional[Union[str, List[str]]] = None,
require_body_for_invalid_url: bool = False,
recursion_limit: int = 1,
recursion_default: Any = {},
username: str = "",
password: str = "",
security_token: str = "",
auth: Optional[AuthBase] = None,
cert: Optional[Union[str, Tuple[str, str]]] = None,
verify_tls: Optional[Union[bool, str]] = True,
extra_headers: Optional[Dict[str, str]] = None,
cookies: Optional[Union[Dict[str, str], CookieJar]] = None,
proxies: Optional[Dict[str, str]] = None,
):
"""
== Base parameters ==
=== source ===
An absolute path to an openapi.json or openapi.yaml file or an url to such a file.
=== origin ===
The server (and port) of the target server. E.g. ``https://localhost:8000``
=== base_path ===
The routing between ``origin`` and the endpoints as found in the ``paths``
section in the openapi document.
E.g. ``/petshop/v2``.
== Test case generation and execution ==
=== included_paths ===
A list of paths that will be included when generating the test cases.
The ``*`` character can be used at the end of a partial path to include all paths
starting with the partial path (wildcard include).
=== ignored_paths ===
A list of paths that will be ignored when generating the test cases.
The ``*`` character can be used at the end of a partial path to ignore all paths
starting with the partial path (wildcard ignore).
=== ignored_responses ===
A list of responses that will be ignored when generating the test cases.
=== ignored_testcases ===
A list of specific test cases that, if it would be generated, will be ignored.
Specific test cases to ignore must be specified as a ``Tuple`` or ``List``
of ``path``, ``method`` and ``response``.
=== response_validation ===
By default, a ``WARN`` is logged when the Response received after a Request does not
comply with the schema as defined in the openapi document for the given operation. The
following values are supported:
- ``DISABLED``: All Response validation errors will be ignored
- ``INFO``: Any Response validation erros will be logged at ``INFO`` level
- ``WARN``: Any Response validation erros will be logged at ``WARN`` level
- ``STRICT``: The Test Case will fail on any Response validation errors
=== disable_server_validation ===
If enabled by setting this parameter to ``True``, the Response validation will also
include possible errors for Requests made to a server address that is not defined in
the list of servers in the openapi document. This generally means that if there is a
mismatch, every Test Case will raise this error. Note that ``localhost`` and
``127.0.0.1`` are not considered the same by Response validation.
== API-specific configurations ==
=== mappings_path ===
See [https://marketsquare.github.io/robotframework-openapi-libcore/advanced_use.html | this page]
for an in-depth explanation.
=== invalid_property_default_response ===
The default response code for requests with a JSON body that does not comply
with the schema.
Example: a value outside the specified range or a string value
for a property defined as integer in the schema.
=== default_id_property_name ===
The default name for the property that identifies a resource (i.e. a unique
entity) within the API.
The default value for this property name is ``id``.
If the target API uses a different name for all the resources within the API,
you can configure it globally using this property.
If different property names are used for the unique identifier for different
types of resources, an ``ID_MAPPING`` can be implemented using the ``mappings_path``.
=== faker_locale ===
A locale string or list of locale strings to pass to the Faker library to be
used in generation of string data for supported format types.
=== require_body_for_invalid_url ===
When a request is made against an invalid url, this usually is because of a "404" request;
a request for a resource that does not exist. Depending on API implementation, when a
request with a missing or invalid request body is made on a non-existent resource,
either a 404 or a 422 or 400 Response is normally returned. If the API being tested
processes the request body before checking if the requested resource exists, set
this parameter to True.
== Parsing parameters ==
=== recursion_limit ===
The recursion depth to which to fully parse recursive references before the
`recursion_default` is used to end the recursion.
=== recursion_default ===
The value that is used instead of the referenced schema when the
`recursion_limit` has been reached.
The default `{}` represents an empty object in JSON.
Depending on schema definitions, this may cause schema validation errors.
If this is the case, 'None' (``${NONE}`` in Robot Framework) or an empty list
can be tried as an alternative.
== Security-related parameters ==
_Note: these parameters are equivalent to those in the ``requests`` library._
=== username ===
The username to be used for Basic Authentication.
=== password ===
The password to be used for Basic Authentication.
=== security_token ===
The token to be used for token based security using the ``Authorization`` header.
=== auth ===
A [https://requests.readthedocs.io/en/latest/api/#authentication | requests ``AuthBase`` instance]
to be used for authentication instead of the ``username`` and ``password``.
=== cert ===
The SSL certificate to use with all requests.
If string: the path to ssl client cert file (.pem).
If tuple: the ('cert', 'key') pair.
=== verify_tls ===
Whether or not to verify the TLS / SSL certificate of the server.
If boolean: whether or not to verify the server TLS certificate.
If string: path to a CA bundle to use for verification.
=== extra_headers ===
A dictionary with extra / custom headers that will be send with every request.
This parameter can be used to send headers that are not documented in the
openapi document or to provide an API-key.
=== cookies ===
A dictionary or [https://docs.python.org/3/library/http.cookiejar.html#http.cookiejar.CookieJar | CookieJar object]
to send with all requests.
=== proxies ===
A dictionary of 'protocol': 'proxy url' to use for all requests.
"""
included_paths = included_paths if included_paths else ()
ignored_paths = ignored_paths if ignored_paths else ()
ignored_responses = ignored_responses if ignored_responses else ()
ignored_testcases = ignored_testcases if ignored_testcases else ()
mappings_path = Path(mappings_path).as_posix()
OpenApiExecutors.__init__(
self,
source=source,
origin=origin,
base_path=base_path,
response_validation=response_validation,
disable_server_validation=disable_server_validation,
mappings_path=mappings_path,
invalid_property_default_response=invalid_property_default_response,
default_id_property_name=default_id_property_name,
faker_locale=faker_locale,
require_body_for_invalid_url=require_body_for_invalid_url,
recursion_limit=recursion_limit,
recursion_default=recursion_default,
username=username,
password=password,
security_token=security_token,
auth=auth,
cert=cert,
verify_tls=verify_tls,
extra_headers=extra_headers,
cookies=cookies,
proxies=proxies,
)
paths = self.openapi_spec["paths"]
DataDriver.__init__(
self,
reader_class=OpenApiReader,
paths=paths,
included_paths=included_paths,
ignored_paths=ignored_paths,
ignored_responses=ignored_responses,
ignored_testcases=ignored_testcases,
)
# Helper subclass used only for generating libdoc/libspec documentation:
# it exposes a curated subset of keywords while inheriting the full docs.
class DocumentationGenerator(OpenApiDriver):
    __doc__ = OpenApiDriver.__doc__

    @staticmethod
    def get_keyword_names() -> List[str]:
        """Curated keywords for libdoc and libspec."""
        return [
            "test_unauthorized",
            "test_invalid_url",
            "test_endpoint",
        ]  # pragma: no cover | /robotframework_openapidriver-4.2.1-py3-none-any.whl/OpenApiDriver/openapidriver.py | 0.910974 | 0.329877 | openapidriver.py | pypi |
from typing import Any, Dict, List, Union
from DataDriver.AbstractReaderClass import AbstractReaderClass
from DataDriver.ReaderConfig import TestCaseData
# pylint: disable=too-few-public-methods
class Test:
    """
    Helper class to support ignoring endpoint responses when generating the test cases.

    Instances compare equal when their path, method (case-insensitive) and
    response (compared as strings) all match.
    """

    def __init__(self, path: str, method: str, response: Union[str, int]):
        self.path = path
        # Normalize so "GET" and "get" compare equal.
        self.method = method.lower()
        # Responses may be given as int status codes; compare as strings.
        self.response = str(response)

    def __repr__(self) -> str:
        # Aids debugging of ignored-testcase matching.
        return f"{type(self).__name__}({self.path!r}, {self.method!r}, {self.response!r})"

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            # Return NotImplemented (not False) so Python can try the other
            # operand's reflected __eq__; `==` still evaluates False overall.
            return NotImplemented
        return (
            self.path == other.path
            and self.method == other.method
            and self.response == other.response
        )
class OpenApiReader(AbstractReaderClass):
    """Implementation of the reader_class used by DataDriver."""

    def get_data_from_source(self) -> List[TestCaseData]:
        """Build one TestCaseData per (path, method, response) in the spec."""
        test_cases: List[TestCaseData] = []
        paths = getattr(self, "paths")
        self._filter_paths(paths)
        ignored_response_codes = {
            str(code) for code in getattr(self, "ignored_responses", [])
        }
        tests_to_skip = [Test(*args) for args in getattr(self, "ignored_testcases", [])]
        http_operations = ("get", "put", "post", "delete", "patch")
        for path, path_item in paths.items():
            # Reversing the items makes post/put operations come before get
            # and delete.
            for operation, operation_data in reversed(list(path_item.items())):
                # This level of the OAS also contains data that is not related
                # to a path operation; skip those entries.
                if operation not in http_operations:
                    continue
                spec_tags = operation_data.get("tags", [])
                for status in operation_data.get("responses"):
                    # 'default' applies to all status codes that are not
                    # specified, in which case we don't know what to expect
                    # and therefore can't verify.
                    if status == "default":
                        continue
                    if status in ignored_response_codes:
                        continue
                    if Test(path, operation, status) in tests_to_skip:
                        continue
                    test_cases.append(
                        TestCaseData(
                            arguments={
                                "${path}": path,
                                "${method}": operation.upper(),
                                "${status_code}": status,
                            },
                            tags=_get_tag_list(
                                tags=spec_tags, method=operation, response=status
                            ),
                        ),
                    )
        return test_cases

    def _filter_paths(self, paths: Dict[str, Any]) -> None:
        """Apply the include / ignore path filters in place."""

        def matches_any(path: str, patterns: Any) -> bool:
            # A trailing '*' turns a pattern into a prefix (wildcard) match;
            # the prefix is everything before the first '*'.
            for pattern in patterns:
                if path == pattern:
                    return True
                if pattern.endswith("*") and path.startswith(pattern.partition("*")[0]):
                    return True
            return False

        included_paths = getattr(self, "included_paths", ())
        if included_paths:
            for path in list(paths):
                if not matches_any(path, included_paths):
                    paths.pop(path)
        ignored_paths = getattr(self, "ignored_paths", ())
        if ignored_paths:
            for path in list(paths):
                if matches_any(path, ignored_paths):
                    paths.pop(path)
def _get_tag_list(tags: List[str], method: str, response: str) -> List[str]:
    """Return the spec tags extended with method and response marker tags."""
    return [*tags, f"Method: {method.upper()}", f"Response: {response}"] | /robotframework_openapidriver-4.2.1-py3-none-any.whl/OpenApiDriver/openapi_reader.py | 0.887823 | 0.28607 | openapi_reader.py | pypi |
import openpyxl
class OpenPyxlLibrary:
    """
    This test library internally uses the openpyxl module of Python and provides keywords to open, read, write excel files. This library only supports
    xlsx file formats.

    *Prerequisites*
    The openpyxl module of Python should be installed using the command "pip install openpyxl".
    OpenPyxlLibrary must be imported.

    Example:
    | Library | OpenpyxlLibrary |
    | Open Excel | Filename with fullpath |
    """

    def __init__(self):
        self.wb = None        # active workbook (openpyxl Workbook)
        self.sheet = None     # most recently accessed worksheet
        self.filename = None  # path of the opened workbook

    def open_excel(self, file):
        """
        Open excel file

        Arguments:
        | File | Filename with fullpath to open and test upon |
        Example:
        | Open Excel | C:\\Python27\\ExcelRobotTest\\ExcelRobotTest.xlsx |
        """
        self.filename = file
        self.wb = openpyxl.load_workbook(self.filename)

    def get_sheet_names(self):
        """
        Return sheetnames of the workbook

        Example:
        | Open Excel | C:\\Python27\\ExcelRobotTest\\ExcelRobotTest.xlsx |
        | Get Sheet Names | |
        """
        # Bug fix: the original assigned the undefined name ``file`` here
        # (NameError) and called ``Workbook.get_sheet_names()``, which was
        # removed in openpyxl 3.x; ``Workbook.sheetnames`` is the supported API.
        return self.wb.sheetnames

    def opensheet_byname(self, sheetname):
        """
        **** Marked for depreciation ****
        """
        # Kept for backward compatibility; use item access instead of the
        # removed get_sheet_by_name() API.
        self.sheet = self.wb[sheetname]

    def get_column_count(self, sheetname):
        """
        Return the column count of the given sheet

        Example:
        | Get Column Count | Sheet1 |
        """
        self.sheet = self.wb[sheetname]
        return self.sheet.max_column

    def get_row_count(self, sheetname):
        """
        Return the row count of the given sheet

        Example:
        | Get Row Count | Sheet1 |
        """
        self.sheet = self.wb[sheetname]
        return self.sheet.max_row

    def read_cell_data_by_coordinates(self, sheetname, row_value, column_value):
        """
        Return the value of a cell by giving the sheetname, row value & column value

        Example:
        | Read Cell Data By Coordinates | SheetName | Row Number | Column Number |
        | Read Cell Data By Coordinates | Sheet1 | 1 | 1 |
        """
        self.sheet = self.wb[sheetname]
        # Robot Framework passes arguments as strings; openpyxl needs ints.
        self.row = int(row_value)
        self.column = int(column_value)
        varcellValue = self.sheet.cell(row=self.row, column=self.column).value
        return varcellValue

    def write_data_by_coordinates(self, sheetname, row_value, column_value, varValue):
        """
        Write the value to a cell using its co-ordinates

        Example:
        | Write Data By Coordinates | SheetName | Row Number | Column Number | Data |
        | Write Data By Coordinates | Sheet1 | 1 | 1 | TestData |
        """
        self.sheet = self.wb[sheetname]
        self.row = int(row_value)
        self.column = int(column_value)
        self.varValue = varValue
        self.sheet.cell(row=self.row, column=self.column).value = self.varValue

    def save_excel(self, file):
        """
        Save the excel file after writing the data.

        Example:
        Update existing file:
        | Open Excel | C:\\Python27\\ExcelRobotTest\\ExcelRobotTest.xlsx |
        | Save Excel | C:\\Python27\\ExcelRobotTest\\ExcelRobotTest.xlsx |
        Save in new file:
        | Open Excel | C:\\Python27\\ExcelRobotTest\\ExcelRobotTest.xlsx |
        | Save Excel | D:\\Test\\ExcelRobotNewFile.xlsx |
        """
        self.file = file
        self.wb.save(self.file)
    def add_new_sheet(self, varnewsheetname):
        """
        Add new sheet

        Arguments:
        | New sheetname | The name of the new sheet to be added in the workbook |
        Example:
        | Keywords | Parameters |
        | Add New Sheet | SheetName |
        """
        self.newsheet = varnewsheetname
        # Sheet is created in memory only; Save Excel must be called to persist it.
        self.wb.create_sheet(self.newsheet) | /robotframework-openpyxllib-0.7.tar.gz/robotframework-openpyxllib-0.7/OpenPyxlLibrary/OpenPyxlLibrary.py | 0.603932 | 0.427337 | OpenPyxlLibrary.py | pypi |
import time
from typing import Optional
from typing_extensions import Literal
from robotlibcore import keyword
from robot.api import Error
from OpenShiftLibrary.client import GenericClient
from OpenShiftLibrary.outputformatter import OutputFormatter
from OpenShiftLibrary.outputstreamer import OutputStreamer
class PodKeywords(object):
    """Robot Framework keywords for searching and waiting on OpenShift pods."""

    def __init__(self, client: GenericClient, output_formatter: OutputFormatter,
                 output_streamer: OutputStreamer) -> None:
        # Collaborators are injected: client talks to the cluster,
        # formatter/streamer produce the keyword output.
        self.client = client
        self.output_formatter = output_formatter
        self.output_streamer = output_streamer

    @keyword
    def search_pods(self, name: str = "", label_selector: Optional[str] = None,
                    namespace: Optional[str] = None) -> None:
        """Searches for pods with name containing a given string and/or
        having a given label selector and/or from a given namespace

        Args:
            name (str): String that pods name should contain. Defaults to empty string.
            label_selector (Optional[str], optional): Label selector that pods should have. Defaults to None.
            namespace (Optional[str], optional): Namespace where the pod/s exist/s. Defaults to None.

        Raises:
            Error: if no pod name contains the given substring.
        """
        pods = self.client.get(kind='Pod', namespace=namespace,
                               label_selector=label_selector)['items']
        # Substring match on metadata.name; empty `name` matches every pod.
        result = [pod for pod in pods if name in pod['metadata']['name']]
        if not result:
            self.output_streamer.stream('Pods not found in search', "error")
            raise Error('Pods not found in search')
        self.output_streamer.stream(self.output_formatter.format(result, "Pods found", "status"), "info")

    @keyword
    def wait_for_pods_number(self, number: int, namespace: Optional[str] = None,
                             label_selector: Optional[str] = None, timeout: int = 60,
                             comparison: Literal["EQUAL", "GREATER THAN", "LESS THAN"] = "EQUAL") -> None:
        """Waits for a given number of pods to exist

        Args:
            number (int): Number of pods to wait for
            namespace (Optional[str], optional): Namespace where the pods exist. Defaults to None.
            label_selector (Optional[str], optional): Label selector of the pods. Defaults to None.
            timeout (int, optional): Time to wait for the pods. Defaults to 60.
            comparison (Literal, optional): Comparison between expected and actual number of pods. Defaults to "EQUAL".
        """
        max_time = time.time() + timeout
        # NOTE(review): this polls in a tight loop without sleeping between
        # iterations, hammering the API server — consider a short sleep.
        while time.time() < max_time:
            pods_number = len(self.client.get(kind='Pod', namespace=namespace,
                                              label_selector=label_selector)['items'])
            if pods_number == number and comparison == "EQUAL":
                self.output_streamer.stream(f"Pods number: {number} succeeded", "info")
                break
            elif pods_number > number and comparison == "GREATER THAN":
                self.output_streamer.stream(f"Pods number greater than: {number} succeeded", "info")
                break
            elif pods_number < number and comparison == "LESS THAN":
                self.output_streamer.stream(f"Pods number less than: {number} succeeded", "info")
                break
        else:
            # while/else: only reached when the timeout expires without a break.
            pods = self.client.get(kind='Pod', namespace=namespace,
                                   label_selector=label_selector)['items']
            pods_number = len(pods)
            self.output_streamer.stream(self.output_formatter.format(
                pods, f"Timeout - {pods_number} pods found", "name"), "warn")

    @keyword
    def wait_for_pods_status(self, namespace: Optional[str] = None,
                             label_selector: Optional[str] = None,
                             timeout: int = 60) -> None:
        """Waits for pods status

        Args:
            namespace (Optional[str], optional): Namespace where the pods exist. Defaults to None.
            label_selector (Optional[str], optional): Pods' label selector. Defaults to None.
            timeout (int, optional): Time to wait for pods status. Defaults to 60.

        Raises:
            Error: if any pod reaches the Failed or Unknown phase.
        """
        max_time = time.time() + timeout
        # Initialized before the loop so the timeout branch below can report
        # the last observed not-ready containers.
        failing_containers = []
        while time.time() < max_time:
            pods = self.client.get(kind='Pod', namespace=namespace,
                                   label_selector=label_selector)['items']
            if pods:
                pending_pods = [pod for pod in pods if pod['status']['phase'] == "Pending"]
                if not pending_pods:
                    # No pod is Pending anymore: check for hard failures first.
                    failing_pods = [pod for pod in pods if pod['status']['phase']
                                    == "Failed" or pod['status']['phase'] == "Unknown"]
                    if failing_pods:
                        self.output_streamer.stream(self.output_formatter.format(
                            failing_pods, "Error in Pod", "wide"), "error")
                        raise Error(self.output_formatter.format(
                            failing_pods, "There are pods in status Failed or Unknown: ", "name"))
                    # Running pods whose second status condition is not "True"
                    # are treated as having not-ready containers.
                    # NOTE(review): relies on conditions[1] being the readiness
                    # condition — confirm against the cluster's pod schema.
                    failing_containers = [pod for pod in pods if pod['status']['phase']
                                          == "Running" and pod['status']['conditions'][1]['status'] != "True"]
                    if not failing_containers:
                        self.output_streamer.stream(self.output_formatter.format(pods, "Pods", "wide"), "info")
                        break
        else:
            # while/else: only reached when the timeout expires without a break.
            self.output_streamer.stream(self.output_formatter.format(
                failing_containers, "Timeout - Pods with containers not ready", "wide"), "warn") | /robotframework-openshift-1.0.0.tar.gz/robotframework-openshift-1.0.0/OpenShiftLibrary/keywords/pods.py | 0.897821 | 0.287718 | pods.py | pypi |
import json
import os
import validators
import yaml
from typing import Any, Dict, List, Optional, Union
from robotlibcore import keyword
from OpenShiftLibrary.client.authclient import AuthClient
from OpenShiftLibrary.client import GenericClient
from OpenShiftLibrary.dataloader import DataLoader
from OpenShiftLibrary.dataparser import DataParser
from OpenShiftLibrary.errors import ResourceOperationFailed
from OpenShiftLibrary.outputformatter import OutputFormatter
from OpenShiftLibrary.outputstreamer import OutputStreamer
from OpenShiftLibrary.templateloader import TemplateLoader
class GenericKeywords(object):
    def __init__(self, auth_client: AuthClient, client: GenericClient, data_loader: DataLoader,
                 data_parser: DataParser, output_formatter: OutputFormatter,
                 output_streamer: OutputStreamer, template_loader: TemplateLoader) -> None:
        """Store the injected collaborators used by the keyword implementations."""
        self.auth_client = auth_client
        self.client = client
        self.data_loader = data_loader
        self.data_parser = data_parser
        self.output_formatter = output_formatter
        self.output_streamer = output_streamer
        self.template_loader = template_loader
@keyword
def oc_apply(self, kind: str, src: str, api_version: Optional[str] = None, namespace: Optional[str] = None,
**kwargs: Optional[str]) -> List[Dict[str, Any]]:
"""Applies Resource/s definition/s on one or more Resources
Args:
kind (str): Resource/s kind/s
src (str): Path/Url/String containing the yaml or json with the Resource/s definition/s
api_version (Optional[str], optional): Resource Api Version. Defaults to None.
namespace (Optional[str], optional): Namespace where the Resource/s exist/s or will be
created. Defaults to None.
Returns:
List[Dict[str, Any]]: List containing the apply operation/s result/s
"""
return self._apply_or_create(kind, 'apply', src, api_version=api_version,
namespace=namespace, **kwargs)
@keyword
def oc_create(self, kind: str, src: str, api_version: Optional[str] = None, namespace: Optional[str] = None,
**kwargs: Optional[str]) -> List[Dict[str, Any]]:
"""Creates one or multiple Resources
Args:
kind (str): Resource/s kind/s
src (str): Path/Url/String containing the yaml or json with the Resource/s definition/s
api_version (Optional[str], optional): Resource Api Version. Defaults to None.
namespace (Optional[str], optional): Namespace where the Resource/s will be created. Defaults to None.
Returns:
List[Dict[str, Any]]: List containing the create operation/s result/s
"""
return self._apply_or_create(kind, 'create', src, api_version=api_version,
namespace=namespace, **kwargs)
    @keyword
    def oc_delete(self, kind: str, api_version: Optional[str] = None, src: Optional[str] = None,
                  name: Optional[str] = None, namespace: Optional[str] = None,
                  label_selector: Optional[str] = None, field_selector: Optional[str] = None,
                  **kwargs: str) -> List[Dict[str, Any]]:
        """Deletes one or more Resources

        Args:
            kind (str): Resource/s kind/s
            api_version (Optional[str], optional): Resource Api Version. Defaults to None.
            src (str): Path/Url/String containing the yaml or json with the Resource/s definition/s
            name (Optional[str], optional): Name of the Resource to delete
            namespace (Optional[str], optional): Namespace where the Resource/s to delete exist/s. Defaults to None.
            label_selector (Optional[str], optional): Label Selector of the Resource/s to delete. Defaults to None.
            field_selector (Optional[str], optional): Field Selector of the Resource/s to delete. Defaults to None.

        Returns:
            List[Dict[str, Any]]: List containing the delete operation/s result/s
        """
        operation = 'delete'
        # Argument validation: the caller must supply either a definition
        # source (src) or at least one selector (name/label/field), not both.
        if not kind and not (src or name or label_selector or field_selector):
            self._handle_error(operation, ("Kind and src or kind and at least one of name, label_selector "
                                           "or field_selector is required"))
        if kind and not (src or name or label_selector or field_selector):
            self._handle_error(operation, ("Src or at least one of name, label_selector "
                                           "or field_selector is required"))
        if src and (name or label_selector or field_selector):
            self._handle_error(operation, ("Src or at least one of name, label_selector "
                                           "or field_selector is required, but not both"))
        # NOTE(review): if _handle_error does not raise, `result` may be left
        # unbound for invalid argument combinations and the final return would
        # fail with NameError — confirm _handle_error always raises.
        result: List[Dict[str, Any]]
        if src and not (name or label_selector or field_selector):
            # 'template_data' is consumed here so it is not forwarded to the client.
            items = self._get_items(operation, src, kwargs.pop('template_data', None))
            if kind == 'List':
                # A List kind is deleted by passing each item as a full body.
                result = [self._operate(kind, operation,
                                        api_version=api_version, body=item,
                                        namespace=namespace or item.get('metadata', {}).get('namespace'),
                                        **kwargs) for item in items]
            else:
                # Otherwise delete via each item's identifying metadata; an
                # explicit `namespace` argument overrides the item's own.
                result = [self._operate(kind, operation,
                                        api_version=api_version,
                                        name=item.get('metadata', {}).get('name'),
                                        namespace=namespace or item.get('metadata', {}).get('namespace'),
                                        label_selector=item.get('metadata', {}).get('label_selector'),
                                        field_selector=item.get('metadata', {}).get('field_selector'),
                                        **kwargs) for item in items]
        if not src and (name or label_selector or field_selector):
            result = [self._operate(kind, operation, api_version=api_version, name=name,
                                    namespace=namespace, label_selector=label_selector,
                                    field_selector=field_selector, **kwargs)]
        self._generate_output(operation, result)
        return result
@keyword
def oc_get(self, kind: str, api_version: Optional[str] = None, name: Optional[str] = None, namespace: Optional[str] = None,
           label_selector: Optional[str] = None, field_selector: Optional[str] = None,
           fields: Optional[List] = None, **kwargs: str) -> List[Dict[str, Any]]:
    """Gets Resource/s.

    Args:
        kind (str): Resource/s kind/s
        api_version (Optional[str], optional): Resource Api Version. Defaults to None.
        name (Optional[str], optional): Resource name. Defaults to None.
        namespace (Optional[str], optional): Namespace where the Resource/s exist/s. Defaults to None.
        label_selector (Optional[str], optional): Label Selector of the Resource/s. Defaults to None.
        field_selector (Optional[str], optional): Field Selector of the Resource/s. Defaults to None.
        fields (Optional[List], optional): Dotted field paths used to project the result. Defaults to None.

    Returns:
        List[Dict[str, Any]]: The 'items' of the get result (optionally projected to `fields`)
    """
    operation = 'get'
    # Fold the name into the field selector so a single selector expression
    # is sent to the cluster.
    if name and field_selector:
        field_selector = f"metadata.name=={name},{field_selector}"
    elif name and not field_selector:
        field_selector = f"metadata.name=={name}"
    arguments = {'api_version': api_version, 'namespace': namespace,
                 'label_selector': label_selector, 'field_selector': field_selector,
                 **kwargs}
    result = self._operate(kind, operation, **arguments)
    self.output_streamer.stream(result, 'info')
    # NOTE(review): if _operate ever returns a non-dict here, `items` below is
    # never bound and the method raises NameError — confirm _operate always
    # yields a dict for 'get'.
    if isinstance(result, Dict):
        items = result.get('items')
        if not items:
            self._handle_error(operation, "Not Found")
    if fields:
        items = self._filter(items, fields)
    self._generate_output(operation, items)
    return items
@keyword
def oc_get_pod_logs(self, name: str, namespace: str, **kwargs: Optional[str]) -> str:
    """Retrieves the logs of a Pod.

    Args:
        name (str): Name of the pod whose logs are fetched
        namespace (str): Namespace the pod lives in

    Returns:
        str: The pod's logs
    """
    operation = 'get pod logs'
    logs = None
    try:
        logs = self.client.get_pod_logs(name, namespace, **kwargs)
    except Exception as error:
        self._handle_error(operation, error)
    self._generate_output(operation, logs)
    return logs
@keyword
def oc_login(self, host: str, username: str, password: str,
             ssl_ca_cert: Optional[str] = None) -> None:
    """Authenticates against the Cluster and reloads the client configuration.

    Args:
        host (str): Cluster url
        username (str): User name
        password (str): User password
        ssl_ca_cert (Optional[str], optional): Path to client certificate. Defaults to None.
    """
    operation = 'login'
    try:
        bearer_token = self.auth_client.login(host, username, password, ssl_ca_cert)
        self.client.reload_config(bearer_token, host, ssl_ca_cert)
    except Exception as error:
        self._handle_error(operation, error)
    self._generate_output(operation, f"Successfully connected to {host}")
@keyword
def oc_patch(self, kind: str, src: str, name: str, api_version: Optional[str] = None,
             namespace: Optional[str] = None, **kwargs: str) -> Dict[str, Any]:
    """Updates Fields of the Resource using JSON merge patch.

    Args:
        kind (str): Resource kind
        src (str): Path/Url/String containing the json with the Resource patch
        name (str): Name of the Resource to patch
        api_version (Optional[str], optional): Resource Api Version. Defaults to None.
        namespace (Optional[str], optional): Namespace where the Resource exists. Defaults to None.

    Returns:
        Dict[str, Any]: Patch operation result
    """
    operation = 'patch'
    # _handle_error raises, so execution continues only when kind, src and
    # name are all provided. (The original had a second check, `if kind and
    # not (src and name)`, after this one; it could never be reached and has
    # been removed.)
    if not (kind and src and name):
        self._handle_error(operation, "Kind, src and name are required")
    # Only the first parsed document from src is used as the patch body.
    body = self._parse_data(operation, self._load_data(operation, src))[0]
    arguments = {'api_version': api_version, 'name': name, 'body': body, 'namespace': namespace,
                 'content_type': 'application/merge-patch+json', **kwargs}
    result = self._operate(kind, operation, **arguments)
    self._generate_output(operation, result)
    return result
@keyword
def oc_watch(self, kind: str, api_version: Optional[str] = None,
             namespace: Optional[str] = None, name: Optional[str] = None,
             label_selector: Optional[str] = None, field_selector: Optional[str] = None,
             resource_version: Optional[str] = None,
             timeout: Optional[int] = 60) -> List[Dict[str, Any]]:
    """Watches changes in one or more Resources.

    Args:
        kind (str): Resource/s kind/s
        api_version (Optional[str], optional): Resource Api Version. Defaults to None.
        namespace (Optional[str], optional): Namespace where the Resource/s exist/s. Defaults to None.
        name (Optional[str], optional): Resource name. Defaults to None.
        label_selector (Optional[str], optional): Label Selector of the Resource/s. Defaults to None.
        field_selector (Optional[str], optional): Field Selector of the Resource/s. Defaults to None.
        resource_version (Optional[str], optional): Resource Version of the Resource/s. Defaults to None.
        timeout (Optional[int], optional): Timeout for the watch. Defaults to 60.

    Returns:
        List[Dict[str, Any]]: The list of watched events
    """
    operation = 'watch'
    if not kind:
        self._handle_error(operation, "Kind is required")
    # Fold the name into the field selector so a single expression is sent.
    if name:
        field_selector = f"metadata.name=={name},{field_selector}" if field_selector \
            else f"metadata.name=={name}"
    result = self._operate(kind, operation, api_version=api_version, namespace=namespace,
                           label_selector=label_selector, field_selector=field_selector,
                           resource_version=resource_version, timeout=timeout)
    self._generate_output(operation, result)
    return result
def _apply_or_create(self, kind: str, operation: str, src: str, api_version: Optional[str] = None,
namespace: Optional[str] = None, **kwargs: Optional[str]) -> List[Dict[str, Any]]:
if not (kind and src):
self._handle_error(operation, "Kind and src are required")
items = self._get_items(operation, src, kwargs.pop('template_data', None))
result = [self._operate(kind, operation, api_version=api_version, body=item,
namespace=namespace or item.get('metadata', {}).get('namespace'),
**kwargs) for item in items]
self._generate_output(operation, result)
return result
def _get_items(self, operation: str, src: str, template_data: Optional[str] = None) -> List[Dict[str, Any]]:
loaded_data = self._load_data(operation, src)
data = self._load_template_data(operation, loaded_data, template_data) if template_data else loaded_data
items = self._parse_data(operation, data)
return items
def _load_data(self, operation: str, src: str) -> str:
    """Resolves *src* into raw text.

    A path to an existing file is read from disk, a url is fetched, and any
    other string is assumed to already be raw yaml/json and returned as-is.
    """
    if os.path.isfile(src):
        loader, origin = self.data_loader.from_file, "file"
    elif validators.url(src):
        loader, origin = self.data_loader.from_url, "url"
    else:
        return src
    try:
        return loader(src)
    except Exception as error:
        self._handle_error(operation, f"Load data from {origin} failed\n{error}")
def _load_template_data(self, operation: str, data: str, template_data: str) -> str:
result: str
try:
result = self.template_loader.from_jinja2(data, template_data)
except Exception as error:
self._handle_error(operation, f"Load data from jinja failed\n{error}")
return result
def _parse_data(self, operation: str, data: str) -> List[Dict[str, Any]]:
result: List[Dict[str, Any]]
if self._is_valid_json(data):
try:
result = self.data_parser.from_json(data)
except Exception as error:
self._handle_error(operation, f"Parse json failed\n{error}")
elif self._is_valid_yaml(data):
try:
result = self.data_parser.from_yaml(data)
except Exception as error:
self._handle_error(operation, f"Parse yaml failed\n{error}")
else:
self._handle_error(operation, f"Data is not a valid yaml or json")
return result
def _operate(self, kind: str, operation: str, **arguments: Optional[str]) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
result: Union[Dict[str, Any], List[Dict[str, Any]]]
try:
result = getattr(self.client, operation)(kind, **arguments)
except Exception as error:
error_reason = error
if 'forbidden' in str(error):
error_reason = f"Operation Forbidden. Please Log in to cluster."
self._handle_error(operation, error_reason)
return result
def _handle_error(self, operation: str, error_reason: str) -> None:
    """Raises ResourceOperationFailed describing which operation failed and why."""
    raise ResourceOperationFailed(f"{operation.capitalize()} failed\nReason: {error_reason}")
def _generate_output(self, operation: str,
output: Union[List[List[Dict[str, Any]]], List[Dict[str, Any]]]) -> None:
output_message = f"{operation.capitalize()} result"
output = self.output_formatter.format(output, output_message, None)
self.output_streamer.stream(output, 'info')
def _is_valid_json(self, data: str) -> bool:
try:
json.loads(data)
except Exception as e:
return False
return True
def _is_valid_yaml(self, data: str) -> bool:
    """Returns True when every yaml document in *data* parses.

    Bug fix: ``yaml.safe_load_all`` returns a lazy generator. The original
    code only created the generator and never consumed it, so parsing never
    happened and the check succeeded for any string input. The generator is
    now exhausted so parse errors actually surface.

    Args:
        data (str): Candidate yaml text

    Returns:
        bool: True if *data* is valid yaml
    """
    try:
        for _ in yaml.safe_load_all(data):
            pass
    except Exception:
        return False
    return True
def _filter(self, items: List[Dict[str, Any]], fields: List) -> List[Dict[str, Any]]:
    """Projects each item in *items* down to the requested *fields*.

    Each entry in *fields* is a dot-separated path (e.g. ``metadata.name``,
    ``spec.containers[0]``). The returned dicts are keyed by the original
    field string and hold either the resolved value or the literal string
    "Field does not exist".
    """
    # Pre-split every dotted path once, outside the per-item loop.
    splitted_fields = [item.split('.') for item in fields]
    filtered_items = []
    for item in items:
        filtered_item = {}
        for index, field_list in enumerate(splitted_fields):
            # Walk the path segment by segment, starting from the whole item.
            subitem = item
            for field in field_list:
                if isinstance(subitem, List):
                    # Fan out over list elements. NOTE: this comprehension's
                    # `item` shadows the outer loop variable — harmless in
                    # Python 3 (comprehensions have their own scope) but easy
                    # to misread.
                    subitem = [self._filter_one(item, field) for item in subitem]
                else:
                    subitem = self._filter_one(subitem, field)
            filtered_item[fields[index]] = subitem if subitem is not None else "Field does not exist"
        filtered_items.append(filtered_item)
    return filtered_items
def _filter_one(self, item: Union[Dict[str, Any], List[Dict[str, Any]]], field: str) -> Union[Dict[str, Any], str]:
if '[' in field and ']' in field:
item = item.get(field[0:-3], None) or item.get(field[0: -2], None)
if item:
str_idx = field[-2:-1]
if str_idx is not '*' and str_idx is not '[':
idx = int(str_idx) if str_idx.isdecimal() else None
item = item[idx] if idx is not None and idx < len(item) else None
else:
item = item.get(field, None) if isinstance(item, Dict) else None
return item if item is not None else "Field does not exist" | /robotframework-openshift-1.0.0.tar.gz/robotframework-openshift-1.0.0/OpenShiftLibrary/keywords/generic.py | 0.802942 | 0.204699 | generic.py | pypi |
from typing import Any, Dict, List, Optional
from kubernetes import client, config
from openshift.dynamic import DynamicClient
from urllib import parse
from OpenShiftLibrary.client import GenericClient
class GenericApiClient(GenericClient):
    """Generic Resource client built on the openshift ``DynamicClient``.

    Configuration is loaded lazily: the first Resource access triggers
    ``reload_config`` (kubeconfig by default, or token/host credentials when
    supplied). All operation results are converted to plain dicts.
    """

    def apply(self, kind: str, body: str, api_version: Optional[str] = None,
              namespace: Optional[str] = None, **kwargs: str) -> Dict[str, Any]:
        """Applies a Resource definition and returns the server response as a dict."""
        return self._get_resources(kind, api_version).apply(body=body, namespace=namespace,
                                                            **kwargs).to_dict()

    def create(self, kind: str, body: str, api_version: Optional[str] = None,
               namespace: Optional[str] = None, **kwargs: str) -> Dict[str, Any]:
        """Creates a Resource and returns the server response as a dict."""
        return self._get_resources(kind, api_version).create(body=body, namespace=namespace,
                                                             **kwargs).to_dict()

    def delete(self, kind: str, api_version: Optional[str] = None, name: Optional[str] = None,
               namespace: Optional[str] = None, body: Optional[str] = None,
               label_selector: Optional[str] = None, field_selector: Optional[str] = None,
               **kwargs: str) -> Dict[str, Any]:
        """Deletes Resource/s addressed by name, body or selectors."""
        return self._get_resources(kind, api_version).delete(name=name, namespace=namespace, body=body,
                                                             label_selector=label_selector,
                                                             field_selector=field_selector,
                                                             **kwargs).to_dict()

    def get(self, kind: str, api_version: Optional[str] = None, name: Optional[str] = None, namespace: Optional[str] = None,
            label_selector: Optional[str] = None, field_selector: Optional[str] = None,
            **kwargs: str) -> Dict[str, Any]:
        """Gets Resource/s matching the given name/namespace/selectors."""
        return self._get_resources(kind, api_version).get(name=name, namespace=namespace,
                                                          label_selector=label_selector,
                                                          field_selector=field_selector,
                                                          **kwargs).to_dict()

    def get_pod_logs(self, name: str, namespace: str, **kwargs: Optional[str]) -> Any:
        """Fetches a pod's logs via a raw request; extra kwargs become query params."""
        query = parse.urlencode(kwargs)
        url = f"/api/v1/namespaces/{namespace}/pods/{name}/log?{query}"
        return self.dynamic_client.request('GET', url)

    def patch(self, kind: str, name: str, body: str, api_version: Optional[str] = None, namespace: Optional[str] = None,
              **kwargs: str) -> Dict[str, Any]:
        """Patches the named Resource with *body* and returns the response as a dict."""
        return self._get_resources(kind, api_version).patch(name=name, body=body, namespace=namespace,
                                                            **kwargs).to_dict()

    def watch(self, kind: str, api_version: Optional[str] = None, namespace: Optional[str] = None, name: Optional[str] = None,
              label_selector: Optional[str] = None, field_selector: Optional[str] = None,
              resource_version: Optional[str] = None,
              timeout: Optional[int] = None) -> List[Dict[str, Any]]:
        """Watches Resource events until *timeout* and returns them as a list of dicts."""
        events = self._get_resources(kind, api_version).watch(namespace=namespace, name=name,
                                                              label_selector=label_selector,
                                                              field_selector=field_selector,
                                                              resource_version=resource_version,
                                                              timeout=timeout)
        return [{'type': event['type'], 'object': event['object'].to_dict()} for event in events]

    def reload_config(self, token: Optional[str] = None,
                      host: Optional[str] = None,
                      ssl_ca_cert: Optional[str] = None) -> None:
        """Rebuilds the DynamicClient, from token/host credentials or kubeconfig.

        Args:
            token (Optional[str], optional): Bearer token for the cluster. Defaults to None.
            host (Optional[str], optional): Cluster url. Defaults to None.
            ssl_ca_cert (Optional[str], optional): Path to CA certificate; when absent,
                TLS verification is disabled. Defaults to None.
        """
        if token and host:
            configuration = client.Configuration()
            configuration.api_key['authorization'] = token
            configuration.api_key_prefix['authorization'] = 'Bearer'
            configuration.host = host
            configuration.ssl_ca_cert = ssl_ca_cert
            # Idiom: bool(x) instead of `True if x else False` (same value).
            configuration.verify_ssl = bool(ssl_ca_cert)
        else:
            # NOTE(review): load_kube_config() loads into the library's default
            # configuration and returns None; ApiClient(None) then picks up
            # that default — confirm this reliance on the default is intended.
            configuration = config.load_kube_config()
        self.dynamic_client = DynamicClient(client.ApiClient(configuration))

    def _get_resources(self, kind: str, api_version: Optional[str] = None) -> Any:
        """Returns the dynamic Resource handle for *kind*, loading config lazily."""
        # Idiom: hasattr instead of try/getattr/except AttributeError.
        if not hasattr(self, 'dynamic_client'):
            self.reload_config()
        return self.dynamic_client.resources.get(api_version=api_version or self._get_api_version(kind),
                                                 kind=kind)

    def _get_api_version(self, kind: str) -> str:
        """Discovers the preferred groupVersion serving *kind*.

        Checks the core /api/v1 group first, then each discovered API group's
        preferred version; returns "" when no group serves the kind.
        """
        result = ""
        core = self.dynamic_client.request('GET', '/api/v1')
        if any(resource for resource in core.resources if resource.kind == kind):
            result = core.groupVersion
        else:
            groups = self.dynamic_client.request('GET', '/apis').groups
            for group in groups:
                api = self.dynamic_client.request('GET', f"/apis/{group.name}/{group.preferredVersion.version}")
                if any(resource for resource in api.resources if resource.kind == kind):
                    result = group.preferredVersion.groupVersion
                    break
        return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.