code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from collections import OrderedDict
from copy import deepcopy
from io import StringIO
import os
import re
from functools import partial
from tempfile import TemporaryDirectory
from typing import List
from IPython.core.display import display
from robot.api import get_model
from robot.errors import DataError
from robot.reporting import ResultWriter
from robot.running.model import TestSuite
from robot.running.builder.testsettings import TestDefaults
from robot.running.builder.parsers import ErrorReporter
from robot.running.builder.transformers import SettingsBuilder, SuiteBuilder
from robot.model.itemlist import ItemList
from robot.output import LOGGER
from robot.utils import get_error_details
from ipywidgets import VBox, HBox, Button, Output, Text
from .utils import (
detect_robot_context, line_at_cursor, scored_results,
complete_libraries, get_lunr_completions, remove_prefix,
display_log, process_screenshots, lunr_query, get_keyword_doc
)
from .selectors import (
BrokenOpenConnection, clear_selector_highlights, get_autoit_selector_completions, get_selector_completions,
get_white_selector_completions, get_win32_selector_completions, is_autoit_selector,
is_selector, is_white_selector, is_win32_selector, close_current_connection, yield_current_connection
)
from .constants import VARIABLE_REGEXP, BUILTIN_VARIABLES
from .listeners import (
GlobalVarsListener, RobotKeywordsIndexerListener,
SeleniumConnectionsListener, StatusEventListener
)
from robot.running.model import UserKeyword
# Monkey patch user-keyword source for JupyterLab debugger
def get_source(self):
    """Return the keyword's source path: an explicitly recorded
    ``actual_source`` when present, otherwise the parent suite's source."""
    try:
        return self.actual_source
    except AttributeError:
        parent = self.parent
        return parent.source if parent is not None else None


UserKeyword.source = property(get_source)
def normalize_argument(name):
    """Split a Robot keyword argument spec into its parts.

    Returns a ``(name, identifier, default)`` tuple where ``identifier`` is
    the argument name stripped of surrounding punctuation with all remaining
    non-word characters replaced by underscores, and ``default`` is the text
    after ``=`` (or None when no default is given).
    """
    if "=" in name:
        name, default = name.split("=", 1)
    else:
        default = None
    # Bug fix: re.U was previously passed as re.sub's positional *count*
    # argument instead of *flags*; pass it explicitly as flags.
    stripped = re.sub(r"^[^\w]*|[^\w]*$", "", name, flags=re.U)
    return (
        name,
        re.sub(r"\W", "_", stripped, flags=re.U),
        default
    )
def execute_keyword(suite: TestSuite, name, arguments, execute_kwargs, **values):
    """Run a single keyword as a one-off task/test and display its report.

    `suite` the suite providing the keyword (copied; never mutated).
    `name` keyword name; `arguments` normalized argument tuples from
    normalize_argument; `values` maps argument identifiers to the values
    entered in the widgets.
    """
    # Robot data syntax: tasks live under *** Tasks ***, tests under
    # *** Test Cases ***.
    header = suite.rpa and "Tasks" or "Test Cases"
    # NOTE(review): the separators inside this generated Robot snippet are
    # whitespace-significant (two or more spaces between keyword and
    # arguments) -- confirm against the original file's exact spacing.
    code = f"""\
*** {header} ***
{name}
    {name}    {' '.join([values[a[1]] for a in arguments])}
"""
    # Copy the test suite so the one-off run cannot alter the live suite.
    suite = deepcopy(suite)
    suite.rpa = True
    with TemporaryDirectory() as path:
        _, report = _execute_impl(
            code, suite, outputdir=path, interactive_keywords=False,
            **execute_kwargs
        )
    if report is not None:
        display(report, raw=True)
def on_button_execute(execute, controls, out, widgets, *args, **kwargs):
    """Button click handler: call ``execute`` with the current control values.

    While running, the first widget (the button) shows "Executing..." and
    every widget is disabled; both are restored when execution finishes,
    even on error.
    """
    values = {name: control.value for name, control in controls.items()}
    with out:
        original_label = widgets[0].description
        widgets[0].description = "Executing..."
        for widget in widgets:
            widget.disabled = True
        out.clear_output(wait=True)
        try:
            execute(**values)
        finally:
            widgets[0].description = original_label
            for widget in widgets:
                widget.disabled = False
def get_interactive_keyword(suite: TestSuite, keyword, **execute_kwargs):
    """Get an interactive widget for testing a keyword."""
    name = keyword.name
    arguments = [normalize_argument(arg) for arg in keyword.args]
    # Operate on a private copy: the widget must never mutate the live suite.
    runner = partial(
        execute_keyword, deepcopy(suite), name, arguments, execute_kwargs
    )
    out = Output()
    controls = OrderedDict()
    inputs = []
    for _, identifier, default in arguments:
        field = Text(description=identifier + "=", value=default)
        inputs.append(field)
        controls[identifier] = field
    button = Button(description=name)
    widgets = [button] + inputs
    button.on_click(partial(on_button_execute, runner, controls, out, widgets))
    return VBox((HBox(widgets), out))
class TestSuiteError(Exception):
    """Raised when a suite run produced errors recorded by Robot's logger."""
class ProgressUpdater(StringIO):
    """Capture ``robot.api.logger.console`` output and render a live
    spinner + status line through the supplied display callbacks.

    Pass an instance as the ``stdout`` argument of ``execute`` to get
    progress updates while a suite runs.
    """

    # ANSI color escape sequences (candidates for stripping console output).
    colors = re.compile(r"\[[0-?]+[^m]+m")

    def __init__(self, display, update_display):
        super(ProgressUpdater, self).__init__()
        self.display = display
        self.update_display = update_display
        self.progress = {"test": "n/a", "keyword": "n/a", "message": None}
        self.already_displayed = False

    def _update(self):
        # Join the truthy progress fields into "test | keyword | message".
        parts = [
            self.progress["test"],
            self.progress["keyword"],
            self.progress["message"],
        ]
        status_line = " | ".join(str(part) for part in parts if part)
        mimebundle = {
            "text/html": (
                '<pre style="white-space:nowrap;overflow:hidden;padding-left:1ex;'
                f'"><i class="fa fa-spinner fa-pulse"></i>{status_line}</pre>'
            )
        }
        if self.already_displayed:
            self.update_display(mimebundle)
        else:
            self.display(mimebundle)
            self.already_displayed = True

    def update(self, data):
        """Record a new test or keyword name and refresh the status line."""
        if "test" in data:
            self.progress["test"] = data["test"]
            self.progress["message"] = None
        elif "keyword" in data:
            self.progress["keyword"] = data["keyword"]
            self.progress["message"] = None
        self._update()

    def clear(self):
        """Blank out the displayed status line."""
        self.update_display({"text/plain": ""})

    def write(self, s):
        """Treat written text as the latest console message."""
        self.progress["message"] = s.strip()
        self._update()
        return super(ProgressUpdater, self).write(s)
class NoOpStream:
    """A write sink that silently discards everything (default stdout)."""

    def write(self, message, flush=False):
        """Discard *message*; ``flush`` is accepted for API compatibility."""

    def flush(self):
        """No-op; present so callers may flush unconditionally."""
def init_suite(name: str, source: str = None):
    """Create a new test suite.

    `source` defaults to the current working directory, resolved at call
    time.  (Previously ``os.getcwd()`` was evaluated once at import time,
    freezing whatever directory the module happened to be imported from.)
    """
    if source is None:
        source = os.getcwd()
    return TestSuite(name=name, source=source)
def generate_report(suite: TestSuite, outputdir: str):
    """Render the Robot log for the last run and return it as a mimebundle
    containing a "Log" button for Jupyter.

    `suite` the executed suite (only its ``rpa`` flag is read).
    `outputdir` directory holding ``output.xml``; ``log.html`` is written there.
    """
    process_screenshots(outputdir)
    writer = ResultWriter(os.path.join(outputdir, "output.xml"))
    writer.write_results(
        log=os.path.join(outputdir, "log.html"),
        report=None,
        rpa=getattr(suite, "rpa", False),
    )
    with open(os.path.join(outputdir, "log.html"), "rb") as fp:
        log = fp.read()
        # No report.html is generated, so neutralize the log's report link.
        log = log.replace(b'"reportURL":"report.html"', b'"reportURL":null')
    html = """
<button
class="jp-mod-styled jp-mod-accept"
onClick="{};event.preventDefault();event.stopPropagation();"
>
<i class="fa fa-file" aria-hidden="true"></i>
Log
</button>
""".format(display_log(log, "log.html"))
    return {"text/html": html}
def _execute_impl(code: str, suite: TestSuite, defaults: TestDefaults = None,
                  stdout=None, stderr=None, listeners=None, drivers=None,
                  outputdir=None, interactive_keywords=True, logger=None):
    """Compile ``code`` into ``suite`` and run it.

    Returns ``(result, report)``; ``report`` is a display mimebundle, or a
    list of interactive widgets when the snippet only defined keywords.
    Raises TestSuiteError when Robot's logger recorded errors, after rolling
    back the suite's imports/variables/keywords.
    """
    # Mutable/stateful defaults (TestDefaults(), []) must not live in the
    # signature: they would be shared between every call of this function.
    defaults = TestDefaults() if defaults is None else defaults
    listeners = [] if listeners is None else listeners
    drivers = [] if drivers is None else drivers
    # This will help raise runtime exceptions
    traceback = []
    LOGGER.register_error_listener(lambda: traceback.extend(get_error_details()))
    # Clear selector completion highlights
    for driver in yield_current_connection(drivers, SeleniumConnectionsListener.NAMES + ["jupyter"]):
        try:
            clear_selector_highlights(driver)
        except BrokenOpenConnection:
            close_current_connection(drivers, driver)
    if logger is not None:
        logger.debug("Compiling code: \n%s", code)
    # Copy keywords/variables/libraries so a failed run can be rolled back.
    imports = get_items_copy(suite.resource.imports)
    variables = get_items_copy(suite.resource.variables)
    keywords = get_items_copy(suite.resource.keywords)
    # Compile AST
    model = get_model(
        StringIO(code),
        data_only=False,
        curdir=os.getcwd().replace("\\", "\\\\"),
    )
    ErrorReporter(code).visit(model)
    SettingsBuilder(suite, defaults).visit(model)
    SuiteBuilder(suite, defaults).visit(model)
    # Strip variables/keyword duplicates
    strip_duplicate_items(suite.resource.variables)
    strip_duplicate_items(suite.resource.keywords)
    for listener in listeners:
        # Notify suite variables to the listener
        if isinstance(listener, GlobalVarsListener):
            listener.suite_vars = [var.name for var in suite.resource.variables]
    # Tag everything the snippet added with the suite's source path.
    new_imports = [item for item in get_items_copy(suite.resource.imports) if item not in imports]
    for new_import in new_imports:
        new_import.source = suite.source
    new_variables = [item for item in get_items_copy(suite.resource.variables) if item not in variables]
    for new_variable in new_variables:
        new_variable.source = suite.source
    # If there is no test, allow the user to interact with defined keywords
    # by returning one widget per new keyword instead of running anything.
    new_keywords = [item for item in get_items_copy(suite.resource.keywords) if item not in keywords]
    for new_keyword in new_keywords:
        new_keyword.actual_source = suite.source
    if not suite.tests and new_keywords and interactive_keywords:
        return None, [
            get_interactive_keyword(
                suite, keyword,
                listeners=[
                    listener
                    for listener in listeners
                    if not isinstance(listener, StatusEventListener)
                ], drivers=drivers,
                logger=logger
            )
            for keyword in new_keywords
        ]
    # Set default streams; by default stdout is a no-op sink.
    if stdout is None:
        stdout = NoOpStream()
    if logger is not None:
        logger.debug("Executing code")
    # Execute suite
    result = suite.run(
        outputdir=outputdir,
        stdout=stdout, stderr=stderr,
        listener=listeners
    )
    if len(traceback) != 0:
        # Roll back keywords/variables/libraries added by the failed snippet.
        set_items(suite.resource.imports, imports)
        set_items(suite.resource.variables, variables)
        set_items(suite.resource.keywords, keywords)
        clean_items(suite.tests)
        error_msg = '\n'.join(traceback)
        if logger is not None:
            logger.debug("Execution error: %s", error_msg)
        raise TestSuiteError(error_msg)
    for listener in listeners:
        if isinstance(listener, RobotKeywordsIndexerListener):
            listener.import_from_suite_data(suite)
    # Detect RPA
    suite.rpa = get_rpa_mode(model)
    report = None
    if suite.tests:
        report = generate_report(suite, outputdir)
    # Remove tests run so far, so they are not run again next execution.
    clean_items(suite.tests)
    return result, report
def execute(code: str, suite: TestSuite, defaults: TestDefaults = None,
            stdout=None, stderr=None, listeners=None, drivers=None,
            outputdir=None, logger=None):
    """
    Execute a snippet of code, given the current test suite. Returns a tuple
    containing the result of the suite (if there were tests) and a
    displayable object containing either the report or interactive widgets.
    """
    # Resolve mutable/stateful defaults here instead of in the signature,
    # where a single TestDefaults()/list would be shared between calls.
    defaults = TestDefaults() if defaults is None else defaults
    listeners = [] if listeners is None else listeners
    drivers = [] if drivers is None else drivers
    if outputdir is None:
        # Use a throw-away output directory when the caller gave none.
        with TemporaryDirectory() as path:
            result = _execute_impl(code, suite, defaults, stdout, stderr,
                                   listeners, drivers, path, logger=logger)
    else:
        result = _execute_impl(code, suite, defaults, stdout, stderr,
                               listeners, drivers, outputdir, logger=logger)
    return result
def complete(code: str, cursor_pos: int, suite: TestSuite, keywords_listener: RobotKeywordsIndexerListener = None, extra_libraries: List[str] = None, drivers=None, logger=None):
    """Complete a snippet of code, given the current test suite.

    Returns a completion dict with ``matches`` and the
    ``cursor_start``/``cursor_end`` span they replace (Jupyter protocol).
    """
    # Avoid mutable default arguments shared between calls.
    extra_libraries = [] if extra_libraries is None else extra_libraries
    drivers = [] if drivers is None else drivers
    context = detect_robot_context(code, cursor_pos)
    # Bug fix: the previous "cursor_pos is None and len(code) or cursor_pos"
    # idiom yielded None (not 0) for empty code; use a real conditional,
    # matching the style already used in inspect().
    cursor_pos = len(code) if cursor_pos is None else cursor_pos
    line, offset = line_at_cursor(code, cursor_pos)
    line_cursor = cursor_pos - offset
    # The token being completed: last cell on the line left of the cursor.
    needle = re.split(r"\s{2,}|\t| \| ", line[:line_cursor])[-1].lstrip()
    if logger is not None:
        logger.debug("Completing text: %s", needle)
    library_completion = context == "__settings__" and any(
        [
            line.lower().startswith("library "),
            "import library " in line.lower(),
            "reload library " in line.lower(),
            "get library instance" in line.lower(),
        ]
    )
    matches = []
    # Try to complete a variable
    if needle and needle[0] in "$@&%":
        if logger is not None:
            logger.debug("Context: Variable")
        potential_vars = list(set(
            [var.name for var in suite.resource.variables] +
            VARIABLE_REGEXP.findall(code) +
            BUILTIN_VARIABLES
        ))
        matches = [
            m["ref"]
            for m in scored_results(needle, [dict(ref=v) for v in potential_vars])
            if needle.lower() in m["ref"].lower()
        ]
        # Swallow an existing closing brace right of the cursor.
        if len(line) > line_cursor and line[line_cursor] == "}":
            cursor_pos += 1
            needle += "}"
    # Try to complete a library name
    elif library_completion:
        if logger is not None:
            logger.debug("Context: Library name")
        needle = needle.lower()
        needle = remove_prefix(needle, 'library ')
        needle = remove_prefix(needle, 'import library ')
        needle = remove_prefix(needle, 'reload library ')
        needle = remove_prefix(needle, 'get library instance ')
        matches = complete_libraries(needle, extra_libraries)
    # Try to complete a CSS selector
    elif is_selector(needle):
        if logger is not None:
            logger.debug("Context: Selenium or Appium selector")
            logger.debug("Current WebDrivers: %s", drivers)
        matches = []
        for driver in yield_current_connection(drivers, SeleniumConnectionsListener.NAMES + ["jupyter", "appium"]):
            matches = [get_selector_completions(needle.rstrip(), driver)[0]]
    # Try to complete an AutoIt selector
    elif is_autoit_selector(needle):
        if logger is not None:
            logger.debug("Context: AutoIt selector")
        matches = [get_autoit_selector_completions(needle)[0]]
    # Try to complete a white selector
    elif is_white_selector(needle):
        if logger is not None:
            logger.debug("Context: WhiteLibrary selector")
        matches = [get_white_selector_completions(needle)[0]]
    # Try to complete a Windows selector
    elif is_win32_selector(needle):
        if logger is not None:
            logger.debug("Context: Win32 selector")
        matches = [get_win32_selector_completions(needle)[0]]
    # Try to complete a keyword
    elif keywords_listener is not None:
        if logger is not None:
            logger.debug("Context: Keywords or Built-ins")
        matches = get_lunr_completions(
            needle,
            keywords_listener.index,
            keywords_listener.keywords,
            context
        )
    if logger is not None:
        logger.debug("Available completions: %s", matches)
    return {
        "matches": matches,
        "cursor_end": cursor_pos,
        "cursor_start": cursor_pos - len(needle)
    }
def inspect(code: str, cursor_pos: int, suite: TestSuite, keywords_listener: RobotKeywordsIndexerListener = None, detail_level=0, logger=None):
    """Inspect the token under the cursor and return its keyword
    documentation as a Jupyter inspection dict: ``{"data", "found"}``.
    """
    cursor_pos = len(code) if cursor_pos is None else cursor_pos
    line, offset = line_at_cursor(code, cursor_pos)
    line_cursor = cursor_pos - offset
    # Token under the cursor: join the cell halves left and right of it.
    left_needle = re.split(r"\s{2,}|\t| \| ", line[:line_cursor])[-1]
    right_needle = re.split(r"\s{2,}|\t| \| ", line[line_cursor:])[0]
    needle = left_needle.lstrip().lower() + right_needle.rstrip().lower()
    if logger is not None:
        logger.debug("Inspecting text: %s", needle)
    results = []
    data = {}
    found = False
    # Bug fix: guard against the documented default keywords_listener=None,
    # which previously caused an AttributeError on ``.index`` below.
    if needle and keywords_listener is not None and lunr_query(needle):
        query = lunr_query(needle)
        results = keywords_listener.index.search(query)
        results += keywords_listener.index.search(query.strip("*"))
    for result in results:
        keyword = keywords_listener.keywords[result["ref"]]
        # Only exact name/ref matches yield documentation.
        if needle not in [keyword.name.lower(), result["ref"].lower()]:
            continue
        data = get_keyword_doc(keyword)
        found = True
        break
    if logger is not None:
        logger.debug("Inspection data: %s", data)
    return {
        "data": data,
        "found": found,
    }
def shutdown_drivers(drivers=None):
    """Quit every driver instance that exposes a ``quit`` method.

    `drivers` list of dicts with an "instance" key; defaults to no drivers.
    (The previous mutable-list default is replaced by a None sentinel.)
    """
    for driver in (drivers or []):
        instance = driver["instance"]
        if hasattr(instance, "quit"):
            instance.quit()
def strip_duplicate_items(items: ItemList):
    """Remove duplicates (by name) from an item list in place, keeping the
    last occurrence of each name in first-seen name order."""
    seen = {}
    for entry in items:
        seen[entry.name] = entry
    items._items = [entry for entry in seen.values()]
def clean_items(items: ItemList):
    """Drop every element from an item list (in place)."""
    items._items = []
def set_items(items: ItemList, value: List):
    """Replace the elements of an item list with ``value`` (in place).

    (The previous docstring was a copy-paste of clean_items' "Remove
    elements..." and described the wrong operation.)
    """
    items._items = value
def get_items_copy(items: ItemList):
    """Return a shallow copy of the item list's backing list."""
    return items._items[:]
def get_rpa_mode(model):
    """Derive the RPA flag from a parsed model: the shared ``tasks`` flag
    when all sections agree, None when there is nothing to decide, and a
    DataError when tests and tasks are mixed in one file."""
    if not model:
        return None
    flags = [section.tasks
             for section in model.sections if hasattr(section, 'tasks')]
    if not flags:
        return None
    if all(flags) or not any(flags):
        return flags[0]
    raise DataError('One file cannot have both tests and tasks.')
import requests
import logging
import subprocess
import json
import os
import robot
import time
from robot.variables import GLOBAL_VARIABLES
from robot.api import logger
from urlparse import urljoin
# Directory containing this module; used to locate bundled resources.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Python 2 idiom: executing version.py defines VERSION in this namespace.
execfile(os.path.join(THIS_DIR, 'version.py'))
__version__ = VERSION
# Mapping from orientation name to the device rotation in degrees.
ORIENTATIONS = {
    "down": 0,
    "right": 90,
    "left": 270,
    "up": 180
}
# Inverse mapping: rotation in degrees back to the orientation name.
ORIENTATIONS_REV = {
    0: "down",
    90: "right",
    180: "up",
    270: "left"
}
# Default install location of the iOS Simulator shipped with Xcode.
DEFAULT_SIMULATOR = ("/Applications/Xcode.app/Contents/Applications/" +
                     "iPhone Simulator.app/Contents/MacOS/iPhone Simulator")
class IOSLibraryException(Exception):
    """Base error raised for all IOSLibrary keyword failures."""
class IOSLibrary(object):
    """Robot Framework keyword library driving an instrumented iOS app
    (Calabash-style test server) and the iOS Simulator."""

    ROBOT_LIBRARY_VERSION = VERSION
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self, device_endpoint='localhost:37265'):
        """
        Initialize the IOSLibrary.

        `device_endpoint` endpoint of the test server (the instrumented app).
        Optional if you are running tests on the local machine against the
        simulator.
        """
        if device_endpoint:
            # NOTE(review): when device_endpoint is falsy, self._url is never
            # set, so the HTTP helpers would fail -- confirm this is intended.
            self.set_device_url('http://%s/' % device_endpoint)
        self._screenshot_index = 0
        # Current rotation in degrees; 0 corresponds to "down" (portrait).
        self._current_orientation = 0
        # Optional waxsim launcher discovered on $PATH (None if absent).
        self._waxsim = self._find_waxsim()
        # Basic-auth credentials, configured via set_basic_auth.
        self._username = None
        self._password = None
        if os.path.exists(DEFAULT_SIMULATOR):
            self.set_simulator(DEFAULT_SIMULATOR)
        self._device = "iPhone"
        self._ios_major_version = 5
def set_device_url(self, url):
    """
    Set the device url where the application is started.

    `url` base url used as the prefix for every request to the test server.
    """
    self._url = url
def set_basic_auth(self, username, password):
    '''
    Set basic authentication to use with all further API calls

    username is the username to authenticate with, e.g. 'Aladdin'
    password is the password to use, e.g. 'open sesame'
    '''
    # NOTE(review): an identical set_basic_auth is defined again later in
    # this class; that later definition is the one Python keeps.
    self._username = username
    self._password = password
def _find_waxsim(self):
    """Search the directories on $PATH for a 'waxsim' executable and
    return its full path, or None when it is not installed."""
    for directory in os.environ['PATH'].split(os.pathsep):
        if not os.path.exists(directory):
            continue
        if 'waxsim' in os.listdir(directory):
            return os.path.join(directory, 'waxsim')
    return None
def set_simulator(self, simulator_path=DEFAULT_SIMULATOR):
    """
    Set the path where the iOS Simulator is found.

    If the iOS Simulator is at the default location, you don't need to call
    this. However, if you are using beta release of XCode, you can choose
    which simulator to use.

    `simulator_path` fully qualified path to the iOS Simulator executable.
    """
    self._simulator = simulator_path
def set_device(self, device_name="iPhone"):
    """
    Set the device. This is used for the simulator as well as for choosing
    the correct gestures.

    `device_name` The device that is connected. Valid values are:
    "iPhone", "iPad", "iPhone (Retina)" and "iPad (Retina)"
    """
    valid_devices = ("iPhone", "iPad", "iPhone (Retina)", "iPad (Retina)")
    assert device_name in valid_devices, (
        "%s is not in %r, but should be." % (device_name, valid_devices))
    self._device = device_name
def set_ios_version(self, ios_major_version=5):
    """
    Set the iOS Version used for sending the correct gestures.

    `ios_major_version` The major iOS version of the connected device.
    Valid values are: 4, 5, 6; must be a number.
    """
    # Bug fix: the assert previously referenced the undefined names
    # ios_version, device_name and allowed, so any call raised NameError
    # instead of validating the argument.
    assert type(ios_major_version) is int, (
        "%r is not a number, but should be." % (ios_major_version,))
    self._ios_major_version = ios_major_version
def _get_app_and_binary(self, app_path):
    """Split ``app_path`` into the .app bundle directory and the binary
    inside it.

    Accepts either the bundle itself ("Foo.app") or the binary path
    ("Foo.app/Foo"). Returns ``(app_path, binary)``; ``binary`` is None
    for unrecognized extensions.
    """
    filename, ext = os.path.splitext(app_path)
    binary = None
    if ext == '.app':
        # NOTE(review): os.path.join discards the first argument when
        # ``filename`` is absolute, producing a path *outside* the bundle
        # for absolute app paths -- verify the intended behaviour.
        binary = os.path.join(app_path, filename)
    elif ext == '':
        app_path = os.path.dirname(app_path)
        binary = filename
    return app_path, binary
def _check_simulator(self):
    """Assert that the configured simulator binary or waxsim exists."""
    simulator_ok = os.path.exists(self._simulator)
    waxsim_ok = bool(self._waxsim) and os.path.exists(self._waxsim)
    assert simulator_ok or waxsim_ok, (
        "neither simulator at %s nor waxsim could be found"
        % self._simulator)
def start_simulator(self, app_path, sdk='5.1'):
    """
    Starts the App found at `app_path` in the iOS Simulator.

    `app_path` Path to the binary of the App to start.
    `sdk` iOS SDK version handed to waxsim (only used when waxsim exists).
    """
    self._check_simulator()
    app_path = os.path.expanduser(app_path)
    assert os.path.exists(app_path), \
        "Couldn't find app bundle or binary at %s" % app_path
    cmd = []
    app_path, binary = self._get_app_and_binary(app_path)
    if not self._waxsim:
        # Fallback: launch the binary directly in the simulator; in this
        # mode the app is *not* installed on the simulator.
        assert binary, "Could not parse app binary name"
        assert os.path.exists(binary), \
            "Could not find app binary at %s" % app_path
        logging.warning("Waxsim not found, execute app without installing it in simulator")
        cmd = [self._simulator,
               '-SimulateDevice',
               self._device,
               '-SimulateApplication',
               binary]
    else:
        cmd = [self._waxsim,
               '-s',
               sdk,
               '-f',
               self._device.lower(),
               app_path]
    # Keep stderr in waxsim.log for debugging; the process handle is kept
    # so stop_simulator can wait on it.
    with open("waxsim.log", "w") as logfile:
        self._simulator_proc = subprocess.Popen(cmd, stderr=logfile)
def reset_simulator(self):
    """
    Reset the simulator. Warning the simulator should run
    """
    script = os.path.join(
        os.path.dirname(__file__), 'resources', "reset.applescript")
    with open("reset_sim.log", "w") as logfile, \
            open("reset_sim.err.log", "w") as errfile:
        self._reset = subprocess.Popen(
            ["osascript", script], stdout=logfile, stderr=errfile)
def stop_simulator(self):
    """
    Stops a previously started iOS Simulator.
    """
    # Ask the simulator to quit via AppleScript, then wait for both the
    # helper shell and the simulator process started earlier to exit.
    # NOTE(review): the surrounding backticks make the shell execute the
    # *output* of osascript (presumably empty) -- confirm they are intended.
    cmd = "`echo 'application \"iPhone Simulator\" quit' | osascript`"
    stop_proc = subprocess.Popen(cmd, shell=True)
    stop_proc.wait()
    self._simulator_proc.wait()
def set_basic_auth(self, username, password):
    '''
    Set basic authentication to use with all further API calls

    username is the username to authenticate with, e.g. 'Aladdin'
    password is the password to use, e.g. 'open sesame'
    '''
    # NOTE(review): duplicate of the set_basic_auth defined earlier in this
    # class; this later definition is the one Python keeps.
    self._username = username
    self._password = password
def is_device_available(self):
    """
    Succeeds if the test server is available for receiving commands.

    This is best used with the `Wait Until Keyword Succeeds` keyword from
    the BuiltIn library like this:

    Example:
    | Wait Until Keyword Succeeds | 1 minute | 10 seconds | Is device available |
    """
    root_logger = logging.getLogger()
    previous_loglevel = root_logger.getEffectiveLevel()
    # Silence request logging while probing the device.
    root_logger.setLevel(logging.ERROR)
    status_code = 0
    try:
        # (The original wrapped this in "except: raise", which re-raised
        # unchanged -- try/finally alone is behaviorally identical.)
        status_code = self._get('version').status_code
    finally:
        root_logger.setLevel(previous_loglevel)
    assert status_code == 200, "Device is not available"
def _post(self, endp, request, **kwargs):
    """POST ``request`` (a JSON string) to endpoint ``endp`` on the device
    and return the response.

    Applies basic auth when both username and password are configured.
    """
    # Bug fix: the condition previously tested self._username twice, so a
    # configured password was never checked before applying auth.
    if self._username is not None and self._password is not None:
        kwargs['auth'] = (self._username, self._password)
    url = urljoin(self._url, endp)
    res = requests.post(url, data=request, headers={
        'Content-Type': 'application/json;charset=utf-8'
    }, **kwargs)
    return res
def _get(self, endp, **kwargs):
    """GET ``endp`` from the device; asserts HTTP 200 and returns the
    response.

    Applies basic auth when both username and password are configured.
    """
    # Bug fix: the condition previously tested self._username twice, so a
    # configured password was never checked before applying auth.
    if self._username is not None and self._password is not None:
        kwargs['auth'] = (self._username, self._password)
    res = requests.get(urljoin(self._url, endp), **kwargs)
    assert res.status_code == 200, (
        "Device sent http status code %d" % res.status_code)
    return res
def _map(self, query, method_name, args=None):
    """Invoke ``method_name(*args)`` on every element matching ``query``
    via the device's "map" endpoint and return the list of results.

    Raises IOSLibraryException when the device reports a non-SUCCESS
    outcome.
    """
    if args is None:
        args = []
    data = json.dumps({
        "query": query,
        "operation": {
            "arguments": args,
            "method_name": method_name
        }
    })
    res = self._post("map", data)
    logging.debug("<< %r %r", res.status_code, res.text)
    res = self._parse_json(res.text)
    if res['outcome'] != 'SUCCESS':
        raise IOSLibraryException('map %s failed because: %s \n %s' %
                                  (query, res['reason'], res['details']))
    return res['results']
def _parse_json(self, to_parse):
    """Parse ``to_parse`` as JSON, wrapping parse failures in
    IOSLibraryException."""
    try:
        return json.loads(to_parse)
    except ValueError as e:
        raise IOSLibraryException(
            "Testserver response '%s' couldn't be parsed as json: %s"
            % (to_parse, e.message))
def _screenshot(self, filename=None, relative_url='screenshot'):
    """Fetch a screenshot from the device and embed it into the log.

    `filename` target file; auto-numbered when omitted.
    `relative_url` device endpoint serving the screenshot image.
    """
    res = self._get(relative_url)
    path, link = self._get_screenshot_paths(filename)
    # Bug fix: open in binary mode -- the response content is image data,
    # and text mode would corrupt it on platforms with newline translation.
    with open(path, 'wb') as f:
        f.write(res.content)
    logger.info('</td></tr><tr><td colspan="3"><a href="%s">'
                '<img src="%s"></a>' % (link, link), True, False)
def _load_playback_data(self, recording):
    """Load a base64-encoded gesture recording from the resources folder.

    Plain names are expanded to "<name>_ios<major>_<device>.base64";
    names already ending in ".base64" are used as-is.
    """
    if not recording.endswith(".base64"):
        recording = "%s_ios%d_%s.base64" % (
            recording,
            self._ios_major_version,
            self._device.split(" ")[0].lower())
    p = os.path.join(
        os.path.join(os.path.dirname(__file__), 'resources'),
        recording
    )
    if os.path.exists(p):
        with open(p, 'r') as f:
            return f.read()
    elif self._ios_major_version == 6:
        # we can reuse most of the ios5 gestures on ios6
        # NOTE(review): str.replace substitutes *every* "6" in the file
        # name, not only the version digit -- verify device names never
        # contain a 6.
        return self._load_playback_data(recording.replace("6", "5"))
    else:
        raise IOSLibraryException('Playback not found: %s' % p)
def _playback(self, recording, options=None):
    """Send a pre-recorded gesture to the device 'play' endpoint.

    `recording` base name of the recording (see _load_playback_data).
    `options` extra keys merged into the POST payload (e.g. "query").
    Raises IOSLibraryException when the device reports a failure.
    """
    data = self._load_playback_data(recording)
    post_data = {
        "events": data
    }
    if options:
        post_data.update(options)
    res = self._post('play', json.dumps(post_data))
    fail = False
    # Bug fix: this previously built a tuple ("...%s", status_code) instead
    # of interpolating the status code into the message.
    error_msg = "device url sent status code %s" % res.status_code
    if res.status_code != 200:
        fail = True
    try:
        # NOTE(review): _parse_json wraps ValueError in IOSLibraryException,
        # so the except below only covers a raw json ValueError path.
        jres = self._parse_json(res.text)
        if jres['outcome'] != 'SUCCESS':
            fail = True
            error_msg = "%s %s" % (jres['reason'], jres['details'])
    except ValueError:
        pass
    if fail:
        raise IOSLibraryException('playback failed because: %s' % error_msg)
    return res
def _rotate_to(self, orientation, direction="left"):
    """Rotate the device to ``orientation`` (absolute degrees) by playing
    the matching pre-recorded rotation gesture.

    `direction` the rotation direction for the gesture ("left"/"right").
    """
    orientation = self._reduce_degrees(orientation)
    self._current_orientation = orientation
    # The recording is named after where "home" ends up; offset the target
    # angle by the rotation direction. TODO(review): confirm the +90/+270
    # offsets against the bundled recordings.
    if direction == "right":
        orientation += 90
    elif direction == "left":
        orientation += 270
    orientation = self._reduce_degrees(orientation)
    orientation = ORIENTATIONS_REV[orientation]
    playback = "rotate_%s_home_%s" % (direction, orientation)
    self._playback(playback)
    # Give the simulator a moment to finish the rotation animation.
    time.sleep(1)
def _reduce_degrees(self, degrees):
    """Normalize an angle in degrees to the range [0, 360)."""
    # Python's modulo always yields a non-negative result for a positive
    # divisor, matching the original add/subtract-360 loops.
    return degrees % 360
def _element_exists(self, query):
    """Return True when at least one UI element matches ``query``."""
    return bool(self.query(query))
def _get_webview_html(self, query=None, index=None):
    """Return the HTML body of the index-th webview matching ``query``.

    Raises IOSLibraryException when no such webview exists.
    """
    index = int(index) if index else 0
    query = query or ""
    res = self.query("webView " + (query and query + " " or "") + "css:'body'")
    # Bug fix: guard the index before subscripting -- res[index] used to
    # raise a bare IndexError for out-of-range indices instead of the
    # library's own exception.
    if not res or index >= len(res) or not res[index]:
        raise IOSLibraryException("No WebView with index %i found" % index)
    return res[index]["html"]
def query(self, query):
    """
    Search for a UIElement matching `query` and return the matches.

    `query` query selector. The available syntax is documented here https://github.com/calabash/calabash-ios/wiki/05-Query-syntax
    """
    return self._map(query, "query")
def query_all(self, query):
    """
    Search for all UIElements matching `query`, including hidden ones.

    `query` query selector. The available syntax is documented here https://github.com/calabash/calabash-ios/wiki/05-Query-syntax
    """
    return self._map(query, "query_all")
def _pinch(self, in_out, options=None):
    """Play a pinch gesture; ``in_out`` is "in" or "out".

    `options` optional extra payload (e.g. {"query": ...}) for _playback.
    (The previous mutable ``options={}`` default is replaced by None.)
    """
    gesture = "pinch_out" if in_out == "out" else "pinch_in"
    self._playback(gesture, options)
# BEGIN: STOLEN FROM SELENIUM2LIBRARY
def _get_log_dir(self):
    """Return the directory Robot writes its log file to, falling back to
    the output directory when no log file is configured."""
    logfile = GLOBAL_VARIABLES['${LOG FILE}']
    if logfile != 'NONE':
        return os.path.dirname(logfile)
    return GLOBAL_VARIABLES['${OUTPUTDIR}']
def _get_screenshot_paths(self, filename):
    """Return ``(absolute path, log-relative link)`` for a screenshot file.

    When `filename` is omitted, an auto-numbered name inside a
    "screenshots" sub-directory of the log dir is generated (and the
    directory created if needed).
    """
    logdir = self._get_log_dir()
    if not filename:
        self._screenshot_index += 1
        filename = 'ios-screenshot-%d.png' % self._screenshot_index
        filename = os.path.join('screenshots', filename)
        screen_dir = os.path.join(logdir, 'screenshots')
        if not os.path.exists(screen_dir):
            os.mkdir(screen_dir)
    else:
        # Normalize user-supplied paths to the host's separator.
        filename = filename.replace('/', os.sep)
    path = os.path.join(logdir, filename)
    link = robot.utils.get_link_path(path, logdir)
    return path, link
# END: STOLEN FROM SELENIUM2LIBRARY
# DEFINITIONS
def touch(self, query):
    """
    Touch element specified by query

    `query` selector of the element to touch. The available syntax is documented here https://github.com/calabash/calabash-ios/wiki/05-Query-syntax
    """
    return self._playback("touch", {"query": query})
def touch_position(self, x=0, y=0):
    """
    Simulate a touch at the specified position

    `x` X-Coordinate of the position to touch
    `y` Y-Coordinate of the position to touch
    """
    offset = {"x": x, "y": y}
    self._playback("touch", {"offset": offset})
def capture_screenshot(self, filename=None, relative_url='screenshot'):
    """
    Captures a screenshot of the current screen and embeds it
    in the test report

    `filename` Location where the screenshot will be saved. If omitted a unique filename will be chosen.
    `relative_url` URL part, relative to the device endpoint. For the standard setup the default value is sufficient.
    """
    self._screenshot(filename, relative_url)
def toggle_switch(self, name=None):
    """
    Toggle a switch

    `name` Name of the switch to toggle; the first switch when omitted.
    """
    selector = "switch" if not name else "switch marked:'%s'" % name
    self.touch(selector)
def touch_text(self, placeholder=None):
    """
    Touch a Textfield

    `placeholder` of textField to touch; the first textField when omitted.
    """
    selector = "textField" if not placeholder else (
        "textField placeholder:'%s'" % placeholder)
    self.touch(selector)
def set_text(self, value, query="textField"):
    """
    Set the value of a textField

    `value` the new value of the textField
    `query` query selector to find the textField that will be set to the new value
    """
    modified = self._map(query, "setText", [value])
    if not modified:
        raise IOSLibraryException("could not find text field %s" % query)
def go_back(self):
    """
    Touch the first Navigationitem in a Navigation Bar

    (Equivalent to tapping the back button of the navigation controller.)
    """
    self.touch("navigationItemButtonView first")
def rotate(self, direction):
    """
    Rotate the simulator

    `direction` The direction to rotate the simulator in. Valid values are "left" and "right".
    """
    deltas = {"right": -90, "left": 90}
    if direction not in deltas:
        raise IOSLibraryException("not a valid direction %s" % direction)
    self._current_orientation += deltas[direction]
    self._rotate_to(self._current_orientation, direction)
def set_device_orientation_to(self, orientation, direction="left"):
    """
    Set orientation of the simulator

    `orientation` The final orientation the simulator should have afterwards. Valid values are "up", "down", "left", "right".
    `direction` The direction to rotate the simulator in until it reached the final orientation. Valid values are "left" and "right".
    """
    self._rotate_to(ORIENTATIONS[orientation], direction)
def scroll(self, direction, query="scrollView index:0"):
    """
    Scroll the view.

    `direction` direction to scroll in. Valid values are "up", "down", "left", "right"
    `query` selector of the view to scroll in. Defaults to the first scrollView.
    """
    scrolled = self._map(query, "scroll", [direction])
    if not scrolled:
        raise IOSLibraryException("could not find view to scroll: %s" %
                                  query)
def pinch(self, direction, query=None):
    """
    Pinch in or out.

    `direction` to pinch. Valid values are "in" and "out".
    `query` selector of the element to pinch on
    """
    self._pinch(direction, {"query": query} if query else {})
def swipe(self, direction, query=None):
    """
    Swipe.

    `direction` The direction to swipe in. Valid values are "up", "down", "left", "right"
    `query` query identifiying the element of the screen to be swiped on, e.g. "view marked:'foo'"
    """
    # Compensate for the current device rotation so the swipe happens in
    # screen coordinates rather than device coordinates.
    degrees = ORIENTATIONS[direction]
    direction = (360 - self._current_orientation) + degrees
    direction = self._reduce_degrees(direction)
    direction = ORIENTATIONS_REV[direction]
    options = {}
    if query:
        options["query"] = query
    self._playback("swipe_%s" % direction, options)
def screen_should_contain_text(self, expected):
    """
    Asserts that the current screen contains a given text

    `expected` The text that should be on the screen
    """
    escaped = expected.replace("'", r"\'")
    if not self._element_exists("view {text LIKE '*%s*'}" % escaped):
        raise IOSLibraryException("No text %s found" % expected)
def screen_should_contain(self, expected):
    """
    Asserts that the current screen contains a given element
    specified by name or query

    `expected` String or View that should be on the current screen
    """
    found = (self._element_exists("view marked:'%s'" % expected)
             or self._element_exists(expected))
    if not found:
        raise IOSLibraryException("No element found with mark or text %s" %
                                  expected)
def screen_should_contain_query(self, query):
    """
    Asserts that the current screen contains a given element
    specified by query.

    `query` Element query that should be on the current screen
    """
    if self._element_exists(query):
        return
    raise IOSLibraryException(
        "No element found with query '%s'" % query)
def webview_should_contain(self, expected, index=0, query=None):
    """
    Asserts that the current webview contains a given text

    `expected` text that should be in the webview
    `index` index of the webView
    `query` query to find the webview (e.g. "marked:'Tears in Heaven'", for full query syntax see https://github.com/calabash/calabash-ios/wiki/05-Query-syntax
    """
    html = self._get_webview_html(query, index)
    if expected not in html:
        raise IOSLibraryException("%s not found in webView" % expected)
def webview_should_not_be_empty(self, index=0, query=None):
    """
    Asserts that the current webview is not empty

    `index` index of the webView
    `query` query to find the webview (e.g. "marked:'Tears in Heaven'", for full query syntax see https://github.com/calabash/calabash-ios/wiki/05-Query-syntax
    """
    html = self._get_webview_html(query, index)
    if not html:
        raise IOSLibraryException("Webview is empty")
# END: DEFINITIONS
import time
import array
from robot import utils
from robot.utils import asserts
from robot.utils.connectioncache import ConnectionCache
from robot.output import LOGGER
from robot.output.loggerhelper import Message
from .utils import int_any_base
from .mapping import *
class Sdr:
    """Keywords for reading and managing Sensor Data Records (SDRs)."""

    def set_sdr_source(self, source):
        """Select the SDR source. This specifies if the keywords that fetch and
        check SDRs collect the SDR data from the sensor device or from the SDR
        repository device.

        Example:
        | Set SDR Source | Sensor Device |
        | Set SDR Source | SDR Repository |
        """
        source = source.lower()
        # Bug fix: an invalid source used to be silently ignored, leaving
        # the previously configured source active; fail loudly instead,
        # matching the RuntimeError the fetch helpers raise for bad state.
        if source not in ('sensor device', 'sdr repository'):
            raise RuntimeError('invalid SDR source: %s' % source)
        self._cp['sdr_source'] = source
def _get_sdr_list(self):
if self._cp['sdr_source'] == 'sensor device':
return self._ipmi.get_device_sdr_list()
elif self._cp['sdr_source'] == 'sdr repository':
return self._ipmi.get_repository_sdr_list()
else:
raise RuntimeError
def _sdr_entries(self):
if self._cp['sdr_source'] == 'sensor device':
return self._ipmi.device_sdr_entries()
elif self._cp['sdr_source'] == 'sdr repository':
return self._ipmi.sdr_repository_entries()
else:
raise RuntimeError
def get_sdr_repository_info(self):
"""Returns the SDR Repository Info.
"""
return self._ipmi.get_sdr_repository_info()
def get_sdr_repository_allocation_info(self):
"""Returns the SDR Repository Allocation Info.
"""
return self._ipmi.get_sdr_repository_allocation_info()
def reserve_sdr_repository(self):
"""Returns the SDR Repository Reservation Id.
"""
return self._ipmi.reserve_sdr_repository()
def clear_sdr_repository(self):
"""Clear the SDR repository and wait until erasure is finished.
"""
return self._ipmi.clear_sdr_repository()
def delete_sdr(self, record_id):
"""Delete the SDR from repository specified by 'record_id'.
"""
record_id = int_any_base(record_id)
return self._ipmi.delete_sdr(record_id)
def run_initialization_agent(self):
self._ipmi.start_initialization_agent()
def get_initialization_agent_status(self):
return self._ipmi.get_initialization_agent_status()
def partial_add_sdr(self, reservation_id,
record_id, offset, progress, data):
record_id = int_any_base(record_id)
offset = int_any_base(offset)
progress = int_any_base(progress)
if isinstance(data, basestring):
data = [int_any_base(d) for d in data.split(' ')]
elif isinstance(data, list):
data = data
else:
data = [int_any_base(data)]
data = array.array('c', [chr(c) for c in data])
return self._ipmi.partial_add_sdr(
reservation_id, record_id, offset, progress, data)
@property
def _sdr_list(self):
if 'prefetched_sdr_list' in self._cp:
return self._cp['prefetched_sdr_list']
else:
return self._get_sdr_list()
@property
def _selected_sdr(self):
try:
return self._cp['selected_sdr']
except KeyError:
AssertionError('No SDR selected.')
_selected_sdr.setter
def _selected_sdr(self, value):
self._cp['selected_sdr'] = value
def prefetch_sdr_list(self):
if 'prefetched_sdr_list' in self._cp:
del self._cp['prefetched_sdr_list']
self._cp['prefetched_sdr_list'] = self._sdr_list
self._info('Prefetching SDR list')
def log_sdr_list(self):
print('*INFO* SDR list')
for sdr in self._sdr_list:
print(sdr)
def _find_sdr_by_name(self, name):
for sdr in self._sdr_list:
if sdr.device_id_string == name:
return sdr
raise AssertionError('SDR with name "%s" not found in list' % (name))
def _find_sdr_by_record_id(self, sdr_id):
for sdr in self._sdr_list:
if sdr.id == sdr_id:
return sdr
raise AssertionError('SDR with ID "%x" not found' % sdr_id)
def _find_sdr_by_record_type(self, record_type):
for sdr in self._sdr_list:
if sdr.type == record_type:
return sdr
def _find_sdr_by_sensor_type(self, sensor_type):
for sdr in self._sdr_list:
if hasattr(sdr, 'sensor_type_code'):
if sdr.sensor_type_code == sensor_type:
return sdr
def select_sdr_by_record_id(self, record_id):
"""Selects a SDR by its record id.
"""
record_id = int_any_base(record_id)
self._selected_sdr = self._find_sdr_by_record_id(record_id)
def select_sdr_by_name(self, name):
"""Selects a SDR by its name.
"""
self._selected_sdr = self._find_sdr_by_name(name)
def select_sdr_by_record_type(self, record_type):
"""Selects SDR by its record type.
`record_type`
"""
record_type = find_sdr_record_type(record_type)
self._selected_sdr = self._find_sdr_by_record_type(record_type)
def select_sdr_by_sensor_type(self, sensor_type):
"""Selects SDR by its sensor type.
`sensor_type`
"""
sensor_type = find_sdr_sensor_type(sensor_type)
self._selected_sdr = self._find_sdr_by_sensor_type(sensor_type)
def selected_sdr_name_should_be_equal(self, expected_name, msg=None):
"""Fails unless the name of the selected sensor matches the given one.
"""
actual_name = self._selected_sdr.device_id_string
asserts.assert_equal(expected_name, actual_name, msg)
def selected_sdr_sensor_state_should_be_equal(self, expected_state,
mask=0x7fff, msg=None):
"""Fails unless the state of the selected senor matches the given
one.
"""
expected_state = int_any_base(expected_state)
mask = int_any_base(mask)
self.sensor_state_should_be_equal(self._selected_sdr.device_id_string,
expected_state, self._selected_sdr, mask, msg)
def selected_sdr_sensor_reading_should_be_equal(self, expected_reading,
msg=None):
"""Fails unless the reading of the selected sensor matches the given
one.
Note: `expected_reading` is the converted value, not the raw reading.
"""
expected_reading = float(expected_reading)
self.sensor_reading_should_be_equal(self._selected_sdr,
expected_reading, msg)
def selected_sdr_entity_id_should_be(self, expected_entity_id, msg=None):
"""Fails unless the entity ID of the selected SDR matches the given
one.
Possible `entity_id`s are: Power Module, Cooling Unit, PICMG Front
Board, PICMG Rear Transition Module, PICMG Advanced MC, PICMG Microtca
Carrier Hub, PICMG Shelf Management Controller, PICMG Filtration Unit,
PICMG Shelf Fru Information
"""
expected_entity_id = find_entity_type_id(expected_entity_id)
actual_entity_id = self._selected_sdr.entity_id
asserts.assert_equal(expected_entity_id, actual_entity_id, msg)
def selected_sdr_entity_instance_should_be(self, expected_entity_instance,
msg=None):
expected_entity_instance = int_any_base(expected_entity_instance)
actual_entity_instance = self._selected_sdr.entity_instance
asserts.assert_equal(expected_entity_instance,
actual_entity_instance, msg)
def selected_sdr_type_should_be(self, expected_sdr_type, msg=None):
"""Fails unless the SDR type of the selected SDR matches the given
one.
Possible `sdr_type`s are: Full Sensor Record, Compact Sensor Record,
Entity Association Record, Fru Device Locator Record, Management
Controller Device Locator Record, Management Controller Confirmation
Record, BMC Message Channel Info Record
"""
expected_sdr_type = find_sdr_record_type(expected_sdr_type)
actual_sdr_type = self._selected_sdr.type
asserts.assert_equal(expected_sdr_type, actual_sdr_type)
def get_sensor_number_for_sensor_name(self, name, sdr=None):
"""Return the sensor number that is specified by name.
"""
self.select_sdr_by_name(name)
if sdr is None:
sdr = self._find_sdr_by_name(name)
return sdr.number
def sensor_state_should_be_equal(self, name, expected_state, sdr=None,
mask=0x7fff, msg=None):
"""Fails unless the sensor state of the sensor with name `name` matches
the given one.
"""
expected_state = int_any_base(expected_state)
mask = int_any_base(mask)
if sdr is None:
sdr = self._find_sdr_by_name(name)
(_, actual_state) = self._ipmi.get_sensor_reading(sdr.number)
# apply mask
expected_state = expected_state & mask
actual_state = actual_state & mask
asserts.assert_equal(expected_state, actual_state, msg)
def sensor_reading_should_be_equal(self, name, expected_reading, msg=None):
"""Fails unless the sensor reading of the sensor with name `name`
matches the given one.
"""
expected_reading = float(expected_reading)
sdr = self._find_sdr_by_name(name)
(raw, _) = ac._ipmi.get_sensor_reading(sdr.number)
if raw is not None:
actual_reading = sdr.convert_sensor_reading(raw)
else:
actual_reading = None
asserts.assert_equal(expected_value, actual_reading, msg)
def sdr_should_be_present(self, name):
"""Fails unless the SDR with the given name is present.
"""
try:
self._find_sdr_by_name(name)
except:
raise AssertionError('Sensor "%s" is not present' % name)
def get_sdr_instance_by_record_id(self, record_id):
"""Returns the SDR object of the SDR with record id `record_id`.
"""
return self._find_sdr_by_record_id(record_id)
def get_sdr_instance(self, name):
"""Returns the SDR object of the SDR with name `name`.
"""
return self._find_sdr_by_name(name)
def get_sensor_number(self, name):
"""Returns the sensor number for the given SDR name.
`name` is the sensor ID string given in the SDR.
"""
sdr = self._find_sdr_by_name(name)
if not sdr.number:
raise RuntimeError('SDR "%s" has no sensor number' % name)
return sdr.number
def get_sensor_reading(self, name):
"""Returns a sensor reading.
`name` is the sensor ID string given in the SDR.
"""
sdr = self._find_sdr_by_name(name)
(raw, _) = self._ipmi.get_sensor_reading(sdr.number)
if raw is not None:
reading = sdr.convert_sensor_raw_to_value(raw)
else:
reading = None
return reading
def get_sensor_state(self, name, sdr=None):
"""Returns the assertion state of a sensor.
`name` is the sensor ID string. See also `Get Sensor Reading`.
"""
if sdr is None:
sdr = self._find_sdr_by_name(name)
(_, states) = self._ipmi.get_sensor_reading(sdr.number)
return states
def _check_valid_threshold_name(self, threshold):
if threshold not in ('lnr', 'lcr', 'lnc', 'unc', 'ucr', 'unr'):
raise RuntimeError('Invalid threshold "%s"' % threshold)
def get_sensor_threshold(self, name, threshold):
"""Returns the current threshold for a sensor.
`name` is the sensor ID string. See also `Get Sensor Reading`.
`threshold` can be one of the following strings: "lnr", "lcr", "lnc",
"unc", "ucr", "unr".
Example:
| ${threshold}= | Get Sensor Threshold | Vcc +12V | lnr |
"""
threshold = threshold.lower()
self._check_valid_threshold_name(threshold)
sdr = self._find_sdr_by_name(name)
thresholds = self._ipmi.get_sensor_thresholds(sdr.number, sdr.owner_lun)
converted_thresholds = {}
for t in ('lnr', 'lcr', 'lnc', 'unc', 'ucr', 'unr'):
if thresholds.has_key(t):
converted_thresholds[t] \
= sdr.convert_sensor_raw_to_value(thresholds[t])
return converted_thresholds[threshold]
def set_sensor_threshold(self, name, threshold, value):
"""Sets the threshold of a sensor.
For the `name` and `threshold` parameters see `Get Sensor Threshold`.
"""
threshold = threshold.lower()
value = float(value)
self._check_valid_threshold_name(threshold)
sdr = self._find_sdr_by_name(name)
thresholds = {}
thresholds[threshold] = sdr.convert_sensor_value_to_raw(value)
self._ipmi.set_sensor_thresholds(sdr.number, sdr.owner_lun,
**thresholds)
def wait_until_sensor_state_is(self, name, state, mask=0x7fff):
"""Wait until a sensor reaches the given state.
`name` is the sensor ID string. See also `Get Sensor Reading`.
"""
state = int_any_base(state)
mask = int_any_base(mask)
start_time = time.time()
while time.time() < start_time + self._timeout:
current_state = self.get_sensor_state(name)
if current_state & mask == state & mask:
self._info('waited %s seconds until state "%s" was reached'
% (time.time()-start_time, state))
return
time.sleep(self._poll_interval)
raise AssertionError('Sensor "%s" did not reach the state "%s" in %s.'
% (name, state, utils.secs_to_timestr(self._timeout)))
def wait_until_sensor_reading_is(self, name, value):
"""Wait until a sensor reaches the given value.
`name` is the sensor ID string. See also `Get Sensor Reading`.
"""
value = float(value)
start_time = time.time()
while time.time() < start_time + self._timeout:
current_reading = self.get_sensor_reading(name)
if current_reading == value:
self._info('waited %s seconds until value "%s" was reached'
% (time.time()-start_time, value))
return
time.sleep(self._poll_interval)
raise AssertionError('Sensor "%s" did not reach the value "%s" in %s.'
% (name, value, utils.secs_to_timestr(self._timeout))) | /robotframework-ipmilibrary-0.3.5.tar.gz/robotframework-ipmilibrary-0.3.5/src/IpmiLibrary/sdr.py | 0.710126 | 0.238063 | sdr.py | pypi |
import struct
import time
from robot import utils
from robot.utils import asserts
from .utils import int_any_base
from .mapping import *
class NotSupportedError(Exception):
    """Raised when a requested IPMI operation is not supported."""
class Sel:
    """Keywords for accessing the IPMI System Event Log (SEL).

    Mixin that expects the library core to provide ``self._ipmi`` (the
    active IPMI session), ``self._cp`` (the per-connection property
    dict), ``self._timeout``, ``self._poll_interval`` and
    ``self._info`` (logging helper).
    """

    @property
    def _sel_records(self):
        # Use the prefetched records if available, otherwise read live.
        if 'prefetched_sel_records' in self._cp:
            return self._cp['prefetched_sel_records']
        else:
            return self._ipmi.get_sel_entries()

    @property
    def _selected_sel_record(self):
        try:
            return self._cp['selected_sel_record']
        except KeyError:
            # The original was missing the `raise`, so a missing
            # selection silently returned None.
            raise AssertionError('No SEL record selected.')

    @_selected_sel_record.setter
    def _selected_sel_record(self, value):
        self._cp['selected_sel_record'] = value

    def _invalidate_prefetched_sel_records(self):
        if 'prefetched_sel_records' in self._cp:
            del self._cp['prefetched_sel_records']

    def prefetch_sel(self):
        """Prefetches the sensor event log.

        Fetching the SEL is required for all further operation on the SEL.
        See `Sel Should Contain X Times Sensor Type`, `Select Sel Record By
        Sensor Type` and `Wait Until Sel Contains Sensor Type`.
        """
        self._info('Prefetching SEL')
        self._invalidate_prefetched_sel_records()
        self._cp['prefetched_sel_records'] = self._ipmi.get_sel_entries()

    def clear_sel(self):
        """Clears the sensor event log."""
        self._invalidate_prefetched_sel_records()
        self._ipmi.clear_sel()

    def get_sel_entries_count(self):
        """Returns the number of entries in SEL."""
        return self._ipmi.get_sel_entries_count()

    def log_sel(self):
        """Dumps the sensor event log and logs it."""
        print('*INFO* SEL')
        for record in self._sel_records:
            print(record)

    def _find_sel_records_by_sensor_type(self, type):
        matches = []
        for record in self._sel_records:
            if record.sensor_type == type:
                matches.append(record)
        return matches

    def _find_sel_records_by_sensor_number(self, number):
        matches = []
        for record in self._sel_records:
            if record.sensor_number == number:
                matches.append(record)
        return matches

    def sel_should_contain_x_entries(self, count, msg=None):
        """Fails if the SEL does not contain `count` entries.
        """
        count = int(count)
        asserts.assert_equal(count, len(self._sel_records), msg)

    def sel_should_contain_x_times_sensor_type(self, type, count, msg=None):
        """Fails if the SEL does not contain `count` times an event with the
        given sensor type.
        """
        type = find_sensor_type(type)
        count = int(count)
        records = self._find_sel_records_by_sensor_type(type)
        asserts.assert_equal(count, len(records), msg)

    def sel_should_contain_sensor_type(self, type, msg=None):
        """Fails unless the SEL contains at least one event with the given
        sensor type.
        """
        type = find_sensor_type(type)
        records = self._find_sel_records_by_sensor_type(type)
        if len(records) == 0:
            # also fixes the stray backtick in the original message
            raise AssertionError(
                    msg or "SEL doesn't contain sensor type %s" % type)

    def sel_should_not_contain_sensor_type(self, type, msg=None):
        """Fails if the SEL contains an event with the given sensor type.
        """
        type = find_sensor_type(type)
        records = self._find_sel_records_by_sensor_type(type)
        if len(records) != 0:
            raise AssertionError(msg or 'SEL contains sensor type %s' % type)

    def wait_until_sel_contains_x_times_sensor_type(self, count, type):
        """Waits until the specified sensor type appears at least `count`
        times within the SEL.

        Note: this keyword invalidates the prefetched SEL records. You have to
        rerun the `Prefetch SEL` keyword.
        """
        type, type_name = find_sensor_type(type), type
        count = int(count)
        self._invalidate_prefetched_sel_records()
        start_time = time.time()
        while time.time() < start_time + self._timeout:
            records = self._find_sel_records_by_sensor_type(type)
            if len(records) >= count:
                self._selected_sel_record = records[0]
                return
            time.sleep(self._poll_interval)
        raise AssertionError('No match found for SEL record type "%s (%s)" in %s.'
                % (type_name, type, utils.secs_to_timestr(self._timeout)))

    def wait_until_sel_contains_x_times_sensor_number(self, count, number):
        """Waits until the specified sensor number appears at least `count`
        times within the SEL.

        Note: this keyword invalidates the prefetched SEL records. You have to
        rerun the `Prefetch SEL` keyword.
        """
        # The argument is a sensor *number*, not a sensor type; the
        # original mistakenly ran it through find_sensor_type().
        number = int_any_base(number)
        count = int(count)
        self._invalidate_prefetched_sel_records()
        start_time = time.time()
        while time.time() < start_time + self._timeout:
            records = self._find_sel_records_by_sensor_number(number)
            if len(records) >= count:
                self._selected_sel_record = records[0]
                return
            time.sleep(self._poll_interval)
        raise AssertionError('No match found for SEL record from num "%d" in %s.'
                % (number, utils.secs_to_timestr(self._timeout)))

    def wait_until_sel_contains_sensor_type(self, type):
        """Wait until the SEL contains at least one record with the given
        sensor type.

        `type` is either an human readable string or the corresponding number.

        The SEL is polled with an interval, which can be set by `Set Poll
        Interval` or by `library loading`.

        The first matching entry is automatically selected, see `Select SEL
        Record By Sensor Type`.

        Note: this keyword invalidates the prefetched SEL records. You have to
        rerun the `Prefetch SEL` keyword.

        Example:
        | Set Timeout | 5 seconds |
        | Wait Until SEL Contains Sensor Type | 0x23 |
        | Wait Until SEL Contains Sensor Type | Voltage |
        """
        self.wait_until_sel_contains_x_times_sensor_type(1, type)

    def select_sel_record_at_offset(self, offset):
        """Selects a SEL record at offset.
        """
        offset = int_any_base(offset)
        self._selected_sel_record = self._sel_records[offset]

    def select_sel_record_by_sensor_type(self, type, index=1):
        """Selects a SEL record.

        Selected SEL records can be further examined by the `Selected SEL
        Records X` keywords.

        `type` can be either a string or a number. See `Wait Until SEL
        Contains Sensor Type` for an example.

        If more than one entry match, `index` can be used to select the
        subsequent ones. `index` can also be negative, see Python Sequences
        for more details on this.

        Example:
        | # Selects the first matching SEL entry |
        | Select SEL Record By Sensor Type | 0xcf |
        | # Selects the third matching SEL entry |
        | Select SEL Record By Sensor Type | 0xcf | 3 |
        | # Selects the last matching SEL entry |
        | Select SEL Record By Sensor Type | 0xcf | -1 |
        """
        # (an accidentally pasted table of sensor-type constants was
        # removed from this docstring; the constants live in mapping.py)
        type = find_sensor_type(type)
        index = int(index)
        if index == 0:
            raise RuntimeError('index must not be zero')
        records = self._find_sel_records_by_sensor_type(type)
        if len(records) == 0:
            raise AssertionError(
                    'No SEL record found with sensor type "%s"' % type)
        try:
            if index > 0:
                index -= 1
            self._selected_sel_record = records[index]
        except IndexError:
            raise AssertionError(
                    'Only %d SEL records found with sensor type "%s"' %
                    (len(records), type))

    def select_sel_record_by_sensor_number(self, number, index=1):
        """Selects a SEL record by its sensor number.

        See `Select SEL Record By Sensor Type` for the `index` semantics.
        """
        number = int_any_base(number)
        index = int(index)
        if index == 0:
            raise RuntimeError('index must not be zero')
        records = self._find_sel_records_by_sensor_number(number)
        if len(records) == 0:
            raise AssertionError(
                    'No SEL record found from sensor number "%d"' % number)
        try:
            if index > 0:
                index -= 1
            self._selected_sel_record = records[index]
        except IndexError:
            raise AssertionError(
                    'Only %d SEL records found from sensor number "%d"' %
                    (len(records), number))

    def select_sel_record_by_record_id(self, record_id):
        """Selects a SEL record by its record id.

        NOTE(review): silently leaves the previous selection in place when
        no record matches (original behaviour) — confirm this is intended.
        """
        record_id = int_any_base(record_id)
        for record in self._sel_records:
            if record.record_id == record_id:
                self._selected_sel_record = record
                return

    def selected_sel_records_event_data_should_be_equal(self, expected_value,
            mask=0xffffff, msg=None):
        """Fails if the event data of the selected SEL record does not match
        the given value.

        Example:
        | Select SEL Record By Sensor Type | 0xcf |
        | Selected SEL Records Event Data Should Be Equal | 0xa10101 |
        | Selected SEL Records Event Data Should Be Equal | 0x010000 | 0x0f0000 |
        """
        expected_value = int_any_base(expected_value)
        mask = int_any_base(mask)
        record = self._selected_sel_record
        # apply mask
        expected_value = expected_value & mask
        actual_value = (record.event_data[0] << 16
                | record.event_data[1] << 8
                | record.event_data[2])
        actual_value = actual_value & mask
        # compare as hex strings for a readable failure message
        expected_value = '0x%x' % expected_value
        actual_value = '0x%x' % actual_value
        asserts.assert_equal(expected_value, actual_value, msg)

    def selected_sel_records_event_direction_should_be(self,
            expected_direction, msg=None):
        """Fails if the direction of the selected SEL record does not match
        the given direction.

        `expected_direction` can be: Assertion, Deassertion
        """
        expected_direction = find_event_direction(expected_direction)
        actual_direction = self._selected_sel_record.event_direction
        asserts.assert_equal(expected_direction, actual_direction, msg)

    def selected_sel_record_should_be_from_sensor_number(self, expected_number,
            msg=None):
        """Fails if the sensor number of the selected SEL record does not match
        the given sensor number.
        """
        expected_number = int_any_base(expected_number)
        actual_number = self._selected_sel_record.sensor_number
        asserts.assert_equal(expected_number, actual_number, msg)

    def selected_sel_record_should_be_from_sensor_type(self, expected_type, msg=None):
        """Fails if the sensor type of the selected SEL record does not match
        the given sensor type.
        """
        expected_type = find_sensor_type(expected_type)
        actual_type = self._selected_sel_record.sensor_type
        asserts.assert_equal(expected_type, actual_type, msg)

    def get_sensor_number_from_selected_sel_record(self):
        """Returns the sensor number of the selected SEL record.
        """
        return self._selected_sel_record.sensor_number

    def get_selected_sel_entry_instance(self):
        """Returns the selected SEL entry instance
        """
        return self._selected_sel_record

    def set_event_receiver(self, ipmb_i2c_addr, lun=0):
        """Sets the event receiver to the controller at `ipmb_i2c_addr`/`lun`.
        """
        ipmb_i2c_addr = int_any_base(ipmb_i2c_addr)
        lun = int_any_base(lun)
        self._ipmi.set_event_receiver(ipmb_i2c_addr, lun)

    def get_event_receiver(self):
        """Returns the current event receiver address and LUN.
        """
        return self._ipmi.get_event_receiver()
import array

import pyipmi
from robot import utils
from robot.utils import asserts

from .mapping import *
from .utils import int_any_base
class Bmc:
    """Keywords for talking to the Board Management Controller.

    Mixin that expects the library core to provide ``self._ipmi`` (the
    active IPMI session).
    """

    def issue_bmc_cold_reset(self):
        """Sends a _bmc cold reset_ to the given controller.
        """
        self._ipmi.cold_reset()

    def get_bmc_device_id(self):
        """Sends a _bmc get device id_ command to the given controller.
        """
        return self._ipmi.get_device_id()

    def product_id_should_be(self, product_id):
        """Fails if the GetDeviceID command response does not contain
        the given `product_id`.
        """
        product_id = int_any_base(product_id)
        device_id = self._ipmi.get_device_id()
        asserts.assert_equal(device_id.product_id, product_id)

    def manufacturer_id_should_be(self, manufacturer_id):
        """Fails if the GetDeviceID command response does not contain
        the given `manufacturer_id`.
        """
        manufacturer_id = int_any_base(manufacturer_id)
        device_id = self._ipmi.get_device_id()
        asserts.assert_equal(device_id.manufacturer_id, manufacturer_id)

    def device_should_support(self, supported_function, msg=None):
        """Fails unless the device supports the given function.

        The device can support the following functions:
        'SENSOR', 'SDR_REPOSITORY', 'SEL', 'FRU_INVENTORY',
        'IPMB_EVENT_RECEIVER', 'IPMB_EVENT_GENERATOR', 'BRIDGE', 'CHASSIS'.
        """
        device_id = self._ipmi.get_device_id()
        supports = device_id.supports_function(supported_function)
        asserts.assert_equal(supports, True, msg=msg)

    def device_should_not_support(self, supported_function, msg=None):
        """Fails if the device supports the given function.

        The device can support the following functions:
        'SENSOR', 'SDR_REPOSITORY', 'SEL', 'FRU_INVENTORY',
        'IPMB_EVENT_RECEIVER', 'IPMB_EVENT_GENERATOR', 'BRIDGE', 'CHASSIS'.
        """
        device_id = self._ipmi.get_device_id()
        supports = device_id.supports_function(supported_function)
        asserts.assert_equal(supports, False, msg=msg)

    def i2c_write_read(self, bus_type, bus_id, channel, address, count, *data):
        """Sends a _Master Write-Read_ command to the given bus.

        `data` items may be numbers or numeric strings; each is parsed
        with `int_any_base`. An empty `data` performs a pure read.
        """
        bus_type = int_any_base(bus_type)
        bus_id = int_any_base(bus_id)
        channel = int_any_base(channel)
        address = int_any_base(address)
        count = int_any_base(count)
        # `data` is always a (possibly empty) tuple due to *data;
        # the Python-2-only `basestring` branch and the leftover debug
        # prints were removed.
        data = array.array('B', [int_any_base(d) for d in data])
        rsp = self._ipmi.i2c_write_read(bus_type, bus_id, channel, address,
                count, data)
        return rsp

    def i2c_write(self, bus_type, bus_id, channel, address, *data):
        """Sends a write-only _Master Write-Read_ command to the given bus.
        """
        # Unpack so the items arrive as individual varargs; the original
        # passed the whole tuple as a single element.
        self.i2c_write_read(bus_type, bus_id, channel, address, 0, *data)

    def i2c_read(self, bus_type, bus_id, channel, address, count):
        """Sends a read-only _Master Write-Read_ command to the given bus.
        """
        # No data bytes for a pure read; the original passed a literal
        # None which would have crashed in int_any_base().
        return self.i2c_write_read(bus_type, bus_id, channel, address, count)

    def start_watchdog_timer(self, value, action="Hard Reset",
            timer_use="SMS OS"):
        """Sets and starts IPMI watchdog timer.

        The watchdog is set to `value` and after that it is started.

        The maximum value is 6553 seconds. `value` is given in Robot
        Framework's time format (e.g. 1 minute 20 seconds) that is explained
        in the User Guide.

        `action` can be:
        No Action, Hard Reset, Power Down, Power Cycle

        `timer_use` can be:
        OEM, SMS OS, OS Load, BIOS Post, BIOS Frb2
        """
        timer_use = find_watchdog_timer_use(timer_use)
        config = pyipmi.bmc.Watchdog()
        config.timer_use = timer_use
        config.dont_stop = 1
        config.dont_log = 0
        config.pre_timeout_interval = 0
        config.pre_timeout_interrupt = 0
        config.timer_use_expiration_flags = 0xff
        # convert to 100ms
        config.initial_countdown = int(utils.timestr_to_secs(value) * 10)
        if (config.initial_countdown > 0xffff):
            raise RuntimeError('Watchdog value out of range')
        config.timeout_action = find_watchdog_action(action)
        # set watchdog
        self._ipmi.set_watchdog_timer(config)
        # start watchdog
        self._ipmi.reset_watchdog_timer()

    def reset_watchdog_timer(self):
        """Send the Reset Watchdog Timer Command
        """
        self._ipmi.reset_watchdog_timer()

    def stop_watchdog_timer(self, msg=None):
        """Stops the IPMI watchdog timer.
        """
        config = pyipmi.bmc.Watchdog()
        config.timer_use = pyipmi.bmc.Watchdog.TIMER_USE_OEM
        config.dont_stop = 0
        config.dont_log = 0
        config.pre_timeout_interval = 0
        config.pre_timeout_interrupt = 0
        # 0xff means clear all expiration flags
        config.timer_use_expiration_flags = 0xff
        config.initial_countdown = 0
        config.timeout_action = pyipmi.bmc.Watchdog.TIMEOUT_ACTION_NO_ACTION
        self._ipmi.set_watchdog_timer(config)

    def get_watchdog_timer_countdown_value(self):
        """Returns the present watchdog countdown value."""
        config = self._ipmi.get_watchdog_timer()
        return config.present_countdown

    def watchdog_timeout_action_should_be(self, action, msg=None):
        """Fails if the IPMI Watchdog timeout action is not `action`

        `action` can be:
        No Action, Hard Reset, Power Down, Power Cycle
        """
        action = find_watchdog_action(action)
        config = self._ipmi.get_watchdog_timer()
        asserts.assert_equal(action, config.timeout_action, msg)

    def watchdog_timer_use_should_be(self, timer_use, msg=None):
        """Fails if the IPMI Watchdog timer use is not `timer_use`

        `timer_use` can be:
        OEM, SMS OS, OS Load, BIOS POST, BIOS FRB2
        """
        timer_use = find_watchdog_timer_use(timer_use)
        config = self._ipmi.get_watchdog_timer()
        asserts.assert_equal(timer_use, config.timer_use, msg)

    def watchdog_initial_timeout_value_should_be(self, value, msg=None):
        """Fails if the watchdog initial countdown is not `value`.
        """
        value = int_any_base(value)
        config = self._ipmi.get_watchdog_timer()
        asserts.assert_equal(value, config.initial_countdown, msg)

    def watchdog_should_be_started(self, msg=None):
        """Fails unless the watchdog timer is running."""
        config = self._ipmi.get_watchdog_timer()
        asserts.assert_true(config.is_running, msg)

    def watchdog_should_be_stopped(self, msg=None):
        """Fails if the watchdog timer is running."""
        config = self._ipmi.get_watchdog_timer()
        asserts.assert_false(config.is_running, msg)
import array
from robot.utils import asserts
import pyipmi
from .utils import int_any_base
from .mapping import *
class Fru:
def _fru_data(self, fru_id):
if ('prefetched_fru_data' in self._cp
and fru_id in self._cp['prefetched_fru_data']):
return self._cp['prefetched_fru_data'][fru_id]
else:
return self._ipmi.read_fru_data(fru_id=fru_id)
def prefetch_fru_data(self, fru_id=0):
    """Fetch and cache the FRU data of the given `fru_id`.

    After prefetching, all further FRU operations on this connection use
    the cached data. Note that every connection has its own cache.
    """
    fru_id = int(fru_id)
    cache = self._cp.setdefault('prefetched_fru_data', {})
    cache[fru_id] = self._ipmi.read_fru_data(fru_id=fru_id)
def get_fru_inventory_area_size(self, fru_id=0):
    """Return the FRU Inventory Area Info for `fru_id`."""
    return self._ipmi.get_fru_inventory_area_info(int_any_base(fru_id))
def read_fru_data(self, offset, count, fru_id=0):
    """Read `count` bytes from the FRU data area at `offset`.

    Returns the data as a list of integer byte values.
    """
    fru_id = int(fru_id)
    offset = int_any_base(offset)
    count = int_any_base(count)
    raw = self._ipmi.read_fru_data(offset, count, fru_id)
    # The original assumed a Python 2 byte string and applied ord() to
    # every item; on Python 3 the data may arrive as bytes (whose items
    # are already ints), so normalize both cases.
    if isinstance(raw, (bytes, bytearray)):
        return list(raw)
    return [ord(c) for c in raw]
def write_fru_data(self, offset, data, fru_id=0):
    """Write data bytes to the FRU data area.

    `data` may be a space-separated string of numbers, a list of
    numbers, or a single number; items are parsed with `int_any_base`.
    """
    fru_id = int(fru_id)
    offset = int_any_base(offset)
    # `basestring` does not exist on Python 3; test for `str` instead.
    if isinstance(data, str):
        data = [int_any_base(d) for d in data.split(' ')]
    elif isinstance(data, list):
        # parse the items too (the original passed them through raw,
        # which broke for lists of numeric strings; bmc.py already
        # converts list items this way)
        data = [int_any_base(d) for d in data]
    else:
        data = [int_any_base(data)]
    data = array.array('B', data)
    self._ipmi.write_fru_data(data, offset, fru_id)
def fru_data_at_offset_should_be(self, offset, expected_data, fru_id=0,
        msg=None):
    """Fail unless the FRU data contains `expected_data` at `offset`.

    `expected_data` is a space-separated string of numbers.
    """
    fru_id = int(fru_id)
    offset = int_any_base(offset)
    expected = [int_any_base(d) for d in expected_data.split(' ')]
    actual = self._fru_data(fru_id)[offset:offset + len(expected)]
    # compare via a common representation
    asserts.assert_equal(array.array('B', expected),
                         array.array('B', actual), msg)
def fru_data_tlv_at_offset_should_be(self, offset, expected_type,
        expected_length, expected_data, fru_id=0, msg=None):
    """Fail unless the FRU data contains the TLV triplet at `offset`."""
    offset = int_any_base(offset)
    expected_type = find_fru_field_type_code(expected_type)
    expected_length = int_any_base(expected_length)
    fru_id = int(fru_id)
    # XXX: refactor this, pyipmi already has code for decoding TLVs
    if expected_type == 0:
        # binary data is given as a space-separated number string
        expected_data = [int_any_base(d) for d in expected_data.split(' ')]
    else:
        # text data: compare character codes; array.array('B', <str>)
        # raises TypeError on Python 3, so convert explicitly
        expected_data = [ord(c) for c in str(expected_data)]
    expected_data = array.array('B', expected_data)
    tlv = array.array('B',
            self._fru_data(fru_id)[offset:offset + len(expected_data) + 1])
    # first byte packs type (bits 7:6) and length (bits 5:0)
    asserts.assert_equal(expected_type, (tlv[0] >> 6) & 0x3, msg)
    asserts.assert_equal(expected_length, tlv[0] & 0x3f, msg)
    asserts.assert_equal(expected_data, tlv[1:], msg)
def fru_data_get_inventory(self, fru_id=0):
    """Return the raw FRU data for the given `fru_id`."""
    return self._fru_data(int_any_base(fru_id))
def fru_data_get_board_manufacturer(self, fru_id=0):
    """Return the Board Manufacturer string."""
    inventory = pyipmi.fru.FruInventory(self._fru_data(int_any_base(fru_id)))
    return str(inventory.board_info_area.manufacturer)

def fru_data_board_manufacturer_should_be(self, expected_value, fru_id=0):
    """Fail if the Board Manufacturer differs from `expected_value`."""
    asserts.assert_equal(expected_value,
                         self.fru_data_get_board_manufacturer(fru_id))

def fru_data_get_board_product_name(self, fru_id=0):
    """Return the Board Product Name string."""
    inventory = pyipmi.fru.FruInventory(self._fru_data(int_any_base(fru_id)))
    return str(inventory.board_info_area.product_name)

def fru_data_board_product_name_should_be(self, expected_value, fru_id=0):
    """Fail if the Board Product Name differs from `expected_value`."""
    asserts.assert_equal(expected_value,
                         self.fru_data_get_board_product_name(fru_id))

def fru_data_get_board_serial_number(self, fru_id=0):
    """Return the Board Serial Number string."""
    inventory = pyipmi.fru.FruInventory(self._fru_data(int_any_base(fru_id)))
    return str(inventory.board_info_area.serial_number)

def fru_data_board_serial_number_should_be(self, expected_value, fru_id=0):
    """Fail if the Board Serial Number differs from `expected_value`."""
    asserts.assert_equal(expected_value,
                         self.fru_data_get_board_serial_number(fru_id))

def fru_data_get_board_part_number(self, fru_id=0):
    """Return the Board Part Number string."""
    inventory = pyipmi.fru.FruInventory(self._fru_data(int_any_base(fru_id)))
    return str(inventory.board_info_area.part_number)

def fru_data_board_part_number_should_be(self, expected_value, fru_id=0):
    """Fail if the Board Part Number differs from `expected_value`."""
    asserts.assert_equal(expected_value,
                         self.fru_data_get_board_part_number(fru_id))
def fru_data_get_product_manufacturer(self, fru_id=0):
"""Returns the Product Manufacturer.
"""
fru_id = int_any_base(fru_id)
fru = pyipmi.fru.FruInventory(self._fru_data(fru_id))
return str(fru.product_info_area.manufacturer)
def fru_data_product_manufacturer_should_be(self, expected_value, fru_id=0):
"""Fails if the Product Manufacturer is not as expected.
"""
value = self.fru_data_get_product_manufacturer(fru_id)
asserts.assert_equal(expected_value, value)
def fru_data_get_product_name(self, fru_id=0):
"""Returns the Product Name.
"""
fru_id = int_any_base(fru_id)
fru = pyipmi.fru.FruInventory(self._fru_data(fru_id))
return str(fru.product_info_area.name)
def fru_data_product_name_should_be(self, expected_value, fru_id=0):
"""Fails if the Product Name is not as expected.
"""
value = self.fru_data_get_product_name(fru_id)
asserts.assert_equal(expected_value, value)
def fru_data_get_product_part_number(self, fru_id=0):
"""Returns the Product Part Number.
"""
fru_id = int_any_base(fru_id)
fru = pyipmi.fru.FruInventory(self._fru_data(fru_id))
return str(fru.product_info_area.part_number)
def fru_data_product_part_number_should_be(self, expected_value, fru_id=0):
"""Fails if the Product Part Number is not as expected.
"""
value = self.fru_data_get_product_part_number(fru_id)
asserts.assert_equal(expected_value, value)
def fru_data_get_picmg_multirecord_from_type(self, record_type, index=0, fru_id=0):
"""Returns the PICMG mulirecord specified by type.
supported types are:
`record_type`: Power Module Capability
`index` specifies the index of the requested record.
`fru_id`
"""
record_type = find_picmg_multirecord_id(record_type)
index = int_any_base(index)
fru_id = int_any_base(fru_id)
fru = pyipmi.fru.FruInventory(self._fru_data(fru_id))
found_num = 0
for record in fru.multirecord_area.records:
if ((record.record_type_id, record.picmg_record_type_id) ==
(pyipmi.fru.FruDataMultiRecord.TYPE_OEM_PICMG, record_type)):
if found_num == index:
return record
raise AssertionError('Record type %s index=%s not found for fru_id=%s'
% (record_type, index, fru_id)) | /robotframework-ipmilibrary-0.3.5.tar.gz/robotframework-ipmilibrary-0.3.5/src/IpmiLibrary/fru.py | 0.649912 | 0.416915 | fru.py | pypi |
from robot.utils import asserts
from robot import utils
from pyipmi.errors import DataNotFound, CompletionCodeError
from .utils import int_any_base
from .mapping import *
class Hpm:
    """HPM.1 (Hardware Platform Management) firmware-upgrade keywords.

    Mixin for the IPMI library class: relies on ``self._ipmi`` (a pyipmi
    session) and ``self._run_ipmitool_checked`` being provided by the host
    class.  NOTE(review): ``pyipmi`` is referenced (default arguments below)
    but not imported in this module's visible import block -- presumably it
    arrives via ``from .mapping import *``; confirm.
    """
    def hpm_start_firmware_upload(self, file_path, filename):
        """*DEPRECATED*"""
        cmd = 'hpm upgrade %s/%s all' % (file_path, filename)
        self._run_ipmitool_checked(cmd)
    def hpm_start_firmware_upload_and_activate(self, file_path, filename):
        """*DEPRECATED*"""
        cmd = 'hpm upgrade %s/%s activate all' % (file_path, filename)
        self._run_ipmitool_checked(cmd)
    def hpm_start_firmware_rollback(self):
        """*DEPRECATED*"""
        cmd = 'hpm rollback'
        self._run_ipmitool_checked(cmd)
    def hpm_install_component_from_file(self, filename, component_name):
        """ Install the specified component
        """
        # Resolve the component descriptor (e.g. "MMC") to its numeric id.
        id = self._ipmi.find_component_id_by_descriptor(component_name)
        self._ipmi.install_component_from_file(filename, id)
    def hpm_open_upgrade_image(self, filename):
        """Open the HPM.1 upgrade image file and return the parsed image
        object.
        """
        image = self._ipmi.open_upgrade_image(filename)
        return image
    def hpm_image_header_value_should_be(self, filename, field, expected_value):
        """Fails unless the named header *field* of the upgrade image in
        *filename* equals *expected_value*.
        """
        image = self._ipmi.load_upgrade_image(filename)
        value = getattr(image.header, field)
        asserts.assert_equal(expected_value, value)
    def hpm_get_image_upgrade_version(self, filename):
        # Return the upgrade version stored in the image file as a string.
        version = self._ipmi.get_upgrade_version_from_file(filename)
        return version.version_to_string()
    def hpm_get_target_upgrade_capabilities(self):
        """Return the target's HPM.1 upgrade capabilities.
        """
        return self._ipmi.get_target_upgrade_capabilities()
    def hpm_get_component_property(self, component_name, property):
        """Return the component property as string.
        `component_name` is the description name of the component.
        e.g.: "IPMC", "MMC", "Boot block"
        `property` can be the following:
        "general properties",
        "current version",
        "description string",
        "rollback version",
        "deferred version"
        """
        property_id = find_hpm_component_property(property)
        comp_id = self._ipmi.find_component_id_by_descriptor(component_name)
        if comp_id is None:
            raise DataNotFound('no component with name %s found' % component_name)
        # NOTE: ``property`` (shadowing the builtin) is rebound here to the
        # fetched property object.
        property = self._ipmi.get_component_property(comp_id, property_id)
        if property_id == pyipmi.hpm.PROPERTY_GENERAL_PROPERTIES:
            return property.general
        elif property_id == pyipmi.hpm.PROPERTY_CURRENT_VERSION:
            return property.version.version_to_string()
        elif property_id == pyipmi.hpm.PROPERTY_DESCRIPTION_STRING:
            return property.description
        elif property_id == pyipmi.hpm.PROPERTY_ROLLBACK_VERSION:
            return property.version.version_to_string()
        elif property_id == pyipmi.hpm.PROPERTY_DEFERRED_VERSION:
            return property.version.version_to_string()
        # NOTE(review): any other property id silently returns None.
    def hpm_get_upgrade_status(self):
        """Return the current HPM.1 upgrade status.
        """
        return self._ipmi.get_upgrade_status()
    def hpm_activate_firmware(self, override=None):
        """Activate the uploaded firmware and wait for completion.

        NOTE(review): the *override* argument is accepted but currently
        ignored, and the wait timeout is fixed at 10 seconds -- confirm
        intended behaviour.
        """
        return self._ipmi.activate_firmware_and_wait(timeout=10)
    def hpm_abort_firmware_upgrade(self):
        """Abort a firmware upgrade that is in progress.
        """
        return self._ipmi.abort_firmware_upgrade()
    def hpm_initiate_upgrade_action(self, component_name, action,
            expected_cc=pyipmi.msgs.constants.CC_OK):
        """
        component_name: Other than the raw command here is only one
        component allowed. e.g. MMC, IPMC,
        action:
        BACKUP_COMPONENT,
        PREPARE_COMPONENT,
        UPLOAD_FOR_UPGRADE,
        UPLOAD_FOR_COMPARE
        """
        id = self._ipmi.find_component_id_by_descriptor(component_name)
        action = find_hpm_upgrade_action(action)
        expected_cc = int_any_base(expected_cc)
        try:
            # The command takes a component bitmask, hence the shift.
            self._ipmi.initiate_upgrade_action(1 << id, action)
        except CompletionCodeError as e:
            # Swallow the error only when it is the expected completion code.
            # NOTE(review): if expected_cc != CC_OK and the command succeeds,
            # no failure is reported.
            if e.cc == expected_cc:
                pass
            else:
                raise CompletionCodeError(e.cc)
    def hpm_upload_firmware_binary(self, binary):
        # Upload a raw firmware binary blob to the target.
        self._ipmi.upload_binary(binary)
    def hpm_finish_firmware_upload(self, component_name, size,
            expected_cc=pyipmi.msgs.constants.CC_OK):
        # Finish the upload for the named component, asserting the expected
        # completion code (see note in hpm_initiate_upgrade_action).
        size = int_any_base(size)
        id = self._ipmi.find_component_id_by_descriptor(component_name)
        expected_cc = int_any_base(expected_cc)
        if id is None:
            raise AssertionError('component_name=%s not found' % (component_name))
        try:
            self._ipmi.finish_firmware_upload(id, size)
        except CompletionCodeError as e:
            if e.cc == expected_cc:
                pass
            else:
                raise CompletionCodeError(e.cc)
    def hpm_wait_until_long_duration_command_is_finished(self, cmd,
            timeout, interval):
        # Poll every *interval* until the long-duration command *cmd* is
        # finished or *timeout* expires (Robot time strings accepted).
        cmd = int_any_base(cmd)
        timeout = utils.timestr_to_secs(timeout)
        interval = utils.timestr_to_secs(interval)
        self._ipmi.wait_for_long_duration_command(cmd, timeout, interval)
    def hpm_query_selftest_results(self):
        # Return the HPM.1 self-test results.
        return self._ipmi.query_selftest_results()
    def hpm_query_rollback_status(self):
        # Return the HPM.1 rollback status.
        return self._ipmi.query_rollback_status()
    def hpm_initiate_manual_rollback(self):
        # Start a manual rollback and wait for it to complete.
        return self._ipmi.initiate_manual_rollback_and_wait()
from robot import utils
from robot.utils import asserts
from robot.utils.connectioncache import ConnectionCache
from robot.output import LOGGER
from robot.output.loggerhelper import Message
from .utils import int_any_base
from .mapping import *
class Picmg:
    """PICMG (ATCA/AMC) keywords: FRU activation, LEDs, port state, power
    and hot-swap handling.

    Mixin for the IPMI library class: relies on ``self._ipmi`` (pyipmi
    session), ``self._cp`` (per-connection property store),
    ``self._sdr_entries()``, ``self.get_sensor_state()``, ``self._debug``
    and ``self._info`` from the host class.  ``pyipmi`` and the ``find_*``
    lookup helpers are assumed to come in via ``from .mapping import *`` --
    TODO confirm.
    """
    def get_picmg_properties(self):
        """Return the PICMG properties reported by the controller."""
        return self._ipmi.get_picmg_properties()
    def activate_fru(self, fruid=0):
        """Sends a _Set FRU Activation_ command to the given fru."""
        fruid = int(fruid)
        self._ipmi.set_fru_activation(fruid)
    def deactivate_fru(self, fruid=0):
        """Sends a _Set FRU Deactivation_ command to the given fru."""
        fruid = int(fruid)
        self._ipmi.set_fru_deactivation(fruid)
    def clear_activation_lock_bit(self, fruid=0):
        """Clears the activation lock bit for the given FRU.
        """
        fruid = int(fruid)
        self._ipmi.clear_fru_activation_lock(fruid)
    def clear_deactivation_lock_bit(self, fruid=0):
        """Clears the deactivation lock bit for the given FRU.
        """
        fruid = int(fruid)
        self._ipmi.clear_fru_deactivation_lock(fruid)
    def issue_frucontrol_cold_reset(self, fruid=0):
        """Sends a _frucontrol cold reset_ to the given FRU.
        """
        fruid = int(fruid)
        self._ipmi.fru_control_cold_reset(fruid)
    def issue_frucontrol_diagnostic_interrupt(self, fruid=0):
        """Sends a _frucontrol diagnostic interrupt_ to the given FRU.
        """
        fruid = int(fruid)
        self._ipmi.fru_control_diagnostic_interrupt(fruid)
    def get_fru_led_state(self, fru_id, led_id):
        """Fetches the FRU LED state and stores it for the
        `Led ... Should Be` keywords.
        """
        fru_id = int(fru_id)
        led_id = int(led_id)
        self._cp['led_state'] = self._ipmi.get_led_state(fru_id, led_id)
        self._debug('LED state is %s' % self._cp['led_state'])
    def led_color_should_be(self, expected_color, msg=None, values=True):
        """Fails if Picmg FRU Led color is not as given value.
        `expected_color` value can be:
        Blue, Red, Green, Amber, Orange, White
        """
        expected_color = find_picmg_led_color(expected_color)
        # The override color wins while an override is active.
        if self._cp['led_state'].override_enabled:
            actual_color = self._cp['led_state'].override_color
        else:
            actual_color = self._cp['led_state'].local_color
        asserts.assert_equal(expected_color, actual_color, msg, values)
    def led_function_should_be(self, expected_function, msg=None, values=True):
        """Fails if Picmg FRU Led function is not as given value.
        `expected_function` value can be:
        ON, OFF
        """
        expected_function = find_picmg_led_function(expected_function)
        if self._cp['led_state'].override_enabled:
            actual_function = self._cp['led_state'].override_function
        else:
            actual_function = self._cp['led_state'].local_function
        asserts.assert_equal(expected_function, actual_function, msg,
                values)
    def led_state_should_be(self, expected_state, msg=None, values=True):
        """Fails if Picmg FRU Led State is not as given value.
        `expected_state` value can be:
        Local Control, Override, Lamp Test

        NOTE(review): the original implementation referenced undefined names
        (``expected_function``/``function``) and could never pass; it is
        rewritten here to compare against the LED state fetched by
        `Get Fru Led State`, like the sibling keywords.  Confirm the
        Lamp Test semantics against hardware.
        """
        expected_state = find_picmg_led_function(expected_state)
        led = self._cp['led_state']
        if led.override_enabled:
            actual_state = led.override_function
        else:
            actual_state = led.local_function
        asserts.assert_equal(expected_state, actual_state, msg, values)
    def set_fru_led_state(self, fruid, ledid, state, color):
        """Set the FRU LED State.
        """
        fruid = int(fruid)
        ledid = int(ledid)
        state = find_picmg_led_function(state)
        color = find_picmg_led_color(color)
        led = pyipmi.picmg.LedState()
        led.fru_id = fruid
        led.led_id = ledid
        led.override_color = color
        led.override_function = state
        self._ipmi.set_led_state(led)
    def set_port_state(self, interface, channel, flags, link_type,
            link_type_ext, state, link_class=0):
        """Sends the "PICMG Set Portstate" command.
        `interface` is one of the following interface types: BASE, FABRIC,
        UPDATE_CHANNEL.
        `channel` is the interface channel ID. `flags` is the lane mask and one
        of the following values: LANE0, LANE0123.
        `link_type` is one of the following values: BASE, ETHERNET_FABRIC,
        INFINIBAND_FABRIC, STARFABRIC_FABRIC, PCIEXPRESS_FABRIC.
        `link_class` is the channel signaling class capability and has to be
        one of the following values: CLASS_BASIC, CLASS_10_3125GBD.
        `link_type_ext` is one of the following values: BASE0, BASE1,
        ETHERNET_FIX1000_BX, ETHERNET_FIX10GB_X4, ETHERNET_FCPI,
        ETHERNET_FIX1000KX_10G_KR, ETHERNET_FIX10GK_X4, ETHERNET_FIX40G_KR4
        `state` is the link state and has to be one of the following values:
        ENABLE, DISABLE.
        Note: Link Grouping ID is not supported yet
        Example:
        | Set Port State | BASE | 1 | LANE0 | BASE | BASE0 | ENABLE
        """
        link_descr = pyipmi.picmg.LinkDescriptor()
        link_descr.interface = find_picmg_interface_type(interface)
        link_descr.channel = int(channel)
        link_descr.link_flags = find_picmg_link_flags(flags)
        link_descr.type = find_picmg_link_type(link_type)
        link_descr.sig_class = find_picmg_link_signaling_class(link_class)
        link_descr.extension = find_picmg_link_type_extension(link_type_ext)
        link_descr.grouping_id = 0
        state = find_picmg_link_state(state)
        self._ipmi.set_port_state(link_descr, state)
    def get_port_state(self, interface, channel):
        """Returns the link and state of the interface link.
        Example:
        | ${link} | ${state}= | Get Port State | FABRIC | 1 |
        """
        interface = find_picmg_interface_type(interface)
        channel = int(channel)
        return self._ipmi.get_port_state(channel, interface)
    def port_state_should_be(self, interface, channel, expected_state):
        """Fails if the returned port state is not equal the expected.
        """
        interface = find_picmg_interface_type(interface)
        channel = int(channel)
        expected_state = find_picmg_link_state(expected_state)
        (link, state) = self._ipmi.get_port_state(channel, interface)
        asserts.assert_equal(expected_state, state)
    def link_flags_should_be(self, interface, channel, expected_flags):
        """Fails if the link flags does not match the expected flags.
        """
        interface = find_picmg_interface_type(interface)
        channel = int(channel)
        expected_flags = find_picmg_link_flags(expected_flags)
        (link, state) = self._ipmi.get_port_state(channel, interface)
        asserts.assert_equal(expected_flags, link.link_flags)
    def link_type_should_be(self, interface, channel, expected_type,
            expected_ext):
        """Fails if the link type is not as the expected.
        """
        interface = find_picmg_interface_type(interface)
        channel = int(channel)
        expected_type = find_picmg_link_type(expected_type)
        expected_ext = find_picmg_link_type_extension(expected_ext)
        (link, state) = self._ipmi.get_port_state(channel, interface)
        asserts.assert_equal(expected_type, link.type)
        asserts.assert_equal(expected_ext, link.extension)
    def link_signaling_class_should_be(self, interface, channel,
            expected_class):
        """Fails if the link signaling class is not as the expected.
        """
        interface = find_picmg_interface_type(interface)
        channel = int(channel)
        expected_class = find_picmg_link_signaling_class(expected_class)
        (link, state) = self._ipmi.get_port_state(channel, interface)
        asserts.assert_equal(expected_class, link.sig_class)
    def get_power_level(self, fruid, power_type, offset):
        """return the specified power level for the fru
        `fruid`
        `power_type`
        `offset`
        """
        fruid = int_any_base(fruid)
        power_type = int_any_base(power_type)
        offset = int_any_base(offset)
        pwr = self._ipmi.get_power_level(fruid, power_type)
        return pwr.power_levels[offset]
    def get_fan_speed_properties(self, fruid):
        """Return the fan speed properties for the given FRU.
        """
        fruid = int_any_base(fruid)
        return self._ipmi.get_fan_speed_properties(fruid)
    def get_fan_override_level(self, fruid):
        """Return the fan override level for the given FRU.
        """
        fruid = int_any_base(fruid)
        (override_level, local_level) = self._ipmi.get_fan_level(fruid)
        return override_level
    def set_signaling_class(self, interface, channel, signaling_class):
        """*DEPRECATED* Sends the `Set Channel Signaling Class` command.
        `interface` the interface type (BASE, FABRIC, UPDATE_CHANNEL)
        `channel` is the interface channel ID.
        `class` is the channel signaling class capability and has to be one of
        the following values: CLASS_BASIC, CLASS_10_3125GBD.
        """
        interface = find_picmg_interface_type(interface)
        channel = int(channel)
        signaling_class = find_picmg_signaling_class(signaling_class)
        self._ipmi.set_signaling_class(interface, channel, signaling_class)
    def get_signaling_class(self, interface, channel):
        """*DEPRECATED* Sends `Get Channel Signaling Class` command
        """
        interface = find_picmg_interface_type(interface)
        channel = int(channel)
        # Bug fix: the argument was misspelled 'interfac' (NameError); the
        # result is now also returned to the caller.
        return self._ipmi.get_signaling_class(interface, channel)
    def get_pm_global_status(self):
        """Return the power module global status.
        """
        return self._ipmi.get_pm_global_status()
    def get_pm_channel_status(self, channel_number):
        """Return the power channel status for *channel_number*.
        """
        channel_number = int_any_base(channel_number)
        return self._ipmi.get_power_channel_status(channel_number)
    def get_hotswap_sdr(self, entity):
        """Get the entities hotswap sensor SDR
        Entity can be specified by 'entitiy_id:entity_instance'
        Valid entitiy_id:
        Power Module= 0x0a
        Cooling Unit = 0x1e
        Picmg Front Board= 0xa0
        Picmg Rear Transition Module= 0xc0
        Picmg Advanced MC = 0xc1
        Picmg Microtca Carrier Hub = 0xc2
        Picmg Shelf Management Controller = 0xf0
        Picmg FIlteration Unit = 0xf1
        Picmg Shelf FRU Information = 0xf2
        """
        (entity_id, entity_instance) = entity.split(':')
        entity_id = find_entity_type_id(entity_id)
        entity_instance = int_any_base(entity_instance)
        for sdr in self._sdr_entries():
            # Value comparison instead of identity ('is not') -- identity
            # only happened to work for CPython's cached small ints.
            if sdr.type not in (pyipmi.sdr.SDR_TYPE_FULL_SENSOR_RECORD,
                    pyipmi.sdr.SDR_TYPE_COMPACT_SENSOR_RECORD):
                continue
            if sdr.sensor_type_code != \
                    pyipmi.sensor.SENSOR_TYPE_FRU_HOT_SWAP:
                continue
            if sdr.entity_id == entity_id and \
                    sdr.entity_instance == entity_instance:
                return sdr
        raise AssertionError('Hotswap Sensor for entity %s %s not found' \
                % (entity_id, entity_instance))
    def prefetch_hotswap_sdr(self, entity):
        # Cache the entity's hot-swap SDR by its device id string.
        if 'prefetched_hotswap_sdr' not in self._cp:
            self._cp['prefetched_hotswap_sdr'] = {}
        sdr = self.get_hotswap_sdr(entity)
        self._cp['prefetched_hotswap_sdr'][sdr.device_id_string] = sdr
    def prefetch_all_hotswap_sdr(self):
        """Scan all SDRs from sdr list for hotswap sensors and prefetch."""
        if 'prefetched_hotswap_sdr' not in self._cp:
            self._cp['prefetched_hotswap_sdr'] = {}
        for sdr in self._sdr_entries():
            if sdr.type not in (pyipmi.sdr.SDR_TYPE_FULL_SENSOR_RECORD,
                    pyipmi.sdr.SDR_TYPE_COMPACT_SENSOR_RECORD):
                continue
            if sdr.sensor_type_code != \
                    pyipmi.sensor.SENSOR_TYPE_FRU_HOT_SWAP:
                continue
            self._info('HS SDR %s found' % sdr.device_id_string)
            self._cp['prefetched_hotswap_sdr'][sdr.device_id_string] = sdr
    def _find_hotswap_sdr_by_entity(self, entity):
        # Look up a prefetched hot-swap SDR by 'entity_id:entity_instance'.
        # Returns None (and logs) when nothing was prefetched or no match.
        (entity_id, entity_instance) = entity.split(':')
        entity_id = find_entity_type_id(entity_id)
        entity_instance = int_any_base(entity_instance)
        if 'prefetched_hotswap_sdr' in self._cp:
            for name in self._cp['prefetched_hotswap_sdr']:
                hs_sdr = self._cp['prefetched_hotswap_sdr'][name]
                if hs_sdr.entity_id == entity_id and \
                        hs_sdr.entity_instance == entity_instance:
                    return hs_sdr
        else:
            self._info('HS SDR not found')
    def _get_hotswap_state(self, sdr):
        # Read the hot-swap sensor and return the asserted M-state (0-7).
        state = self.get_sensor_state(None, sdr) & 0xff
        # A valid reading has exactly one bit set (i.e. is a power of two).
        # Bug fix: was 'state & state-1 is not 0' (int identity comparison)
        if state & (state - 1) != 0:
            raise AssertionError('sensor reports invalid state 0x%02x'
                    % (state))
        for s in range(7, -1, -1):  # bug fix: xrange does not exist on py3
            if state & (1 << s):
                return s
    def get_hotswap_state(self, entity):
        # Return the current hot-swap M-state for the given entity.
        sdr = self._find_hotswap_sdr_by_entity(entity)
        return self._get_hotswap_state(sdr)
import io
import json
import os.path
import jsonschema
from copy import deepcopy
from robot.api import logger
from robot.utils.asserts import fail
from jsonpath_ng import Index, Fields
from jsonpath_ng.ext import parse as parse_ng
from jsonpath_ng.exceptions import JsonPathParserError
__author__ = "Traitanit Huangsri"
__email__ = "traitanit.hua@gmail.com"
class JSONLibrary:
"""JSONLibrary is a robotframework testlibrary for manipulating JSON object (dictionary)
You can get, add, update and delete your json object using JSONPath.
== JSONPath Syntax ==
| JSONPath | Description |
| $ | the root object/element |
| @ | the current object/element |
| . or [] | child operator |
| .. | recursive descent. JSONPath borrows this syntax from E4X |
| * | wildcard. All objects/element regardless their names. |
| [] | subscript operator. XPath uses it to iterate over element collections and for predicates.
In Javascript and JSON it is the native array operator. |
| [,] | Union operator in XPath results in a combination of node sets. JSONPath allows alternate
names or array indices as a set. |
| [start:end:step] | array slice operator borrowed from ES4 |
| ?() | applies a filter (script) expression. |
| () | script expression, using the underlying script engine. |
== *** Known issue *** ==
If there is a space in JSONPath expression, the module used by this library will throw an exception.
Therefore, please avoid the space in JSONPath expression if possible.
*Example:*
| JSONPath | Exception? |
| $.[?(@.id == 1)] | Y |
| $.[?(@.id==1)] | N |
| $.[?(@.name=='test 123')] | N |
== Example Test Cases ==
| *** Settings *** |
| Library | JSONLibrary |
| |
| *** Test Cases *** |
| TestManipulatingJSON |
| ${json_object}= | Load JSON From File | example.json |
| ${object_to_add}= | Create Dictionary | country=Thailand |
| ${json_object}= | Add Object To Json | ${json_object} | $..address | ${object_to_add} |
| ${value}= | Get Value From Json | ${json_object} | $..country |
| Should Be Equal As Strings | ${value[0]} | Thailand |
"""
ROBOT_LIBRARY_SCOPE = "GLOBAL"
ROBOT_LIBRARY_DOC_FORMAT = "ROBOT"
ROBOT_EXIT_ON_FAILURE = True
@staticmethod
def _parse(json_path):
try:
return parse_ng(json_path)
except JsonPathParserError as e:
fail(
"Parser failed to understand syntax '{}'. error message: "
"\n{}\n\nYou may raise an issue on https://github.com/h2non/jsonpath-ng".format(
json_path, e
)
)
@staticmethod
def load_json_from_file(file_name, encoding=None):
"""Load JSON from file.
Return json as a dictionary object.
Arguments:
- file_name: absolute json file name
- encoding: encoding of the file
Return json object (list or dictionary)
Examples:
| ${result}= | Load Json From File | /path/to/file.json |
"""
logger.debug("Check if file exists")
if os.path.isfile(file_name) is False:
logger.error("JSON file: " + file_name + " not found")
raise IOError
with io.open(file_name, mode="r", encoding=encoding) as json_file:
data = json.load(json_file)
return data
def add_object_to_json(self, json_object, json_path, object_to_add):
"""Add an dictionary or list object to json object using json_path
Arguments:
- json_object: json as a dictionary object.
- json_path: jsonpath expression
- object_to_add: dictionary or list object to add to json_object which is matched by json_path
Return new json object.
Examples:
| ${dict}= | Create Dictionary | latitude=13.1234 | longitude=130.1234 |
| ${json}= | Add Object To Json | ${json} | $..address | ${dict} |
"""
json_path_expr = self._parse(json_path)
json_object_cpy = deepcopy(json_object)
object_to_add_cpy = deepcopy(object_to_add)
rv = json_path_expr.find(json_object_cpy)
if len(rv):
for match in rv:
if type(match.value) is dict:
match.value.update(object_to_add_cpy)
if type(match.value) is list:
match.value.append(object_to_add_cpy)
else:
parent_json_path = ".".join(json_path.split(".")[:-1])
child_name = json_path.split(".")[-1]
json_path_expr = self._parse(parent_json_path)
rv = json_path_expr.find(json_object_cpy)
if len(rv):
for match in rv:
match.value.update({child_name: object_to_add_cpy})
else:
fail(f"no match found for parent {parent_json_path}")
return json_object_cpy
def get_value_from_json(self, json_object, json_path, fail_on_empty=False):
"""Get Value From JSON using JSONPath
Arguments:
- json_object: json as a dictionary object.
- json_path: jsonpath expression
- fail_on_empty: fail the testcases if nothing is returned
Return array of values
Examples:
| ${values}= | Get Value From Json | ${json} | $..phone_number |
| ${values}= | Get Value From Json | ${json} | $..missing | fail_on_empty=${True} |
"""
json_path_expr = self._parse(json_path)
rv = json_path_expr.find(json_object)
# optional: make the keyword fails if nothing was return
if fail_on_empty is True and (rv is None or len(rv) == 0):
fail(f"Get Value From Json keyword failed to find a value for {json_path}")
return [match.value for match in rv]
def update_value_to_json(self, json_object, json_path, new_value):
"""Update value to JSON using JSONPath
Arguments:
- json_object: json as a dictionary object.
- json_path: jsonpath expression
- new_value: value to update
Return new json_object
Examples:
| ${json_object}= | Update Value To Json | ${json} | $..address.streetAddress | Ratchadapisek Road |
"""
json_path_expr = self._parse(json_path)
json_object_cpy = deepcopy(json_object)
for match in json_path_expr.find(json_object_cpy):
path = match.path
if isinstance(path, Index):
match.context.value[match.path.index] = new_value
elif isinstance(path, Fields):
match.context.value[match.path.fields[0]] = new_value
return json_object_cpy
def delete_object_from_json(self, json_object, json_path):
"""Delete Object From JSON using json_path
Arguments:
- json_object: json as a dictionary object.
- json_path: jsonpath expression
Return new json_object
Examples:
| ${json_object}= | Delete Object From Json | ${json} | $..address.streetAddress |
"""
json_path_expr = self._parse(json_path)
json_object_cpy = deepcopy(json_object)
for match in reversed(json_path_expr.find(json_object_cpy)):
path = match.path
if isinstance(path, Index):
del match.context.value[match.path.index]
elif isinstance(path, Fields):
del match.context.value[match.path.fields[0]]
return json_object_cpy
@staticmethod
def convert_json_to_string(json_object):
"""Convert JSON object to string
Arguments:
- json_object: json as a dictionary object.
Return new json_string
Examples:
| ${json_str}= | Convert JSON To String | ${json_obj} |
"""
return json.dumps(json_object)
@staticmethod
def convert_string_to_json(json_string):
"""Convert String to JSON object
Arguments:
- json_string: JSON string
Return new json_object
Examples:
| ${json_object}= | Convert String to JSON | ${json_string} |
"""
return json.loads(json_string)
def dump_json_to_file(self, dest_file, json_object, encoding=None):
"""Dump JSON to file
Arguments:
- dest_file: destination file
- json_object: json as a dictionary object.
Export the JSON object to a file
Examples:
| Dump JSON To File | ${OUTPUT_DIR)${/}output.json | ${json} |
"""
json_str = self.convert_json_to_string(json_object)
with open(dest_file, "w", encoding=encoding) as json_file:
json_file.write(json_str)
return str(dest_file)
def should_have_value_in_json(self, json_object, json_path):
"""Should Have Value In JSON using JSONPath
Arguments:
- json_object: json as a dictionary object.
- json_path: jsonpath expression
Fail if no value is found
Examples:
| Should Have Value In Json | ${json} | $..id_card_number |
"""
try:
self.get_value_from_json(json_object, json_path, fail_on_empty=True)
except AssertionError:
fail(f"No value found for path {json_path}")
def should_not_have_value_in_json(self, json_object, json_path):
"""Should Not Have Value In JSON using JSONPath
Arguments:
- json_object: json as a dictionary object.
- json_path: jsonpath expression
Fail if at least one value is found
Examples:
| Should Not Have Value In Json | ${json} | $..id_card_number |
"""
try:
rv = self.get_value_from_json(json_object, json_path, fail_on_empty=True)
except AssertionError:
pass
else:
fail(f"Match found for parent {json_path}: {rv}")
def validate_json_by_schema_file(
self, json_object, path_to_schema, encoding=None
) -> None:
"""Validate json object by json schema file.
Arguments:
- json_object: json as a dictionary object.
- json_path: path to file with json schema
Fail if json object does not match the schema
Examples:
| Simple | Validate Json By Schema File | {"foo":bar} | ${CURDIR}${/}schema.json |
"""
with open(path_to_schema, encoding=encoding) as f:
self.validate_json_by_schema(json_object, json.load(f))
@staticmethod
def validate_json_by_schema(json_object, schema) -> None:
"""Validate json object by json schema.
Arguments:
- json_object: json as a dictionary object.
- schema: schema as a dictionary object.
Fail if json object does not match the schema
Examples:
| Simple | Validate Json By Schema | {"foo":bar} | {"$schema": "https://schema", "type": "object"} |
"""
try:
jsonschema.validate(json_object, schema)
except jsonschema.ValidationError as e:
fail(f"Json does not match the schema: {e.schema}")
except jsonschema.SchemaError as e:
fail(f"Json schema error: {e}") | /robotframework-jsonlibrary-0.5.tar.gz/robotframework-jsonlibrary-0.5/JSONLibrary/jsonlibrary.py | 0.600891 | 0.38659 | jsonlibrary.py | pypi |
import json
from types import ModuleType
from importlib import import_module
from JsonQuery.queries import JmesPath, JsonPathNg, Querable
# Maps an importable query-module name to the adapter class wrapping it.
# Both jsonpath_ng variants share the same JsonPathNg adapter.
jsonParserModule = {
    "jmespath": JmesPath,
    "jsonpath_ng.ext": JsonPathNg,
    "jsonpath_ng": JsonPathNg,
}
class JsonQuery:
    ROBOT_LIBRARY_SCOPE = "SUITE"
    def __init__(self, query_module: str = "jmespath") -> None:
        """Initialize library with a module name used to parse syntax for a specific implementation
        Example:
        | =Settings= |
        | Library  | JsonQuery | jmespath | # initialize library with jmespath library |
        | Library  | JsonQuery | jsonpath_ng.ext | # initialize library with jsonpath_ng.ext (extended version which handles filtering, etc.) |
        """
        # Import the backend module and wrap it in the matching adapter.
        self.imported_module: ModuleType = import_module(query_module)
        self.qmodule: Querable = jsonParserModule[query_module](self.imported_module)
    def get_query_module(self) -> str:
        """Get module name loaded in initialization
        Example:
        | =Settings= |
        | Library  | JsonQuery | `jmespath` |
        | |
        | =Keywords= |
        | ... |
        | ${module_name}  | Get Query Module |
        | Should Be Equal | `jmespath` | ${module_name} |
        """
        return f"{self.imported_module.__name__}"
    def read_json_file(self, file_path: str, encoding: str = None) -> dict:
        """Read json file using standard json module and return data in a dict format
        Example:
        | ... |
        | =Test Cases= |
        | Read Sample File |
        | | ${content}  | Read Json File | /path/to/the/json/file.json \\ \\ \\ # unix-like path |
        | ... |

        ``encoding`` (new, optional) is passed to ``open``; the default
        keeps the previous platform-dependent behaviour.
        """
        with open(file_path, "r", encoding=encoding) as fl:
            content = json.load(fl)
        return content
    def query_json(self, document: dict, expression: str) -> dict:
        """Query json document/dictionary with a given expression using module of choice
        | ... |
        | =Keywords= |
        | ... |
        | ${content}  | Read Json File | /path/to/file.json |
        | ${query_result} | Query Json | ${content} | locations[?state == 'WA'].name | sort(@) | {WashingtonCities: join(', ', @)} | #jmespath syntax |
        """
        result = self.qmodule.search(expression, document)
        return result
import json
import pprint
from typing import Any, Dict, List, Optional, Union
import jsonschema
import jsonselect
import objectpath
from jsonpath_rw_ext import parse
from jsonpath_rw.jsonpath import DatumInContext, Fields, Index, JSONPath
# A decoded JSON document: either a JSON object or a list of JSON objects.
JsonType = Union[Dict[str, Any], List[Dict[str, Any]]]  # noqa: E993
class JsonValidator(object):
"""
Library for JSON validation.
Based on: JSONSchema, JSONPath, JSONSelect.
== Additional Information ==
- [ http://json-schema.org/ | Json Schema ]
- [ http://www.jsonschema.net/ | Jsonschema generator ]
- [ http://goessner.net/articles/JsonPath/ | JSONPath by Stefan Goessner ]
- [ http://jsonpath.curiousconcept.com/ | JSONPath Tester ]
- [ http://jsonselect.org/ | JSONSelect]
- [ http://jsonselect.curiousconcept.com/ | JSONSelect Tester]
== Dependencies ==
| jsonschema | https://pypi.python.org/pypi/jsonschema |
| jsonpath-rw-ext | https://pypi.python.org/pypi/jsonpath-rw-ext |
| objectpath | https://pypi.python.org/pypi/objectpath/ |
| pyjsonselect | https://pypi.python.org/pypi/pyjsonselect |
== Example of use ==
json_example.json
| { "store": {
| "book": [
| { "category": "reference",
| "author": "Nigel Rees",
| "title": "Sayings of the Century",
| "price": 8.95
| },
| { "category": "fiction",
| "author": "Evelyn Waugh",
| "title": "Sword of Honour",
| "price": 12.99
| },
| { "category": "fiction",
| "author": "Herman Melville",
| "title": "Moby Dick",
| "isbn": "0-553-21311-3",
| "price": 8.99
| },
| { "category": "fiction",
| "author": "J. R. R. Tolkien",
| "title": "The Lord of the Rings",
| "isbn": "0-395-19395-8",
| "price": 22.99
| }
| ],
| "bicycle": {
| "color": "red",
| "price": 19.95
| }
| }
| }
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | Element should exist | ${json_example} | .author:contains("Evelyn Waugh") |
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
def __init__(self) -> None:
    """ Initialization. """
    # Cache of compiled JSONPath expressions keyed by their source string;
    # populated and consumed by `_parse` to avoid re-parsing hot expressions.
    self._parser_cache: Dict[str, JSONPath] = {}
def _parse(self, expr: str) -> JSONPath:
    """
    Compile a JSONPath expression, memoizing the result.

    *Args:*\n
    _expr_ - JSONPath expression;

    *Returns:*\n
    Parsed JSONPath expression.
    """
    compiled = self._parser_cache.get(expr)
    if compiled is None:
        compiled = parse(expr)
        self._parser_cache[expr] = compiled
    return compiled
def _validate_json(self, checked_json: JsonType, schema: Dict[str, Any]) -> None:
    """ Validate JSON according to JSONSchema

    *Args*:\n
    _checked_json_: validated JSON.
    _schema_: schema that used for validation.

    *Raises:*\n
    JsonValidatorError - when the document fails validation or the schema
    itself is invalid.
    """
    try:
        jsonschema.validate(checked_json, schema)
    except jsonschema.ValidationError as e:
        # Print the failure details to stdout so they appear in the Robot log.
        # FIX: the instance was previously rendered via str.encode('utf-8'),
        # which printed a bytes repr (b'...') under Python 3.
        print("""Failed validating '{0}'
in schema {1}:
{2}
On instance {3}:
{4}""".format(e.validator,
              list(e.relative_schema_path)[:-1], pprint.pformat(e.schema),
              "[%s]" % "][".join(repr(index) for index in e.absolute_path),
              pprint.pformat(e.instance)))
        raise JsonValidatorError("Failed validating json by schema") from e
    except jsonschema.SchemaError as e:
        raise JsonValidatorError(f'Json-schema error: {e}') from e
def validate_jsonschema_from_file(self, json_source: Union[str, JsonType], path_to_schema: str) -> None:
    """
    Validate JSON according to schema, loaded from a file.

    *Args:*\n
    _json_source_ - JSON data structure;\n
    _path_to_schema_ - path to file with JSON schema;

    *Raises:*\n
    JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Simple | Validate jsonschema from file | {"foo":bar} | ${CURDIR}${/}schema.json |
    """
    # Read the schema explicitly as UTF-8 instead of relying on the
    # platform-default encoding (which breaks on non-ASCII schemas on
    # e.g. Windows).
    with open(path_to_schema, encoding='utf-8') as f:
        schema = f.read()
    load_input_json = self.convert_to_json(json_source)
    try:
        load_schema = json.loads(schema)
    except ValueError as e:
        # Chain the original decoding error for easier debugging.
        raise JsonValidatorError('Error in schema: {}'.format(e)) from e
    self._validate_json(load_input_json, load_schema)
def validate_jsonschema(self, json_source: Union[str, JsonType], input_schema: str) -> None:
    """
    Validate JSON against a schema given as a string.

    *Args:*\n
    _json_source_ - JSON data structure;\n
    _input_schema_ - schema in string format;

    *Raises:*\n
    JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Simple | ${schema}= | OperatingSystem.Get File | ${CURDIR}${/}schema_valid.json |
    | | Validate jsonschema | {"foo":bar} | ${schema} |
    """
    document = self.convert_to_json(json_source)
    try:
        parsed_schema = json.loads(input_schema)
    except ValueError as e:
        raise JsonValidatorError('Error in schema: {}'.format(e))
    self._validate_json(document, parsed_schema)
def convert_to_json(self, json_source: Union[str, JsonType]) -> JsonType:
    """Coerce a python object to JsonType.

    Args:
        json_source: source object to convert; a string is parsed as a
            JSON document, a dict or list is passed through unchanged.

    Returns:
        JSON structure.

    Raises:
        JsonValidatorError: for any other input type.
    """
    if isinstance(json_source, (dict, list)):
        return json_source
    if isinstance(json_source, str):
        return self.string_to_json(json_source)
    raise JsonValidatorError(f'Invalid type of source_json: {type(json_source)}')
def string_to_json(self, source: str) -> JsonType:
    """
    Deserialize a JSON document given as a string.

    *Args:*\n
    _source_ - JSON string

    *Returns:*\n
    JSON structure

    *Raises:*\n
    JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | String to json | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json}= | String to json | ${json_string} |
    | | Log | ${json["store"]["book"][0]["price"]} |
    =>\n
    8.95
    """
    try:
        return json.loads(source)
    except ValueError as e:
        raise JsonValidatorError(f"Could not parse '{source}' as JSON: {e}")
def json_to_string(self, source: JsonType) -> str:
    """
    Serialize JSON structure into string.

    *Args:*\n
    _source_ - JSON structure

    *Returns:*\n
    JSON string

    *Raises:*\n
    JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Json to string | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json}= | String to json | ${json_string} |
    | | ${string}= | Json to string | ${json} |
    | | ${pretty_string}= | Pretty print json | ${string} |
    | | Log to console | ${pretty_string} |
    """
    try:
        return json.dumps(source)
    except (TypeError, ValueError) as e:
        # FIX: json.dumps signals unserializable objects with TypeError (and
        # circular references with ValueError); the old code caught only
        # ValueError, letting TypeError escape. The message also read
        # "Could serialize" instead of "Could not serialize".
        raise JsonValidatorError(f"Could not serialize '{source}' to JSON: {e}") from e
def get_elements(self, json_source: Union[str, JsonType], expr: str) -> Optional[List[Any]]:
    """
    Return all elements of _json_source_ matching a [http://goessner.net/articles/JsonPath/|JSONPath] expression.

    *Args:*\n
    _json_source_ - JSON data structure;\n
    _expr_ - JSONPath expression;

    *Returns:*\n
    List of found elements or ``None`` if no elements were found

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Get json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json_elements}= | Get elements | ${json_example} | $.store.book[*].author |
    =>\n
    | [u'Nigel Rees', u'Evelyn Waugh', u'Herman Melville', u'J. R. R. Tolkien']
    """
    document = self.convert_to_json(json_source)
    # Compile (cached) and evaluate the expression in one pass.
    matches = [found.value for found in self._parse(expr).find(document)]
    return matches or None
def select_elements(self, json_source: Union[str, JsonType], expr: str) -> Optional[List[Any]]:
    """
    Return all elements of _json_source_ matching a [ http://jsonselect.org/ | JSONSelect] expression.

    *DEPRECATED* JSON Select query language is outdated and not supported any more.
    Use other keywords of this library to query JSON.

    *Args:*\n
    _json_source_ - JSON data structure;\n
    _expr_ - JSONSelect expression;

    *Returns:*\n
    List of found elements or ``None`` if no elements were found

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Select json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json_elements}= | Select elements | ${json_example} | .author:contains("Evelyn Waugh")~.price |
    =>\n
    | 12.99
    """
    document = self.convert_to_json(json_source)
    selected = list(jsonselect.match(sel=expr, obj=document))
    return selected or None
def select_objects(self, json_source: Union[str, JsonType], expr: str) -> Optional[List[Any]]:
    """
    Return all elements of _json_source_ matching an [ http://objectpath.org// | ObjectPath] expression.

    *Args:*\n
    _json_source_ - JSON data structure;\n
    _expr_ - ObjectPath expression;

    *Returns:*\n
    List of found elements. If no elements were found, empty list will be returned

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Select json objects | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json_objectss}= | Select objects | ${json_example} | $..book[@.author.name is "Evelyn Waugh"].price |
    =>\n
    | [12.99]
    """
    document = self.convert_to_json(json_source)
    # objectpath returns a lazy generator; materialize it for Robot Framework.
    return list(objectpath.Tree(document).execute(expr))
def element_should_exist(self, json_source: Union[str, JsonType], expr: str) -> None:
    """
    Fail unless at least one element matches the [ http://jsonselect.org/ | JSONSelect] expression.

    *DEPRECATED* JSON Select query language is outdated and not supported any more.
    Use other keywords of this library to query JSON.

    *Args:*\n
    _json_source_ - JSON data structure;\n
    _expr_ - JSONSelect expression;\n

    *Raises:*\n
    JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | Element should exist | ${json_example} | .author:contains("Evelyn Waugh") |
    | | Element should exist | ${json_example} | .store .book .price:expr(x=8.95) |
    """
    if self.select_elements(json_source, expr) is None:
        raise JsonValidatorError(f'Elements {expr} does not exist')
def element_should_not_exist(self, json_source: Union[str, JsonType], expr: str) -> None:
    """
    Fail if any element matches the [ http://jsonselect.org/ | JSONSelect] expression.

    *DEPRECATED* JSON Select query language is outdated and not supported any more.
    Use other keywords of this library to query JSON.

    *Args:*\n
    _json_source_ - JSON data structure;\n
    _expr_ - JSONSelect expression;\n

    *Raises:*\n
    JsonValidatorError
    """
    if self.select_elements(json_source, expr) is not None:
        raise JsonValidatorError(f'Elements {expr} exist but should not')
def _json_path_search(self, json_dict: JsonType, expr: str) -> List[DatumInContext]:
    """
    Evaluate a JSONPath expression of the form $.element..element1[index] against a JSON dictionary.

    *Args:*\n
    _json_dict_ - JSON dictionary;\n
    _expr_ - string of fuzzy search for items within the directory;\n

    *Returns:*\n
    List of DatumInContext objects:
    ``[DatumInContext(value=..., path=..., context=[DatumInContext])]``

    - value - found value
    - path - value selector inside context.value (in implementation of jsonpath-rw: class Index or Fields)

    *Raises:*\n
    JsonValidatorError - when nothing matches the expression.
    """
    found = self._parse(expr).find(json_dict)
    if not found:
        raise JsonValidatorError(f"Nothing found in the dictionary {json_dict} using the given path {expr}")
    return found
def update_json(self, json_source: Union[str, JsonType], expr: str, value: Any,
                index: Union[int, str] = 0) -> JsonType:
    """
    Replace the value in the JSON structure.

    *Args:*\n
    _json_source_ - JSON data structure;\n
    _expr_ - JSONPath expression for determining the value to be replaced;\n
    _value_ - the value to be replaced with;\n
    _index_ - index for selecting item within a match list, default value is 0;\n

    *Returns:*\n
    Changed JSON in dictionary format.

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Update element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json_update}= | Update_json | ${json_example} | $..color | changed |
    """
    load_input_json = self.convert_to_json(json_source)
    # `_json_path_search` raises when nothing matches, so `matches` is non-empty.
    matches = self._json_path_search(load_input_json, expr)
    # `index` may arrive as a string from Robot Framework, hence the int() cast.
    datum_object = matches[int(index)]
    # Defensive check: jsonpath-rw find() results are DatumInContext objects,
    # so this branch should be unreachable in practice.
    if not isinstance(datum_object, DatumInContext):
        raise JsonValidatorError("Nothing found by the given json-path")
    path = datum_object.path
    # Edit the directory using the received data
    # Mutate the matched node's parent container through the path selector.
    # If the user specified a list
    if isinstance(path, Index):
        datum_object.context.value[datum_object.path.index] = value
    # If the user specified a value of type (string, bool, integer or complex)
    elif isinstance(path, Fields):
        datum_object.context.value[datum_object.path.fields[0]] = value
    # NOTE(review): a match whose path is neither Index nor Fields (e.g. the
    # document root) is silently left unmodified — confirm this is intended.
    return load_input_json
def pretty_print_json(self, json_string: str) -> str:
    """
    Return formatted JSON string _json_string_.\n
    Using method json.dumps with settings: _indent=2, ensure_ascii=False_.

    *Args:*\n
    _json_string_ - JSON string.

    *Returns:*\n
    Formatted JSON string.

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Check element | ${pretty_json}= | Pretty print json | {a:1,foo:[{b:2,c:3},{d:"baz",e:4}]} |
    | | Log | ${pretty_json} |
    =>\n
    | {
    | "a": 1,
    | "foo": [
    | {
    | "c": 3,
    | "b": 2
    | },
    | {
    | "e": 4,
    | "d": "baz"
    | }
    | ]
    | }
    """
    parsed = self.string_to_json(json_string)
    return json.dumps(parsed, indent=2, ensure_ascii=False)
class JsonValidatorError(Exception):
    """Raised for any validation, parsing or lookup failure in JsonValidator."""
from pathlib import Path
from robot.libraries.BuiltIn import BuiltIn
from SeleniumLibrary import SeleniumLibrary
from SeleniumLibrary.utils.librarylistener import LibraryListener
from .keywords import server, webelements
# Root of the installed JupyterLibrary package.
HERE = Path(__file__).parent
# Per-client resource directories bundled with the package (clients/*).
CLIENTS = [client for client in sorted(HERE.glob("clients/*")) if client.is_dir()]
# Client-agnostic resource files imported into every suite.
COMMON = sorted(HERE.glob("common/*.resource"))

# Keyword component classes mixed into JupyterLibrary at construction time.
component_classes = [
    webelements.WebElementKeywords,
    server.ServerKeywords,
]
class JupyterLibraryListener(LibraryListener):
    """Custom listener to do per-suite imports of resource files."""

    ROBOT_LISTENER_API_VERSION = 2

    def start_suite(self, name, attrs):
        """Handle dynamic imports at suite startup."""
        super().start_suite(name, attrs)
        # Common (client-agnostic) resources first, then each client's tree.
        resources = [f"JupyterLibrary/common/{common.name}" for common in COMMON]
        for client in CLIENTS:
            resources.extend(
                f"JupyterLibrary/{path.relative_to(HERE).as_posix()}"
                for path in sorted(client.rglob("*.resource"))
            )
        for resource in resources:
            BuiltIn().import_resource(resource)
class JupyterLibrary(SeleniumLibrary):
    """JupyterLibrary is a Jupyter testing library for Robot Framework."""

    def __init__(
        self,
        timeout=5.0,
        implicit_wait=0.0,
        run_on_failure="Capture Page Screenshot",
        screenshot_root_directory=None,
        **kwargs,
    ) -> None:
        """Instantiate a stateful ``JupyterLibrary`` instance.

        Optional import-time arguments:

        - ``timeout``:
          Default value for `timeouts` used with ``Wait ...`` keywords.
        - ``implicit_wait``:
          Default value for `implicit wait` used when locating elements.
        - ``run_on_failure``:
          Default action for the `run-on-failure functionality`.
        - ``screenshot_root_directory``:
          Location where possible screenshots are created. If not given,
          the directory where the log file is written is used.
        """
        super().__init__(
            timeout=timeout,
            implicit_wait=implicit_wait,
            run_on_failure=run_on_failure,
            screenshot_root_directory=screenshot_root_directory,
            **kwargs,
        )
        # Instantiate each keyword component against this library instance.
        components = [component(self) for component in component_classes]
        self.add_library_components(components)
        # Register the per-suite resource-import listener.
        self.ROBOT_LIBRARY_LISTENER = JupyterLibraryListener()
import contextlib
import shutil
import tempfile
from hashlib import sha256
from pathlib import Path
import robot
from IPython import get_ipython
from IPython.core import magic_arguments
from IPython.core.magic import (
Magics,
cell_magic,
magics_class,
)
from IPython.display import (
HTML,
Markdown,
display,
)
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers.robotframework import RobotFrameworkLexer
from pygments.styles import get_all_styles
# Feature flags resolved at import time: optional dependencies are probed so
# the rest of the module can degrade gracefully when they are missing.
HAS_CORE_TIDY = False
HAS_WIDGETS = False

try:
    # ``robot.tidy`` is not present in all Robot Framework releases; only
    # enable pretty-printing when it imports cleanly.
    from robot.tidy import Tidy

    HAS_CORE_TIDY = True
except ImportError:
    pass

try:
    import ipywidgets

    HAS_WIDGETS = True
except ImportError:
    pass

# Shared keyword arguments for all text file I/O in this module.
ENC = {"encoding": "utf-8"}
@magics_class
class RobotMagics(Magics):
    """Run Robot Framework code.

    Example:
    -------
    %%robot --

    *** Tasks ***
    Just Log Something
        Log  Something
    """

    # CSS class applied to the pygments-highlighted source; see `pretty_core`.
    PRETTY_CLASS = "robot-magic"
@cell_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
    "-o",
    "--output-dir",
    default=None,
    help="""Name of directory to update (default:cwd/_robot_magic_) """,
)
@magic_arguments.argument(
    "-e",
    "--execute",
    default=True,
    help="""run the robot test""",
)
@magic_arguments.argument(
    "-p",
    "--pretty",
    default=True,
    help=(
        """print out syntax highlighted, tidied source. """
        """no effect if ``robot.tidy`` is unavailable"""
    ),
)
@magic_arguments.argument(
    "-s",
    "--style",
    default="colorful",
    help=f"""style to use, one of: {", ".join(sorted(get_all_styles()))}""",
)
@magic_arguments.argument(
    "-g",
    "--gui",
    default="display",
    help="""how to show outputs, one of: display, widget""",
)
@magic_arguments.argument(
    "-a",
    "--arg",
    default=None,
    help="name of a variable in user_ns to use for robot.run arguments",
)
@magic_arguments.argument(
    "-n",
    "--name",
    default=None,
    help="name of the suite. default: Untitled_<hash>",
)
def robot(self, line, cell):
    """Run some Robot Framework code.

    Dispatches to the widget GUI when requested (and ipywidgets is
    available); otherwise optionally pretty-prints the cell and runs it.
    """
    # Surround the magic line with spaces before hashing/argument parsing.
    line = f" {line} "
    # The hash of magic line + cell keys the per-run output directory, so
    # re-running identical content reuses the same directory.
    m = sha256()
    m.update(line.encode("utf-8"))
    m.update(cell.encode("utf-8"))
    content_hash = str(m.hexdigest())[:12]
    args = magic_arguments.parse_argstring(self.robot, line)
    if HAS_WIDGETS and args.gui.lower() in ["widget", "w", "widgets"]:
        self.widget(args, cell)
    else:
        if args.pretty and HAS_CORE_TIDY:
            html = self.pretty_core(args, cell)
            # NOTE(review): ``args.gui`` is compared case-sensitively here but
            # lower-cased above — confirm mixed-case values are intended to
            # skip this branch.
            if args.gui == "display":
                display(html)
        if args.execute:
            self.execute(args, cell, content_hash)
def widget(self, args, cell):
    """Display a widget of files built during a run.

    Builds an ipywidgets ``Tab`` with a log pane and, when ``--pretty`` is
    set, an extra pane with the highlighted, tidied source.
    """
    log = ipywidgets.HTML()
    titles = ["Log"]
    children = [log]
    tabs = ipywidgets.Tab(children)
    tabs.titles = titles
    if args.pretty:
        out = ipywidgets.Output()
        with out:
            # FIX: this previously called ``self.pretty``, which is not
            # defined on this class — ``pretty_core`` is the renderer used
            # by the `robot` magic as well.
            display(self.pretty_core(args, cell))
        tabs.children = [*tabs.children, out]
        tabs.titles = [*tabs.titles, "Pretty"]
    display(tabs)
def execute(self, args, cell: str, content_hash: str):
    """Run a cell in the outputdir, in a directory named after the content hash."""
    ip = get_ipython()
    # Each unique (line, cell) pair gets its own working directory under
    # ``_robot_magic_``; identical reruns reuse (and first wipe) that path.
    if args.output_dir:
        outputdir = Path(args.output_dir).resolve() / "_robot_magic_" / content_hash
    else:
        outputdir = Path.cwd() / "_robot_magic_" / content_hash
    display(Markdown(f"- _🤖 making files in_ `{outputdir}`"))
    # Start from a clean slate so the artifact listing is deterministic.
    if outputdir.exists():
        shutil.rmtree(outputdir)
    outputdir.mkdir(parents=True)
    name = args.name or f"Untitled_{content_hash}"
    robot_file = outputdir / f"{name}.robot"
    robot_file.write_text(cell, **ENC)
    display(Markdown("- _🤖 running!_"))
    stdout_file = outputdir / "stdout.txt"
    stderr_file = outputdir / "stderr.txt"
    # Optional extra robot.run keyword arguments pulled from the user namespace.
    robot_args = ip.user_ns[args.arg] if args.arg else {}
    # robot.run may raise SystemExit; suppress it so the kernel survives.
    # NOTE(review): if SystemExit is actually raised, ``rc`` is never bound
    # and the f-string below raises NameError — confirm robot.run's behavior.
    with contextlib.suppress(SystemExit), stdout_file.open(
        "w+",
    ) as stdout, stderr_file.open(
        "w+",
    ) as stderr:
        rc = robot.run(
            robot_file,
            outputdir=outputdir,
            stderr=stderr,
            stdout=stdout,
            **robot_args,
        )
    if args.gui == "display":
        for outfile in [stdout_file, stderr_file]:
            display(
                HTML(
                    f"""<ul><li>
<code>{outfile.name}</code>
<code><pre>{outfile.read_text(**ENC) or "empty"}</pre></code>
</li></ul>""",
                ),
            )
        # The anchors carry JupyterLab command metadata so clicking opens
        # the artifact in the file browser.
        # NOTE(review): ``relative_to(Path.cwd())`` raises when --output-dir
        # points outside the current working directory — confirm intended.
        files = [
            f"""<li>
<a href="{p.relative_to(Path.cwd()).as_posix()}"
data-commandlinker-command="filebrowser:open"
data-commandlinker-args="{{}}">
{p.relative_to(outputdir).as_posix()}
</a>
</li>"""
            for p in sorted(outputdir.rglob("*"))
        ]
        display(
            HTML(
                f"""<ul><li><details>
<summary>{len(files)} Files</summary>
<ul>
{"".join(files)}
</ul>
</li></ul>""",
            ),
        )
    display(Markdown(f"- _🤖 returned {rc}_"))
    # A non-zero return code marks the notebook cell itself as failed.
    if rc:
        msg = f"robot returned {rc}"
        raise RuntimeError(msg)
def pretty_core(self, args, cell):
    """Return the cell's robot source, tidied and syntax-highlighted, as HTML."""
    tidier = Tidy()
    # Tidy only works on files, so round-trip the cell through a temp file.
    with tempfile.TemporaryDirectory() as td:
        ugly = Path(td) / "ugly.robot"
        ugly.write_text(cell, **ENC)
        tidier.inplace(str(ugly))
        cell = ugly.read_text(**ENC)
    formatter = HtmlFormatter(cssclass=self.PRETTY_CLASS, style=args.style)
    css = formatter.get_style_defs(f".{self.PRETTY_CLASS}")
    highlighted = highlight(cell, RobotFrameworkLexer(), formatter)
    return HTML(
        f"""<ul><li>
<details>
<summary>Formatted Robot Code</summary>
<style>{css}</style>{highlighted}
</details>
</li></ul>""",
    )
import os
import shutil
import socket
import subprocess
import tempfile
import time
import typing
from pathlib import Path
from urllib.request import urlopen
from uuid import uuid4
from robot.libraries.BuiltIn import BuiltIn
from SeleniumLibrary.base import LibraryComponent, keyword
class ServerKeywords(LibraryComponent):
    """A component that extends the core with Jupyter server management."""

    # NOTE(review): these containers are class-level, so all instances share
    # the same bookkeeping of launched servers — presumably intentional for a
    # GLOBAL-scope Robot library, but confirm before instantiating twice.
    _handles: typing.List[subprocess.Popen] = []
    _tmpdirs: typing.Dict[subprocess.Popen, str] = {}
    _notebook_dirs: typing.Dict[subprocess.Popen, str] = {}
    _ports: typing.Dict[subprocess.Popen, int] = {}
    _base_urls: typing.Dict[subprocess.Popen, str] = {}
    _tokens: typing.Dict[subprocess.Popen, str] = {}
    # Explicit override for app-name detection; see `set_default_jupyter_app_name`.
    _app_name: typing.Optional[str] = None
@keyword
def set_default_jupyter_app_name(
    self,
    app_name: typing.Optional[str] = None,
) -> typing.Optional[str]:
    """Set the current ``traitlets.Configurable`` Jupyter Server application, returning the previous value.

    A value of ``None`` (the default) will try to detect the app based on
    command name, such as:

    - ``jupyter-notebook`` is configured with ``NotebookApp``
    - ``jupyter-lab`` and most other are configured with ``ServerApp``

    This may also be set externally via the ``JUPYTER_LIBRARY_APP`` environment
    variable, but any explicit argument will override this.

    See [#Get Jupyter App Name] for more.
    """
    # FIX: this previously read/wrote ``self.app_name``, but the class (and
    # `get_jupyter_app_name`) uses ``_app_name`` — so the setter raised
    # AttributeError on first use and never affected detection.
    old_app_name = self._app_name
    self._app_name = app_name
    return old_app_name
@keyword
def get_jupyter_app_name(
    self,
    command: typing.Optional[str] = None,
) -> typing.Optional[str]:
    """Get the current ``traitlets.Configurable`` Jupyter Server application, optionally for a specific CLI command.

    Precedence: an explicit name set via [#Set Default Jupyter App Name],
    then command-based detection, then the ``JUPYTER_LIBRARY_APP``
    environment variable, falling back to ``ServerApp``.
    """
    if self._app_name is not None:
        resolved = self._app_name
    elif command is not None and "jupyter-notebook" in command:
        resolved = "NotebookApp"
    else:
        resolved = os.environ.get("JUPYTER_LIBRARY_APP")
    return resolved or "ServerApp"
@keyword
def start_new_jupyter_server(
    self,
    command: typing.Optional[str] = "jupyter-notebook",
    port: typing.Optional[int] = None,
    base_url: typing.Optional[str] = "/@rf/",
    notebook_dir: typing.Optional[str] = None,
    token: typing.Optional[str] = None,
    *args,
    **config,
) -> subprocess.Popen:
    """Start a Jupyter server. All arguments are optional.

    | = argument = | = default = | = notes = |
    | ``command`` | ``jupyter-notebook`` | e.g. ``jupyter-lab`` |
    | ``port`` | an unused port | |
    | ``base_url`` | ``/@rf/`` | |
    | ``notebook_dir`` | a temporary directory | |
    | ``token`` | a random ``uuid4`` | |
    | ``*args`` | | extra server arguments |
    | ``**config`` | | extra process arguments |
    | ``app_name`` | ``None`` (detect) | e.g. ``NotebookApp``, `ServerApp`` |
    | ``extra_args`` | ``[]`` | extra arguments beyond ```token``, etc. |

    If not configured, the ``%{HOME}`` environment variable and current
    working directory will be set to avoid leaking configuration
    between between runs (or the test instance) itself. These
    directories will be cleaned up after the server process is
    [#Terminate All Jupyter Servers|terminated].

    The ``app_name`` argument is as described for the [#Get Jupyter App Name|app name],
    with the default being to autodetect from the command and environment.

    ``extra_args`` are passed to ``Start Process`` before the ``token``
    """
    # App-name resolution order: explicit kwarg, the env passed to the
    # subprocess, then command-based detection.
    app_name = (
        config.pop("app_name", None)
        or config.get("env:JUPYTER_LIBRARY_APP")
        or self.get_jupyter_app_name(command)
    )
    port = port or self.get_unused_port()
    token = str(uuid4()) if token is None else token
    # Delegate process management to Robot's Process library.
    BuiltIn().import_library("Process")
    plib = BuiltIn().get_library_instance("Process")
    # Isolated scratch area: per-server HOME, log file and notebook dir.
    tmp_path = Path(tempfile.mkdtemp())
    if "env:HOME" not in config:
        home_dir = tmp_path / "home"
        home_dir.mkdir()
        config["env:HOME"] = str(home_dir)
    if "stdout" not in config:
        config["stdout"] = str(tmp_path / "server.log")
    if "stderr" not in config:
        config["stderr"] = "STDOUT"
    if notebook_dir is None:
        notebook_dir = tmp_path / "notebooks"
        notebook_dir.mkdir()
        config["cwd"] = str(notebook_dir)
    # Explicit *args completely replace the default CLI arguments.
    args = args or self.build_jupyter_server_arguments(
        port,
        base_url,
        token,
        app_name,
    )
    extra_args = config.pop("extra_args", [])
    handle = plib.start_process(command, *extra_args, *args, **config)
    # Track everything needed to reach and later clean up this server.
    self._handles += [handle]
    self._tmpdirs[handle] = str(tmp_path)
    self._notebook_dirs[handle] = str(notebook_dir)
    self._ports[handle] = port
    self._base_urls[handle] = base_url
    self._tokens[handle] = token
    return handle
@keyword
def build_jupyter_server_arguments(
    self,
    port: int,
    base_url: str,
    token: str,
    app_name: typing.Optional[str] = None,
) -> typing.Tuple[str]:
    """Build the default CLI arguments for a Jupyter server process.

    If ``app_name`` is not provided, it is detected based on the rules in
    [#Get Jupyter App Name].
    """
    resolved = app_name or self.get_jupyter_app_name()
    return (
        "--no-browser",
        "--debug",
        f"--port={port}",
        f"--{resolved}.token={token}",
        f"--{resolved}.base_url={base_url}",
    )
@keyword
def copy_files_to_jupyter_directory(self, *sources: str, **kwargs) -> None:
    """Copy some files into the (temporary) jupyter server root.

    | = argument = | = default = |
    | ``nbserver`` | the most-recently launched server |
    """
    handle = kwargs.get("nbserver", self._handles[-1])
    target_dir = self._notebook_dirs[handle]
    BuiltIn().import_library("OperatingSystem")
    os_lib = BuiltIn().get_library_instance("OperatingSystem")
    return os_lib.copy_files(*sorted(sources), target_dir)
@keyword
def copy_files_from_jupyter_directory(self, *src_and_dest: str, **kwargs) -> None:
    """Copy some files from the (temporary) jupyter server root.

    The final argument is the destination; every earlier pattern has the
    notebook directory prepended.

    | = argument = | = default = |
    | ``nbserver`` | the most-recently launched server |
    """
    handle = kwargs.get("nbserver", self._handles[-1])
    notebook_dir = Path(self._notebook_dirs[handle])
    BuiltIn().import_library("OperatingSystem")
    os_lib = BuiltIn().get_library_instance("OperatingSystem")
    *patterns, destination = src_and_dest
    resolved = [str(notebook_dir / pattern) for pattern in patterns]
    return os_lib.copy_files(*resolved, destination)
@keyword
def get_jupyter_directory(
    self,
    nbserver: typing.Optional[subprocess.Popen] = None,
) -> str:
    """Get the Jupyter contents directory.

    | = argument = | = default = |
    | ``nbserver`` | the most-recently launched server |
    """
    handle = self._handles[-1] if nbserver is None else nbserver
    return self._notebook_dirs[handle]
@keyword
def wait_for_jupyter_server_to_be_ready(
    self,
    *nbservers: subprocess.Popen,
    **kwargs,
) -> int:
    """Wait for the given (or most-recently started) Jupyter server(s) to answer HTTP.

    | = argument = | = default = |
    | ``interval`` | ``0.5`` seconds between attempts |
    | ``retries`` | ``60`` attempts |

    Returns the number of servers that became ready; raises RuntimeError
    when some of them never did.
    """
    interval = float(kwargs.get("interval", 0.5))
    retries = int(kwargs.get("retries", 60))
    # Default to the last-started server; 0 servers means nothing to wait for.
    if not nbservers:
        if not self._handles:
            return 0
        nbservers = [self._handles[-1]]
    ready = 0
    error = None
    # Poll until every server responds or retries are exhausted. Any failed
    # request restarts the count from zero on the next attempt.
    while retries and ready != len(nbservers):
        retries -= 1
        ready = 0
        try:
            for nbh in nbservers:
                urlopen(self.get_jupyter_server_url(nbh))
                ready += 1
        except Exception as _error:
            time.sleep(interval)
            error = _error
    if ready != len(nbservers):
        # NOTE(review): ``retries`` has been decremented (possibly to 0) by
        # this point, so the reported wait time understates the real one.
        message = (
            f"Only {ready} of {len(nbservers)} servers were ready after "
            f"{interval * retries}s. Last error: {type(error)} {error}"
        )
        raise RuntimeError(message)
    return ready
@keyword
def get_jupyter_server_url(
    self,
    nbserver: typing.Optional[subprocess.Popen] = None,
) -> str:
    """Get the given (or most recently-launched) server's base URL."""
    handle = nbserver or self._handles[-1]
    port = self._ports[handle]
    base = self._base_urls[handle]
    return f"http://127.0.0.1:{port}{base}"
@keyword
def get_jupyter_server_token(
    self,
    nbserver: typing.Optional[subprocess.Popen] = None,
) -> str:
    """Get the given (or most recently-launched) server's auth token."""
    handle = nbserver or self._handles[-1]
    return self._tokens[handle]
@keyword
def wait_for_new_jupyter_server_to_be_ready(
    self,
    command: typing.Optional[str] = None,
    *args,
    **config,
) -> subprocess.Popen:
    """Start a new Jupyter server and block until it answers HTTP.

    See [#Start New Jupyter Server|Start New Jupyter Server] for the
    accepted arguments; the new server's process handle is returned.
    """
    new_handle = self.start_new_jupyter_server(command, *args, **config)
    self.wait_for_jupyter_server_to_be_ready(new_handle)
    return new_handle
@keyword
def shut_down_jupyter_server(self, nbserver=None) -> int:
    """Gracefully shut down a Jupyter server started by [#Start New Jupyter Server|Start New Jupyter Server].

    If no ``handle`` is given, the last-started server will be shut down.

    Returns 1 on success, 0 if the shutdown request failed (the failure is
    logged, not raised).
    """
    nbh = nbserver or self._handles[-1]
    url = self.get_jupyter_server_url(nbh)
    token = self.get_jupyter_server_token(nbh)
    try:
        # A non-None ``data`` makes this a POST, as the shutdown API requires.
        # FIX: ``data`` must be a bytes object per the urllib documentation;
        # the previous ``data=[]`` relied on urllib tolerating an arbitrary
        # iterable.
        urlopen(f"{url}api/shutdown?token={token}", data=b"")
    except Exception as err:
        BuiltIn().log(err)
        return 0
    # Forget the connection details only after a successful shutdown.
    self._ports.pop(nbh)
    self._base_urls.pop(nbh)
    self._tokens.pop(nbh)
    return 1
@keyword
def clean_up_jupyter_server_files(self, nbserver=None) -> None:
    """Clean up the files owned by a [#Start New Jupyter Server|Jupyter Server].

    If no ``nbserver`` is given, the last-started server's temporary
    directory is removed.
    """
    # Default to the most recently launched server.
    nbh = nbserver or self._handles[-1]
    # Best-effort removal: the server may still hold some files open.
    shutil.rmtree(self._tmpdirs[nbh], ignore_errors=True)
    self._tmpdirs.pop(nbh)
@keyword
def terminate_jupyter_server(self, nbserver=None) -> int:
    """Terminate a Jupyter server process started by [#Start New Jupyter Server|Start New Jupyter Server].

    If no ``nbserver`` is given, the last-started server will be terminated.
    Returns 1.
    """
    process_lib = BuiltIn().get_library_instance("Process")
    handle = nbserver or self._handles[-1]
    process_lib.terminate_process(handle)
    self._handles.remove(handle)
    return 1
@keyword
def terminate_all_jupyter_servers(self, timeout: str = "6s") -> int:
    """Close all Jupyter servers started by [#Start New Jupyter Server|Start New Jupyter Server].

    Waits ``timeout`` after shutdown/termination so files and processes are
    released before the temporary directories are removed. Returns the
    number of servers that were terminated.
    """
    shut_down = []
    terminated = []
    # Graceful HTTP shutdown first; failures are logged, not fatal.
    for handle in self._handles:
        try:
            self.shut_down_jupyter_server(handle)
            shut_down.append(handle)
        except Exception as err:
            BuiltIn().log(err)
    # Then hard-terminate; iterate a copy since termination mutates _handles.
    for handle in list(self._handles):
        try:
            self.terminate_jupyter_server(handle)
            terminated.append(handle)
        except Exception as err:
            BuiltIn().log(err)
    if shut_down or terminated:
        BuiltIn().sleep(timeout)
    # Finally drop every tracked scratch directory.
    for handle in list(self._tmpdirs):
        self.clean_up_jupyter_server_files(handle)
    return len(terminated)
@keyword
def get_unused_port(self) -> int:
    """Find an unused network port (could still create race conditions)."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("localhost", 0))
        sock.listen(1)
        return sock.getsockname()[1]
from kafka import KafkaProducer
class Producer(object):
    # Class-level placeholder; replaced by a KafkaProducer instance in
    # `connect_producer`. All other keywords assume it has been connected.
    producer = None
def connect_producer(self, bootstrap_servers='127.0.0.1:9092', client_id='Robot', **kwargs):
    """A Kafka client that publishes records to the Kafka cluster.

    Keyword Arguments:
    - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]'
      strings) that the producer should contact to bootstrap initial
      cluster metadata. This does not have to be the full node list.
      It just needs to have at least one broker that will respond to a
      Metadata API Request. Default to `localhost:9092`.
    - ``client_id`` (str): a name for this client. This string is passed in
      each request to servers and can be used to identify specific
      server-side log entries that correspond to this client.
      Default: `Robot`.

    Note:
    Configuration parameters are described in more detail at
    http://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html
    """
    # Replaces the class-level ``producer = None`` placeholder; any extra
    # kwargs are forwarded verbatim to KafkaProducer.
    self.producer = KafkaProducer(bootstrap_servers=bootstrap_servers, client_id=client_id, **kwargs)
def send(self, topic, value=None, timeout=60, key=None, partition=None, timestamp_ms=None):
    """Publish a message to a topic.

    - ``topic`` (str): topic where the message will be published
    - ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer.
      If value is None, key is required and message acts as a `delete`.
    - ``timeout`` (int): seconds to block waiting for the send result
      before giving up. Default: 60.
    - ``key``: a key to associate with the message. Can be used to determine which partition
      to send the message to. If partition is None (and producer's partitioner config is left as default),
      then messages with the same key will be delivered to the same partition (but if key is None,
      partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer.
    - ``partition`` (int): optionally specify a partition.
      If not set, the partition will be selected using the configured `partitioner`.
    - ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp.
      Defaults to current time.
    """
    future = self.producer.send(topic, value=value, key=key, partition=partition, timestamp_ms=timestamp_ms)
    # Block until the broker acknowledges the record (or ``timeout`` expires),
    # making this keyword synchronous from Robot Framework's point of view.
    future.get(timeout=timeout)
def flush(self, timeout=None):
    """Force-send all buffered records and block until their requests complete.
    - ``timeout`` (float): seconds to wait for completion.
    """
    self.producer.flush(timeout=timeout)
def close(self, timeout=None):
    """Close this producer, releasing its network resources.
    - ``timeout`` (float): seconds to wait for completion.
    """
    self.producer.close(timeout=timeout)
from kafka import KafkaConsumer, TopicPartition
class Consumer(object):
    """Robot Framework keywords for consuming records from a Kafka cluster.

    Thin wrapper around kafka-python's ``KafkaConsumer``; configuration
    parameters are described in more detail at
    http://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html
    """

    # Class-level default; connect_consumer() replaces it with an instance attribute.
    consumer = None

    def connect_consumer(
        self,
        bootstrap_servers='127.0.0.1:9092',
        client_id='Robot',
        group_id=None,
        auto_offset_reset='latest',
        enable_auto_commit=True,
        **kwargs
    ):
        """Connect kafka consumer.
        Keyword Arguments:
        - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]'
          strings) that the consumer should contact to bootstrap initial
          cluster metadata. This does not have to be the full node list.
          It just needs to have at least one broker that will respond to a
          Metadata API Request. Default: `127.0.0.1:9092`.
        - ``client_id`` (str): a name for this client. This string is passed in
          each request to servers and can be used to identify specific
          server-side log entries that correspond to this client. Also
          submitted to GroupCoordinator for logging with respect to
          consumer group administration. Default: `Robot`.
        - ``group_id`` (str or None): name of the consumer group to join for
          dynamic partition assignment (if enabled), and to use for fetching
          and committing offsets. If None, auto-partition assignment (via
          group coordinator) and offset commits are disabled. Default: `None`.
        - ``auto_offset_reset`` (str): A policy for resetting offsets on
          OffsetOutOfRange errors: `earliest` will move to the oldest
          available message, `latest` will move to the most recent. Any
          other value will raise the exception. Default: `latest`.
        - ``enable_auto_commit`` (bool): If true the consumer's offset will be
          periodically committed in the background. Default: `True`.
        Any additional keyword arguments are passed on to ``KafkaConsumer``.
        """
        self.consumer = KafkaConsumer(
            bootstrap_servers=bootstrap_servers,
            auto_offset_reset=auto_offset_reset,
            client_id=client_id,
            group_id=group_id,
            enable_auto_commit=enable_auto_commit,
            **kwargs
        )

    def _is_assigned(self, topic_partition):
        # True if any of the given TopicPartitions is already assigned to
        # this consumer.
        for tp in topic_partition:
            if tp in self.consumer.assignment():
                return True
        return False

    def get_kafka_topics(self):
        """Return list of kafka topics."""
        return list(self.consumer.topics())

    def get_kafka_partitions_for_topic(self, topic=None):
        """Return list of partitions for kafka topic.
        - ``topic`` (str): Topic to check.
        """
        return list(self.consumer.partitions_for_topic(topic))

    def assign_to_topic_partition(self, topic_partition=None):
        """Assign a list of TopicPartitions to this consumer.
        - ``topic_partition`` (TopicPartition or list of TopicPartition):
          Assignment for this instance.
        """
        if isinstance(topic_partition, TopicPartition):
            topic_partition = [topic_partition]
        else:
            # Materialize one-shot iterables (map/generator): _is_assigned()
            # iterates the value and would otherwise exhaust it before assign().
            topic_partition = list(topic_partition)
        if not self._is_assigned(topic_partition):
            self.consumer.assign(topic_partition)

    def subscribe_topic(self, topics=None, pattern=None):
        """Subscribe to a list of topics, or a topic regex pattern.
        - ``topics`` (list): List of topics for subscription.
        - ``pattern`` (str): Pattern to match available topics. You must
          provide either topics or pattern, but not both.
        """
        # None (instead of a mutable [] default) avoids state shared between calls.
        if topics is None:
            topics = []
        elif not isinstance(topics, list):
            topics = [topics]
        self.consumer.subscribe(topics, pattern=pattern)

    def get_position(self, topic_partition=None):
        """Return offset of the next record that will be fetched.
        - ``topic_partition`` (TopicPartition): Partition to check.
        Raises TypeError for any other argument type.
        """
        if not isinstance(topic_partition, TopicPartition):
            raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
        return self.consumer.position(topic_partition)

    def seek(self, offset, topic_partition=None):
        """Manually specify the fetch offset for a TopicPartition.
        - ``offset``: Message offset in partition.
        - ``topic_partition`` (`TopicPartition`): Partition for seek operation.
        Raises TypeError for any other argument type.
        """
        if not isinstance(topic_partition, TopicPartition):
            raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
        self.consumer.seek(topic_partition, offset=offset)

    def seek_to_beginning(self, topic_partition=None):
        """Seek to the oldest available offset for the given partition.
        - ``topic_partition`` (`TopicPartition`): Partition for seek operation.
        Raises TypeError for any other argument type.
        """
        if not isinstance(topic_partition, TopicPartition):
            raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
        self.consumer.seek_to_beginning(topic_partition)

    def seek_to_end(self, topic_partition=None):
        """Seek to the most recent available offset for the given partition.
        - ``topic_partition`` (`TopicPartition`): Partition for seek operation.
        Raises TypeError for any other argument type.
        """
        if not isinstance(topic_partition, TopicPartition):
            raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
        self.consumer.seek_to_end(topic_partition)

    def get_assigned_partitions(self):
        """Return `TopicPartitions` currently assigned to this consumer."""
        return list(self.consumer.assignment())

    def get_number_of_messages_in_topics(self, topics):
        """Return number of messages in topics.
        - ``topics`` (list): list of topics.
        """
        if not isinstance(topics, list):
            topics = [topics]
        number_of_messages = 0
        for topic in topics:
            partitions = self.get_kafka_partitions_for_topic(topic=topic)
            topic_partitions = [TopicPartition(topic=topic, partition=p) for p in partitions]
            number_of_messages += self.get_number_of_messages_in_topicpartition(topic_partitions)
        return number_of_messages

    def get_number_of_messages_in_topicpartition(self, topic_partition=None):
        """Return number of messages in TopicPartition.
        - ``topic_partition`` (TopicPartition or list of TopicPartition)
        """
        if isinstance(topic_partition, TopicPartition):
            topic_partition = [topic_partition]
        number_of_messages = 0
        # Remember the current assignment so it can be restored afterwards.
        assignment = self.consumer.assignment()
        self.consumer.unsubscribe()
        for tp in topic_partition:
            if not isinstance(tp, TopicPartition):
                raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
            self.assign_to_topic_partition(tp)
            # Message count is the distance between the end and start offsets.
            self.consumer.seek_to_end(tp)
            end = self.consumer.position(tp)
            self.consumer.seek_to_beginning(tp)
            start = self.consumer.position(tp)
            number_of_messages += end - start
        self.consumer.unsubscribe()
        self.consumer.assign(assignment)
        return number_of_messages

    def poll(self, timeout_ms=0, max_records=None):
        """Fetch data from assigned topics / partitions.
        - ``max_records`` (int): maximum number of records to poll.
          Default: Inherit value from max_poll_records.
        - ``timeout_ms`` (int): Milliseconds spent waiting in poll if data is
          not available in the buffer. If 0, returns immediately with any
          records that are available currently in the buffer, else returns
          empty. Must not be negative. Default: `0`.
        """
        messages = self.consumer.poll(timeout_ms=timeout_ms, max_records=max_records)
        # Flatten the {TopicPartition: [records]} mapping into a single list.
        result = []
        for records in messages.values():
            result.extend(records)
        return result

    def commit(self, offsets=None):
        """Commit offsets to kafka, blocking until success or error.
        - ``offsets`` (dict): `{TopicPartition: OffsetAndMetadata}` dict to
          commit with the configured group_id. Defaults to currently consumed
          offsets for all subscribed partitions.
        """
        self.consumer.commit(offsets)

    def committed(self, topic_partition):
        """Return the last committed offset for the given partition, or None
        if there was no prior commit.
        - ``topic_partition`` (`TopicPartition`): The partition to check.
        """
        return self.consumer.committed(topic_partition)

    def close(self, autocommit=True):
        """Close the consumer, waiting indefinitely for any needed cleanup.
        - ``autocommit`` (bool): Default `True`.
        """
        self.consumer.close(autocommit=autocommit)
from kafka import KafkaProducer
class Producer(object):
    """Robot Framework keywords for publishing records to a Kafka cluster.

    Thin wrapper around kafka-python's ``KafkaProducer``; configuration
    parameters are described in more detail at
    http://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html
    """

    # Class-level default; connect_producer() replaces it with an instance attribute.
    producer = None

    def connect_producer(self, bootstrap_servers='127.0.0.1:9092', client_id='Robot', **kwargs):
        """Create a KafkaProducer connected to the given cluster.

        Keyword Arguments:
        - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]'
          strings) that the producer should contact to bootstrap initial
          cluster metadata. Need not be the full node list; one broker that
          answers a Metadata API Request is enough. Default: `127.0.0.1:9092`.
        - ``client_id`` (str): a name for this client, passed in each request
          to servers and usable to identify server-side log entries that
          correspond to this client. Default: `Robot`.
        Any additional keyword arguments are forwarded to ``KafkaProducer``.
        """
        self.producer = KafkaProducer(
            bootstrap_servers=bootstrap_servers,
            client_id=client_id,
            **kwargs
        )

    def send(self, topic, value=None, timeout=60, key=None, partition=None, timestamp_ms=None):
        """Publish a message to ``topic`` and block until the broker acknowledges it.
        - ``topic`` (str): topic where the message will be published.
        - ``value``: message value; bytes or serializable to bytes via the
          configured value_serializer. If None, ``key`` is required and the
          message acts as a `delete` (tombstone).
        - ``timeout``: seconds to wait for the broker acknowledgement.
        - ``key``: key to associate with the message; can be used to determine
          the target partition. If ``partition`` is None (and the producer's
          partitioner config is left as default), messages with the same key
          go to the same partition (a None key picks a partition randomly).
          Bytes or serializable via the configured key_serializer.
        - ``partition`` (int): optionally specify a partition; if unset, the
          configured `partitioner` selects one.
        - ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to
          use as the message timestamp. Defaults to current time.
        """
        pending = self.producer.send(
            topic,
            value=value,
            key=key,
            partition=partition,
            timestamp_ms=timestamp_ms,
        )
        # Wait for delivery confirmation; raises on failure.
        pending.get(timeout=timeout)

    def flush(self, timeout=None):
        """Force-send all buffered records and block until their requests complete.
        - ``timeout`` (float): seconds to wait for completion.
        """
        self.producer.flush(timeout=timeout)

    def close(self, timeout=None):
        """Close this producer, releasing its network resources.
        - ``timeout`` (float): seconds to wait for completion.
        """
        self.producer.close(timeout=timeout)
from kafka import KafkaConsumer, TopicPartition
class Consumer(object):
    """Robot Framework keywords for consuming records from a Kafka cluster.

    Thin wrapper around kafka-python's ``KafkaConsumer``; configuration
    parameters are described in more detail at
    http://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html
    """

    # Class-level default; connect_consumer() replaces it with an instance attribute.
    consumer = None

    def connect_consumer(
        self,
        bootstrap_servers='127.0.0.1:9092',
        client_id='Robot',
        group_id=None,
        auto_offset_reset='latest',
        enable_auto_commit=True,
        **kwargs
    ):
        """Connect kafka consumer.
        Keyword Arguments:
        - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]'
          strings) that the consumer should contact to bootstrap initial
          cluster metadata. This does not have to be the full node list.
          It just needs to have at least one broker that will respond to a
          Metadata API Request. Default: `127.0.0.1:9092`.
        - ``client_id`` (str): a name for this client. This string is passed in
          each request to servers and can be used to identify specific
          server-side log entries that correspond to this client. Also
          submitted to GroupCoordinator for logging with respect to
          consumer group administration. Default: `Robot`.
        - ``group_id`` (str or None): name of the consumer group to join for
          dynamic partition assignment (if enabled), and to use for fetching
          and committing offsets. If None, auto-partition assignment (via
          group coordinator) and offset commits are disabled. Default: `None`.
        - ``auto_offset_reset`` (str): A policy for resetting offsets on
          OffsetOutOfRange errors: `earliest` will move to the oldest
          available message, `latest` will move to the most recent. Any
          other value will raise the exception. Default: `latest`.
        - ``enable_auto_commit`` (bool): If true the consumer's offset will be
          periodically committed in the background. Default: `True`.
        Any additional keyword arguments are passed on to ``KafkaConsumer``.
        """
        self.consumer = KafkaConsumer(
            bootstrap_servers=bootstrap_servers,
            auto_offset_reset=auto_offset_reset,
            client_id=client_id,
            group_id=group_id,
            enable_auto_commit=enable_auto_commit,
            **kwargs
        )

    def _is_assigned(self, topic_partition):
        # True if any of the given TopicPartitions is already assigned to
        # this consumer.
        for tp in topic_partition:
            if tp in self.consumer.assignment():
                return True
        return False

    def get_kafka_topics(self):
        """Return list of kafka topics."""
        return list(self.consumer.topics())

    def get_kafka_partitions_for_topic(self, topic=None):
        """Return list of partitions for kafka topic.
        - ``topic`` (str): Topic to check.
        """
        return list(self.consumer.partitions_for_topic(topic))

    def assign_to_topic_partition(self, topic_partition=None):
        """Assign a list of TopicPartitions to this consumer.
        - ``topic_partition`` (TopicPartition or list of TopicPartition):
          Assignment for this instance.
        """
        if isinstance(topic_partition, TopicPartition):
            topic_partition = [topic_partition]
        else:
            # Materialize one-shot iterables (map/generator): _is_assigned()
            # iterates the value and would otherwise exhaust it before assign().
            topic_partition = list(topic_partition)
        if not self._is_assigned(topic_partition):
            self.consumer.assign(topic_partition)

    def subscribe_topic(self, topics=None, pattern=None):
        """Subscribe to a list of topics, or a topic regex pattern.
        - ``topics`` (list): List of topics for subscription.
        - ``pattern`` (str): Pattern to match available topics. You must
          provide either topics or pattern, but not both.
        """
        # None (instead of a mutable [] default) avoids state shared between calls.
        if topics is None:
            topics = []
        elif not isinstance(topics, list):
            topics = [topics]
        self.consumer.subscribe(topics, pattern=pattern)

    def get_position(self, topic_partition=None):
        """Return offset of the next record that will be fetched.
        - ``topic_partition`` (TopicPartition): Partition to check.
        Raises TypeError for any other argument type.
        """
        if not isinstance(topic_partition, TopicPartition):
            raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
        return self.consumer.position(topic_partition)

    def seek(self, offset, topic_partition=None):
        """Manually specify the fetch offset for a TopicPartition.
        - ``offset``: Message offset in partition.
        - ``topic_partition`` (`TopicPartition`): Partition for seek operation.
        Raises TypeError for any other argument type.
        """
        if not isinstance(topic_partition, TopicPartition):
            raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
        self.consumer.seek(topic_partition, offset=offset)

    def seek_to_beginning(self, topic_partition=None):
        """Seek to the oldest available offset for the given partition.
        - ``topic_partition`` (`TopicPartition`): Partition for seek operation.
        Raises TypeError for any other argument type.
        """
        if not isinstance(topic_partition, TopicPartition):
            raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
        self.consumer.seek_to_beginning(topic_partition)

    def seek_to_end(self, topic_partition=None):
        """Seek to the most recent available offset for the given partition.
        - ``topic_partition`` (`TopicPartition`): Partition for seek operation.
        Raises TypeError for any other argument type.
        """
        if not isinstance(topic_partition, TopicPartition):
            raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
        self.consumer.seek_to_end(topic_partition)

    def get_assigned_partitions(self):
        """Return `TopicPartitions` currently assigned to this consumer."""
        return list(self.consumer.assignment())

    def get_number_of_messages_in_topics(self, topics):
        """Return number of messages in topics.
        - ``topics`` (list): list of topics.
        """
        if not isinstance(topics, list):
            topics = [topics]
        number_of_messages = 0
        for topic in topics:
            partitions = self.get_kafka_partitions_for_topic(topic=topic)
            topic_partitions = [TopicPartition(topic=topic, partition=p) for p in partitions]
            number_of_messages += self.get_number_of_messages_in_topicpartition(topic_partitions)
        return number_of_messages

    def get_number_of_messages_in_topicpartition(self, topic_partition=None):
        """Return number of messages in TopicPartition.
        - ``topic_partition`` (TopicPartition or list of TopicPartition)
        """
        if isinstance(topic_partition, TopicPartition):
            topic_partition = [topic_partition]
        number_of_messages = 0
        # Remember the current assignment so it can be restored afterwards.
        assignment = self.consumer.assignment()
        for tp in topic_partition:
            if not isinstance(tp, TopicPartition):
                raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
            self.assign_to_topic_partition(tp)
            # Save the current position so the consumer can be restored to it.
            current = self.consumer.position(tp)
            # Message count is the distance between the end and start offsets.
            self.consumer.seek_to_end(tp)
            end = self.consumer.position(tp)
            self.consumer.seek_to_beginning(tp)
            start = self.consumer.position(tp)
            self.consumer.seek(tp, current)
            number_of_messages += end - start
        self.consumer.assign(assignment)
        return number_of_messages

    def poll(self, timeout_ms=0, max_records=None):
        """Fetch data from assigned topics / partitions.
        - ``max_records`` (int): maximum number of records to poll.
          Default: Inherit value from max_poll_records.
        - ``timeout_ms`` (int): Milliseconds spent waiting in poll if data is
          not available in the buffer. If 0, returns immediately with any
          records that are available currently in the buffer, else returns
          empty. Must not be negative. Default: `0`.
        """
        messages = self.consumer.poll(timeout_ms=timeout_ms, max_records=max_records)
        # Flatten the {TopicPartition: [records]} mapping into a single list.
        result = []
        for records in messages.values():
            result.extend(records)
        return result

    def commit(self, offsets=None):
        """Commit offsets to kafka, blocking until success or error.
        - ``offsets`` (dict): `{TopicPartition: OffsetAndMetadata}` dict to
          commit with the configured group_id. Defaults to currently consumed
          offsets for all subscribed partitions.
        """
        self.consumer.commit(offsets)

    def committed(self, topic_partition):
        """Return the last committed offset for the given partition, or None
        if there was no prior commit.
        - ``topic_partition`` (`TopicPartition`): The partition to check.
        """
        return self.consumer.committed(topic_partition)

    def close(self, autocommit=True):
        """Close the consumer, waiting indefinitely for any needed cleanup.
        - ``autocommit`` (bool): Default `True`.
        """
        self.consumer.close(autocommit=autocommit)
from robotlibcore import DynamicCore
from KeePassLibrary.keywords import (
KeePassDatabase,
KeePassEntry,
KeePassEntries,
KeePassGroup,
KeePassGroups
)
__version__ = '0.4.0'
class KeePassLibrary(DynamicCore):
    """KeePassLibrary is a library for Robot Framework.
    KeePassLibrary uses the PyKeePass modules internally to access KeePass databases
    See https://keepass.info for more information about KeePass in general.
    == Table of contents ==
    %TOC%
    = Databases =
    The following databases are supported;
    - KDBX3 (3.1)
    - KDBX4 (4.0)
    https://keepass.info/help/kb/kdbx_4.html
    Example:
    | `Load Keepass Database` | Database.kdbx | Database.key | #Load a Keepass database named Database.kdbx using the keyfile Database.key |
    = Entries and Groups =
    A KeePass database (KDBX) is a tree of Groups, each Group can contain multiple Groups and Entries
    == Entry ==
    Entries can be found using the `Get Entries` like keywords to return a single Entry or list of Entries.
    | = Attribute = |
    | title |
    | username |
    | password |
    | url |
    | tags |
    | icon |
    | parent_group |
    | uuid |
    | expires |
    | expired |
    | path |
    == Group ==
    Groups can be found using the `Get Groups` like keywords to return a single Group or list of Groups.
    | = Attribute = |
    | name |
    | notes |
    | entries |
    | subgroups |
    | is_root_group |
    | icon |
    | parent_group |
    | uuid |
    | expires |
    | expired |
    | path |
    = Regular expression =
    When ``regex`` is set to True the supplied string is matched using regular expression:
    - https://docs.python.org/3/library/re.html#re-syntax
    - https://docs.python.org/3/howto/regex.html#regex-howto
    == Flags ==
    When ``regex`` is set to True a combination of ``flags`` can be set:
    | =Flag= | =Short Description= | =Long description= |
    | i | Ignore case | Perform case-insensitive matching. |
    | m | Multiline | Make begin/end {^, $} consider each line. |
    | s | Dotall | Makes the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. |
    | l | Locale | Make {\w, \W, \b, \B} follow locale. |
    | x | Verbose | Allow comment in regex. |
    """
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LIBRARY_VERSION = __version__

    def __init__(self):
        # Currently loaded PyKeePass database; managed by the database keywords.
        self._database = None
        # Keyword components registered with DynamicCore for discovery.
        components = [
            KeePassDatabase(self),
            KeePassEntry(self),
            KeePassEntries(self),
            KeePassGroup(self),
            KeePassGroups(self)
        ]
        DynamicCore.__init__(self, components)

    def __enter__(self):
        return self

    def __exit__(self, typ, value, tb):
        # Drop the database reference when the library is used as a context manager.
        del self._database

    def run_keyword(self, name: str, args: tuple, kwargs: dict):
        """Execute the named keyword with the given arguments."""
        # The previous try/except merely re-raised the exception unchanged,
        # so delegate directly to DynamicCore.
        return DynamicCore.run_keyword(self, name, args, kwargs)

    def get_keyword_tags(self, name: str) -> list:
        """Return a copy of the tags of the named keyword."""
        return list(DynamicCore.get_keyword_tags(self, name))

    def get_keyword_documentation(self, name: str) -> str:
        """Return the documentation of the named keyword."""
        return DynamicCore.get_keyword_documentation(self, name)
from KeePassLibrary.base import keyword, LibraryComponent, Entry
from KeePassLibrary.errors import EntryInvalid
class KeePassEntry(LibraryComponent):
#---------- Base Element ----------
#---------- Title ----------
@keyword
def get_entry_title(self, entry: Entry):
    """Return the title value of the supplied KeePass ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | ${value} = | `Get Entry Title` | ${entry} |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    return entry.title
@keyword
def set_entry_title(self, entry: Entry, value):
    """Set the title value of the supplied KeePass ``entry`` to the given ``value``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | `Set Entry Title` | New Title |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    entry.title = value
#---------- Username ----------
@keyword
def get_entry_username(self, entry: Entry):
    """Return the username value of the supplied KeePass ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | ${value} = | `Get Entry Username` | ${entry} |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    return entry.username
@keyword
def set_entry_username(self, entry: Entry, value):
    """Set the username value of the supplied KeePass ``entry`` to the given ``value``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | `Set Entry Username` | New Username |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    entry.username = value
#---------- Password ----------
@keyword
def get_entry_password(self, entry: Entry):
    """Return the password value of the supplied KeePass ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | ${value} = | `Get Entry Password` | ${entry} |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    return entry.password
@keyword
def set_entry_password(self, entry: Entry, value):
    """Set the password value of the supplied KeePass ``entry`` to the given ``value``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | `Set Entry Password` | N3w Passw0rd |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    entry.password = value
#---------- URL ----------
@keyword
def get_entry_url(self, entry: Entry):
    """Return the URL value of the supplied KeePass ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | ${value} = | `Get Entry Url` | ${entry} |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    return entry.url
@keyword
def set_entry_url(self, entry: Entry, value):
    """Set the URL value of the supplied KeePass ``entry`` to the given ``value``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | `Set Entry Url` | https://keepass.info/ |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    entry.url = value
#---------- Notes ----------
@keyword
def get_entry_notes(self, entry: Entry):
    """Return the notes value of the supplied KeePass ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | ${value} = | `Get Entry Notes` | ${entry} |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    return entry.notes
@keyword
def set_entry_notes(self, entry: Entry, value):
    """Set the notes value of the supplied KeePass ``entry`` to the given ``value``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | `Set Entry Notes` | New\\nnotes |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    entry.notes = value
#---------- Icon ----------
@keyword
def get_entry_icon(self, entry: Entry):
    """Return the icon value of the supplied KeePass ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | ${value} = | `Get Entry Icon` | ${entry} |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    return entry.icon
@keyword
def set_entry_icon(self, entry: Entry, value):
    """Set the icon value of the supplied KeePass ``entry`` to the given ``value``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | `Set Entry Icon` | 20 | #Gear icon |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    entry.icon = value
#---------- Tags (not implemented) ----------
@keyword
def get_entry_tags(self, entry: Entry):
    """Return a list with tags of the supplied KeePass ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | ${value} = | `Get Entry Tags` | ${entry} |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    return entry.tags
#
@keyword
def set_entry_tags(self, entry: Entry, value):
    """Set the tags value of the supplied KeePass ``entry`` to the given ``value``.

    ``value`` may be a list of tags or a single delimited string.

    Example:
    | @{tags}= | Create List | tag1 | tag2 |
    | Set Entry Tags | ${entry} | ${tags} | |
    | Set Entry Tags | ${entry} | tag1;tag2 | |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    entry.tags = value
#---------- History (not fully implemented) ----------
# FIXME: not implented
# @property
# def history(self):
# if self._element.find('History') is not None:
# return [Entry(element=x, kp=self._kp) for x in self._element.find('History').findall('Entry')]
# else:
# return []
#
# FIXME: not implented
# @history.setter
# def history(self, value):
# raise NotImplementedError()
#
# def save_history(self):
# '''
# Save the entry in its history
# '''
# archive = deepcopy(self._element)
# hist = archive.find('History')
# if hist is not None:
# archive.remove(hist)
# self._element.find('History').append(archive)
# else:
# history = Element('History')
# history.append(archive)
# self._element.append(history)
#@keyword
# def entry_should_be_history_entry(self, entry:Entry):
# """Verifies that the specified entry is history entry.
#
# Example:
# | ${entry} = | `Get Entries By Title` | root_entry | first=True |
# | `Entry Should Be History Entry` | ${entry} |
#
# New in KeePassLibrary 0.3
# """
# if isinstance(entry, Entry):
# if not entry.is_a_history_entry:
# raise EntryInvalid('Entry is not a history entry.')
# else:
# raise EntryInvalid('Invalid KeePass Entry.')
#
# #@keyword
# def entry_should_not_be_history_entry(self, entry:Entry):
# """Verifies that the specified entry is history entry.
#
# Example:
# | ${entry} = | `Get Entries By Title` | root_entry | first=True |
# | `Entry Should Not Be History Entry` | ${entry} |
#
# New in KeePassLibrary 0.3
# """
# if isinstance(entry, Entry):
# if entry.is_a_history_entry:
# raise EntryInvalid('Entry is a history entry.')
# else:
# raise EntryInvalid('Invalid KeePass Entry.')
#---------- Autotype (not implemented)----------
# @property
# def autotype_enabled(self):
# enabled = self._element.find('AutoType/Enabled')
# if enabled.text is not None:
# return enabled.text == 'True'
#
# @autotype_enabled.setter
# def autotype_enabled(self, value):
# enabled = self._element.find('AutoType/Enabled')
# if value is not None:
# enabled.text = str(value)
# else:
# enabled.text = None
#---------- Autotype sequence (not implemented)----------
# @property
# def autotype_sequence(self):
# sequence = self._element.find('AutoType/DefaultSequence')
# return sequence.text if sequence is not None else None
#
# @autotype_sequence.setter
# def autotype_sequence(self, value):
# self._element.find('AutoType/DefaultSequence').text = value
# @property
# def path(self):
# # The root group is an orphan
# if self.is_a_history_entry:
# pentry = Entry(
# element=self._element.getparent().getparent(),
# kp=self._kp
# ).title
# return '[History of: {}]'.format(pentry)
# if self.parentgroup is None:
# return None
# p = self.parentgroup
# ppath = ''
# while p is not None and not p.is_root_group:
# if p.name is not None: # dont make the root group appear
# ppath = '{}/{}'.format(p.name, ppath)
# p = p.parentgroup
# return '{}{}'.format(ppath, self.title)
#
#---------- Custom Property ----------
@keyword
def set_entry_custom_property(self, entry: Entry, key, value):
    """Set custom property ``key`` of the supplied ``entry`` to ``value``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | `Set Entry Custom Property` | new_field_name | new_field_value |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    entry.set_custom_property(key, value)
@keyword
def get_entry_custom_property(self, entry: Entry, key):
    """Return the value of the custom property matching ``key`` of the supplied ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | ${value} = | `Get Entry Custom Property` | ${entry} | foobar_attribute |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    return entry.get_custom_property(key)
@keyword
def remove_entry_custom_property(self, entry: Entry, key):
    """Remove the custom property matching ``key`` from the supplied KeePass ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | `Remove Entry Custom Property` | ${entry} | foobar_attribute |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    entry.delete_custom_property(key)
@keyword
def get_entry_custom_properties(self, entry: Entry):
    """Return a dictionary with all custom properties of the supplied KeePass ``entry``.

    Example:
    | ${entry} = | `Get Entries By Title` | root_entry | first=True |
    | ${value} = | `Get Entry Custom Properties` | ${entry} |

    New in KeePassLibrary 0.3
    """
    if not isinstance(entry, Entry):
        raise EntryInvalid('Invalid KeePass Entry.')
    return entry.custom_properties
#---------- Reference (not implemented) ----------
# def deref(self, attribute):
# return self._kp.deref(getattr(self, attribute))
# def ref(self, attribute):
# """Create reference to an attribute of this element."""
# attribute_to_field = {
# 'title': 'T',
# 'username': 'U',
# 'password': 'P',
# 'url': 'A',
# 'notes': 'N',
# 'uuid': 'I',
# }
# return '{{REF:{}@I:{}}}'.format(attribute_to_field[attribute], self.uuid.hex.upper())
#---------- (parent)Group ----------
#---------- Dump XML (not implemented) ----------
#@keyword
# def get_entry_xml(self, entry:Entry, pretty_print=False):
# """Return the XML of the supplied KeePass ``entry``
# """
# if isinstance(entry, Entry):
# entry.dump_xml(pretty_print)
# else:
# raise EntryInvalid('Invalid KeePass Entry.')
#---------- UUID ----------
@keyword
def get_entry_uuid(self, entry:Entry):
"""Return the UUID value of the supplied KeePass ``entry``
Example:
| ${entry} = | `Get Entries By Title` | root_entry | first=True |
| ${value} = | `Get Entry Uuid` | ${entry} |
New in KeePassLibrary 0.3
"""
if isinstance(entry, Entry):
return str(entry.uuid)
else:
raise EntryInvalid('Invalid KeePass Entry.')
# @keyword
# def set_entry_uuid(self, entry:Entry, value):
# """Set the ``UUID`` of the supplied KeePass ``entry`` to the given ``value``
# """
# if isinstance(entry, Entry):
# entry.uuid = value
# else:
# raise EntryInvalid('Invalid KeePass Entry.')
#---------- Expired ----------
@keyword
def get_entry_expired(self, entry:Entry):
"""Return expired value of the supplied KeePass ``entry``
Example:
| ${entry} = | `Get Entries By Title` | root_entry | first=True |
| ${value} = | `Get Entry Expired` | ${entry} |
New in KeePassLibrary 0.3
"""
if isinstance(entry, Entry):
return entry.expired
else:
raise EntryInvalid('Invalid KeePass Entry.')
# @keyword
# def entry_should_be_expired(self, entry:Entry, message=None):
# """Fails if the specified entry is not expired.
#
# Example:
# | ${entry} = | `Get Entries By Title` | root_entry | first=True |
# | ${value} = | `Get Entry Expired` | ${entry} |
#
# New in KeePassLibrary 0.3
# """
# if isinstance(entry, Entry):
# if is_noney(message):
# message = f"The element '{entrylocator}' should be visible, but it is not."
# raise AssertionError(message)
# else:
# raise EntryInvalid('Invalid KeePass Entry.')
#
# @keyword
# def entry_should_not_be_expired(self, entry:Entry):
# """Verifies that the specified entry is not expired.
#
# New in KeePassLibrary 0.3
# """
# if isinstance(entry, Entry):
# return not entry.expired
# else:
# raise EntryInvalid('Invalid KeePass Entry.')
#---------- Expires ----------
@keyword
def set_entry_expires(self, entry:Entry, value:bool):
"""Sets expires value of the supplied KeePass ``entry`` to the given ``value``.
Example:
| ${entry} = | `Get Entries By Title` | root_entry | first=True |
| `Set Entry Expires` | ${entry} | True |
New in KeePassLibrary 0.3
"""
if isinstance(entry, Entry):
entry.expires = value
else:
raise EntryInvalid('Invalid KeePass Entry.')
@keyword
def get_entry_expires(self, entry:Entry):
"""Return expires value of the supplied KeePass ``entry``
Example:
| ${entry} = | `Get Entries By Title` | root_entry | first=True |
| ${value} = | `Get Entry Expires` | ${entry} |
New in KeePassLibrary 0.3
"""
if isinstance(entry, Entry):
return entry.expires
else:
raise EntryInvalid('Invalid KeePass Entry.')
#---------- Expired Time ----------
    # FIXME: not implemented
#@keyword
# def get_entry_expiry_time(self, entry:Entry):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
#
    # FIXME: not implemented
#@keyword
# def set_entry_expiry_time(self, entry:Entry, value):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
#
#---------- Created Time ----------
    # FIXME: not implemented
#@keyword
# def get_entry_created_time(self, entry:Entry):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
#
    # FIXME: not implemented
#@keyword
# def set_entry_created_time(self, entry:Entry, value):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
#---------- Last Access Time ----------
    # FIXME: not implemented
#@keyword
# def get_entry_last_access_time(self, entry:Entry):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
#
    # FIXME: not implemented
#@keyword
# def set_entry_last_access_time(self, entry:Entry, value):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
#
#---------- Modified Time ----------
    # FIXME: not implemented
#@keyword
# def get_entry_modified_time(self, entry:Entry):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
    # FIXME: not implemented
#@keyword
# def set_entry_modified_time(self, entry:Entry, value):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
#---------- Touch ----------
#@keyword
# def entry_touch(self, entry:Entry, modify=False):
# """Update last access time of an entry
# """
# if isinstance(entry, Entry):
# return entry.touch(modify)
# else:
# raise EntryInvalid('Invalid KeePass Entry.')
    #---------- Attachments (not implemented) ----------
    # TODO: Implement
#@keyword
# def get_entry_attachments(self, entry:Entry):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
#
# def attachments(self):
# return self._kp.find_attachments(
# element=self,
# filename='.*',
# regex=True,
# recursive=False
# )
#
    # TODO: Implement
#@keyword
# def add_entry_attachment(self, entry:Entry, ide):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
# def add_attachment(self, id, filename):
# element = E.Binary(
# E.Key(filename),
# E.Value(Ref=str(id))
# )
# self._element.append(element)
#
# return pykeepass.attachment.Attachment(element=element, kp=self._kp)
    # TODO: Implement
#@keyword
# def remove_entry_attachment(self, entry:Entry, ide):
# """*DEPRECATED*
# """
# raise NotImplementedYet('this keyword is not implemented.')
# #
# def delete_attachment(self, attachment):
    #     attachment.delete()
from KeePassLibrary.base import keyword, LibraryComponent, UUID
from KeePassLibrary.errors import DatabaseNotOpened
from robot.utils import is_truthy
class KeePassGroups(LibraryComponent):
    """Keywords that locate Group objects in the currently opened KeePass database.

    Boolean options are normalised with ``is_truthy`` because Robot Framework
    passes keyword arguments as strings (``"False"`` would otherwise be truthy).
    UUID arguments are accepted both as ``UUID`` objects and as their canonical
    string form.
    """

    @keyword
    def get_groups(self, recursive=True, path=None, group=None, **kwargs):
        """Return a list of groups in the open KeePass database matching the given arguments.

        The ``recursive`` argument can be set ``False`` to disable recursive searching, default value is True.\n
        The ``path`` argument sets the path which the groups should match, default value is None.\n
        The ``group`` argument has to match an existing Group; if supplied only groups which are a direct child will be searched, default value is None. See `Get Groups By Name` for information about selecting a group.\n
        See the `Entries and Groups` section for more information about Entries and Groups.\n

        *Additional arguments:*
        - The ``history`` argument can be set to ``True`` to include history groups in the search, default value is False.
        - The ``first`` argument can be set ``True`` this has the effect that only the first match will be returned, default value is False.
        - The ``regex`` argument can be set to ``True`` this enables regular expression searching, default value is False.
        - The ``flags`` argument can be set to modify the regular expression search, default value is None. See the `Regular expression` section for more information about the ``regex`` and ``flags`` syntax.
        - The ``name`` argument can be given to search matching names, default value is None.
        - The ``notes`` argument sets the notes which the groups should match, default value is None.
        - The ``uuid`` argument sets the uuid which the groups should match, default value is None.

        Example:
        | @{groups}= | `Get Groups` | name=.*group | notes=^.{0}$ | regex=True |
        | ${groups}= | `Get Groups` | name=.*group | notes=^.{0}$ | regex=True | first=True |
        | ${group}= | `Get Groups By Name` | subgroup | first=True |
        | @{groups}= | `Get Groups` | subgroup2 | group=${group} |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        # Robot passes booleans as strings; normalise them before pykeepass
        # sees them (otherwise the string "False" would count as True).
        if 'regex' in kwargs:
            kwargs['regex'] = is_truthy(kwargs['regex'])
        if 'first' in kwargs:
            kwargs['first'] = is_truthy(kwargs['first'])
        return self.database.find_groups(is_truthy(recursive), path, group, **kwargs)

    @keyword
    def get_groups_all(self):
        """Return a list of all groups in the open KeePass database.

        See the `Entries and Groups` section for more information about Entries and Groups.\n

        Example:
        | ${groups} = | Get Groups All |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        # A regex matching any name returns every group.
        return self.database.find_groups_by_name('.*',
                                                 regex=True)

    @keyword
    def get_groups_by_name(self, group_name, regex=False, flags=None,
                           group=None, first=False):
        """Return a list of groups in the open KeePass database matching the given string.

        See `Get Groups` for more details about optional arguments.

        Examples:
        | ${groups} = | Get Groups By Name | subgroup |
        | ${groups} = | Get Groups By Name | .* | regex=True |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        return self.database.find_groups_by_name(group_name=group_name,
                                                 regex=is_truthy(regex),
                                                 flags=flags,
                                                 group=group,
                                                 first=is_truthy(first))

    @keyword
    def get_groups_by_path(self, group_path_str=None, regex=False, flags=None,
                           group=None, first=False):
        """Return a list of groups in the open KeePass database matching the given path.

        See `Get Groups` for more details about optional arguments.

        Example:
        | ${groups} = | Get Groups By Path | foobar_group/subgroup |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        # Guard against the default: None has no split() and means
        # "no path filter" downstream.
        group_path_list = None if group_path_str is None else group_path_str.split('/')
        return self.database.find_groups_by_path(group_path_str=group_path_list,
                                                 regex=is_truthy(regex),
                                                 flags=flags,
                                                 group=group,
                                                 first=is_truthy(first))

    @keyword
    def get_groups_by_uuid(self, uuid, regex=False, flags=None,
                           group=None, history=False, first=False):
        """Return a list of groups in the open KeePass database matching the given uuid.

        See `Get Groups` for more details about optional arguments.

        Example:
        | ${groups} = | Get Groups By Uuid | 12345678-1234-5678-1234-567812345678 |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        # Accept both a UUID object and its canonical string form.
        if not isinstance(uuid, UUID):
            uuid = UUID('urn:uuid:' + uuid)
        return self.database.find_groups_by_uuid(uuid=uuid,
                                                 regex=is_truthy(regex),
                                                 flags=flags,
                                                 group=group,
                                                 history=is_truthy(history),
                                                 first=is_truthy(first))

    @keyword
    def get_groups_by_notes(self, notes, regex=False, flags=None,
                            group=None, history=False, first=False):
        """Return a list of groups in the open KeePass database matching the given notes.

        See `Get Groups` for more details about optional arguments.

        Example:
        | ${groups} = | Get Groups By Notes | group notes |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        return self.database.find_groups_by_notes(notes=notes,
                                                  regex=is_truthy(regex),
                                                  flags=flags,
                                                  group=group,
                                                  history=is_truthy(history),
                                                  first=is_truthy(first))
from KeePassLibrary.base import keyword, LibraryComponent, UUID
from KeePassLibrary.errors import DatabaseNotOpened
from robot.utils import is_truthy
class KeePassEntries(LibraryComponent):
    """Keywords that locate Entry objects in the currently opened KeePass database.

    Boolean options are normalised with ``is_truthy`` because Robot Framework
    passes keyword arguments as strings (``"False"`` would otherwise be truthy).
    UUID arguments are accepted both as ``UUID`` objects and as their canonical
    string form.
    """

    @keyword
    def get_entries(self, history=False, first=False, recursive=True,
                    path=None, group=None, **kwargs):
        """Return a list of entries in the open KeePass database matching the given arguments.\n
        The ``history`` argument can be set to ``True`` to include history entries in the search, default value is False.\n
        The ``first`` argument can be set ``True`` this has the effect that only the first match will be returned, default value is False.\n
        The ``recursive`` argument can be set ``False`` to disable recursive searching, default value is True.\n
        The ``path`` argument sets the path which the entries should match, default value is None.\n
        The ``group`` argument has to match an existing Group; if supplied only entries which are a direct child will be searched, default value is None. See ``Get Groups`` for information about selecting a group.\n
        See the `Entries and Groups` section for more information about Entries and Groups.\n

        *Additional arguments:*
        - The ``regex`` argument can be set to ``True`` this enables regular expression searching, default value is False.
        - The ``flags`` argument can be set to modify the regular expression search, default value is None. See the `Regular expression` section for more information about the ``regex`` and ``flags`` syntax.
        - The ``title`` argument can be given to search matching titles, default value is None.
        - The ``username`` argument sets the username which the entries should match, default value is None.
        - The ``password`` argument sets the password which the entries should match, default value is None.
        - The ``url`` argument sets the url which the entries should match, default value is None.
        - The ``notes`` argument sets the notes which the entries should match, default value is None.
        - The ``uuid`` argument sets the uuid which the entries should match, default value is None.

        Examples:
        | @{entries}= | `Get Entries` | title=.*entry | regex=True |
        | ${entry}= | `Get Entries` | title=.*entry | regex=True | first=True |
        | @{entries}= | `Get Entries` | title=.*entry | username=.*user | regex=True |
        | @{entries}= | `Get Entries` | title=.*entry | notes=.*entry notes | regex=True |
        | ${group}= | Get Groups By Name | subgroup | first=True |
        | ${entries}= | Get Entries By Username | foobar | group=${group} | first=True |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        # Robot passes booleans as strings; normalise them before pykeepass
        # sees them (otherwise the string "False" would count as True).
        if 'regex' in kwargs:
            kwargs['regex'] = is_truthy(kwargs['regex'])
        return self.database.find_entries(recursive=is_truthy(recursive),
                                          path=path,
                                          group=group,
                                          history=is_truthy(history),
                                          first=is_truthy(first),
                                          **kwargs)

    @keyword
    def get_entries_all(self):
        """Return a list of all entries in the open KeePass database.\n
        See the `Entries and Groups` section for more information about Entries and Groups.\n

        Example:
        | ${entries} = | `Get Entries All` |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        # A regex matching any title returns every entry.
        return self.database.find_entries_by_title('.*',
                                                   regex=True)

    @keyword
    def get_entries_by_title(self, title, regex=False, flags=None,
                             group=None, history=False, first=False):
        """Return a list of entries in the open KeePass database matching the given ``title``.\n
        See `Get Entries` for more details about optional arguments.

        Example:
        | @{entries} = | `Get Entries By Title` | root_entry |
        => all entries with title: root_entry
        | ${entry} = | `Get Entries By Title` | root_entry | first=True |
        => first entry with title: root_entry
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        return self.database.find_entries_by_title(title,
                                                   is_truthy(regex),
                                                   flags,
                                                   group,
                                                   is_truthy(history),
                                                   is_truthy(first))

    @keyword
    def get_entries_by_username(self, username, regex=False, flags=None,
                                group=None, history=False, first=False):
        """Return a list of entries in the open KeePass database matching the given ``username``.\n
        See `Get Entries` for more details about optional arguments.

        Example:
        | @{entries} = | `Get Entries By Username` | foobar_user |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        return self.database.find_entries_by_username(username,
                                                      is_truthy(regex),
                                                      flags,
                                                      group,
                                                      is_truthy(history),
                                                      is_truthy(first))

    @keyword
    def get_entries_by_password(self, password, regex=False, flags=None,
                                group=None, history=False, first=False):
        """Return a list of entries in the open KeePass database matching the given ``password``.\n
        See `Get Entries` for more details about optional arguments.

        Example:
        | @{entries} = | `Get Entries By Password` | passw0rd |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        return self.database.find_entries_by_password(password,
                                                      is_truthy(regex),
                                                      flags,
                                                      group,
                                                      is_truthy(history),
                                                      is_truthy(first))

    @keyword
    def get_entries_by_url(self, url, regex=False, flags=None,
                           group=None, history=False, first=False):
        """Return a list of entries in the open KeePass database matching the given ``url``.\n
        See `Get Entries` for more details about optional arguments.

        Example:
        | @{entries} = | `Get Entries By Url` | http://example.com |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        return self.database.find_entries_by_url(url,
                                                 is_truthy(regex),
                                                 flags,
                                                 group,
                                                 is_truthy(history),
                                                 is_truthy(first))

    @keyword
    def get_entries_by_notes(self, notes, regex=False, flags=None,
                             group=None, history=False, first=False):
        """Return a list of entries in the open KeePass database matching the given ``notes``.\n
        See `Get Entries` for more details about optional arguments.

        Example:
        | @{entries} = | `Get Entries By Notes` | root entry notes |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        return self.database.find_entries_by_notes(notes,
                                                   is_truthy(regex),
                                                   flags,
                                                   group,
                                                   is_truthy(history),
                                                   is_truthy(first))

    # FIXME: may return more than one match even when first is set to False
    @keyword
    def get_entries_by_path(self, entry_path_str, regex=False, flags=None,
                            group=None, history=False, first=False):
        """Return a list of entries in the open KeePass database matching the given ``path``.\n
        See `Get Entries` for more details about optional arguments.
        Note, only 1 entry can be selected by path.

        Example:
        | ${entry} = | Get Entries By Path | foobar_group/group_entry |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        # The backend expects the path split into its components.
        entry_path_list = entry_path_str.split('/')
        return self.database.find_entries_by_path(entry_path_list,
                                                  is_truthy(regex),
                                                  flags,
                                                  group,
                                                  is_truthy(history),
                                                  is_truthy(first))

    @keyword
    def get_entries_by_uuid(self, uuid, regex=False, flags=None,
                            group=None, history=False, first=False):
        """Return a list of entries in the open KeePass database matching the given ``uuid``.\n
        See `Get Entries` for more details about optional arguments.

        Example:
        | ${entries} = | Get Entries By Uuid | 12345678-1234-5678-1234-567812345678 |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        # Accept both a UUID object and its canonical string form.
        if not isinstance(uuid, UUID):
            uuid = UUID('urn:uuid:' + uuid)
        return self.database.find_entries_by_uuid(uuid,
                                                  is_truthy(regex),
                                                  flags,
                                                  group,
                                                  is_truthy(history),
                                                  is_truthy(first))

    @keyword
    def get_entries_by_string(self, string, regex=False, flags=None,
                              group=None, history=False, first=False):
        """Return a list of entries in the open KeePass database matching the given ``string``.\n
        The ``string`` argument is a dictionary mapping KeePass field names to the values they should match.
        See `Get Entries` for more details about optional arguments.

        Example:
        | &{string}= | Create Dictionary | UserName=foobar_user | Title=group_entry |

        Valid dictionary keys:
        | Title |
        | UserName |
        | Password |
        | URL |
        | Notes |
        | IconID |
        | Tags |
        | History |
        """
        if self.database is None:
            raise DatabaseNotOpened('No KeePass Database Opened.')
        return self.database.find_entries_by_string(string,
                                                    is_truthy(regex),
                                                    flags,
                                                    group,
                                                    is_truthy(history),
                                                    is_truthy(first))
from __future__ import print_function
"""
This script can be used to reorganize symbol libraries
based on regex patterns specified in a JSON file.
Entire libraries can be renamed, or symbols within each
library can be matched and moved
The JSON file should be formatted as such:
{
"LibName" : {
"Filter1*" : "NewLibName",
"Filter2*" : "NewLibName2",
},
"LibNameA" : {
"Cmp*XXX" : "Components",
},
// Simply rename a library
"EntireLib" : "NewLib2",
// Retain original name (e.g. "SomeLibName")
"SomeLibName" : ""
}
"""
import shutil
import os
import argparse
import fnmatch
import sys
import glob
import json
import fnmatch
def get_output_lib(pattern):
    """Return the destination mapped to *pattern* in the global PATTERNS table."""
    return PATTERNS[pattern]


def is_entire_lib(pattern):
    """Return True if *pattern* names a library that is remapped as a whole.

    A library counts as "entire" when its PATTERNS entry is a dict of
    per-symbol filters, or a plain string that contains a '*' wildcard.
    """
    if pattern in PATTERNS:
        filters = PATTERNS[pattern]
        if type(filters) is dict:
            return True
        if '*' in filters:
            return True
    # Explicit False instead of the original implicit None fall-through.
    return False


def get_lib_patterns(lib_name):
    """Return the PATTERNS entry for *lib_name*, or None if there is none."""
    return PATTERNS.get(lib_name, None)


def get_entire_lib_match(lib_name):
    """
    If the library is to be moved entirely,
    return the destination library.
    Otherwise, return None
    """
    patterns = get_lib_patterns(lib_name)
    # A plain string entry means "remap the whole library".
    # BUGFIX: the original tested `type(patterns) in [str, unicode]`;
    # `unicode` does not exist on Python 3 and raised NameError there.
    if isinstance(patterns, str):
        # An empty string means "keep the original library name".
        if patterns == "":
            return lib_name
        return patterns
    return None


def get_matches(lib_name, cmp_name):
    """Return the list of destination libraries whose filters match *cmp_name*.

    An exact (case-insensitive) pattern match wins outright; otherwise every
    fnmatch-style wildcard hit is collected.
    """
    patterns = get_lib_patterns(lib_name)
    if not patterns:
        return []
    matches = []
    name = cmp_name.lower()
    for pattern in patterns:
        output = patterns[pattern]
        pattern = pattern.lower()
        if pattern == name:
            # BUGFIX: exact matches used to return the bare string, whose
            # len() the caller then miscounted as multiple matches; always
            # return a list so callers can take len() and index 0 safely.
            return [output]
        elif fnmatch.fnmatch(name, pattern):
            matches.append(output)
    return matches
parser = argparse.ArgumentParser(description='Reorganizing the KiCad libs is fun!')
parser.add_argument('libs', help='List of source libraries', nargs='+')
parser.add_argument('--dest', help='Path to store the output', action='store', default='output')
parser.add_argument('--real', help='Real run (test run by default)', action='store_true')
parser.add_argument('--silent', help='Suppress output messages', action='store_true')
parser.add_argument('--leave', help='Leave unallocated symbols in the library they started in', action='store_true')
parser.add_argument('--clean', help='Clean output directory before running script', action='store_true')
parser.add_argument('-p', '--patterns', help='Path to pattern file (JSON)', action='store')
parser.add_argument('-i', '--interactive', help='Interactive mode', action='store_true')

args = parser.parse_args()

real_mode = args.real

# Import the schlib utils
import schlib

dst_dir = os.path.abspath(args.dest)

# Output dir must exist if real output is to be made
if not os.path.isdir(dst_dir) and args.real:
    print("dest_dir not a valid directory")
    sys.exit(1)

if args.real and args.clean:
    # TODO: actually clean the output directory before the run
    pass

# Load the remapping rules (see the module docstring for the JSON format)
if args.patterns:
    with open(args.patterns) as f:
        PATTERNS = json.loads(f.read())
else:
    PATTERNS = {}

# Find the source libraries
src_libs = []
for lib in args.libs:
    src_libs += glob.glob(lib)

# Output libraries, keyed by library name
output_libs = {}

# Find any libraries already present in the output directory
if os.path.exists(args.dest) and os.path.isdir(args.dest):
    op_libs = [x for x in os.listdir(args.dest) if x.endswith('.lib')]
    for op_lib in op_libs:
        lib_name = op_lib.replace('.lib', '')
        # BUGFIX: os.listdir() returns bare file names, so the library must
        # be opened relative to the destination directory, not the CWD.
        lib = schlib.SchLib(os.path.join(args.dest, op_lib))
        output_libs[lib_name] = lib
        print("Found existing library - '{l}'".format(l=lib_name))

# Running totals for the final report
allocated_symbols = 0
unallocated_symbols = []
overallocated_symbols = []
def output_lib(name):
    """Fetch (or lazily create) the output library called *name*.

    The lookup is case-insensitive to reduce mistakes.
    """
    for existing in output_libs:
        if existing.lower() == name.lower():
            return output_libs[existing]
    new_lib = schlib.SchLib(os.path.join(dst_dir, name + '.lib'), create=real_mode)
    output_libs[name] = new_lib
    if not args.silent:
        print("Creating new library - '{n}'".format(n=name))
    return new_lib
# Iterate through all source libraries, allocating each symbol to an
# output library according to the PATTERNS rules.
for src_lib in src_libs:
    lib_name = src_lib.split(os.path.sep)[-1].replace('.lib', '')
    lib = schlib.SchLib(src_lib)

    # Should this entire library be copied?
    copy_lib = get_entire_lib_match(lib_name)

    if copy_lib is not None:
        if not args.silent:
            print("Copying entire library '{src}' -> '{dst}'".format(src=lib_name, dst=copy_lib))
        # Pre-create silently so output_lib() does not announce a new library
        # for the whole-library copy case.
        if copy_lib not in output_libs:
            output_libs[copy_lib] = schlib.SchLib(os.path.join(dst_dir, copy_lib + '.lib'), create=real_mode)
        out_lib = output_lib(copy_lib)
        for cmp in lib.components:
            out_lib.addComponent(cmp)
            allocated_symbols += 1
        # Skip any further checks
        continue

    for cmp in lib.components:
        # Destination libraries whose filters match this component.
        # A component should not match more than one filter.
        matches = get_matches(lib_name, cmp.name)

        # No matches found
        if len(matches) == 0:
            if args.leave:
                # Leave the symbol in the library it started in
                out_lib = output_lib(lib_name)
                out_lib.addComponent(cmp)
                if not args.silent:
                    print("No match found for '{cmp}' - leaving in library '{lib}'".format(cmp=cmp.name, lib=lib_name))
            unallocated_symbols.append(lib_name + ' : ' + cmp.name)
            continue

        # Too many matches!
        if len(matches) > 1:
            overallocated_symbols.append(lib_name + ' : ' + cmp.name)
            continue

        match = matches[0]
        out_lib = output_lib(match)
        out_lib.addComponent(cmp)
        allocated_symbols += 1
        if not args.silent:
            print("{lib} : {name} -> {out}".format(lib=lib_name, name=cmp.name, out=match))

# Save the converted libraries (only in --real mode)
for key in output_libs:
    lib = output_libs[key]
    if real_mode:
        lib.save()

# Final report
if len(unallocated_symbols) > 0:
    print("\nUnallocated Symbols:")
    for s in unallocated_symbols:
        print(s)

if len(overallocated_symbols) > 0:
    print("\nOverallocated Symbols:")
    for s in overallocated_symbols:
        print(s)

remaining = len(unallocated_symbols) + len(overallocated_symbols)

print("")
print("Allocated Symbols: {x}".format(x=allocated_symbols))

if remaining > 0:
    print("Symbols remaining: {x}".format(x=remaining))
else:
    print("No symbols remaining! You did well.")
from __future__ import print_function
from schlib import *
import argparse
# cases covered by this script:
# (1) resize pins with posx wrong if component has pins with L direction but not R direction
# (2) resize pins with posx wrong if component has pins with R direction but not L direction
# (3) resize pins with posy wrong if component has pins with U direction but not D direction
# (4) resize pins with posy wrong if component has pins with D direction but not U direction
# (5) resize pins with posx wrong if component has at least one pin wrong in each of the following direction: L, R
# (6) resize pins with posy wrong if component has at least one pin wrong in each of the following direction: U, D
class CheckComponent(object):
    """Wrapper around a schlib component that pre-computes everything the
    pin-resize pass needs.

    ``prerequisites_ok`` is True only when the component satisfies all the
    preconditions checked in ``__init__``; callers should leave the
    component untouched otherwise.
    """

    def __init__(self, component):
        self.component = component
        self.prerequisites_ok = False
        self.header_printed = False

        # Pins grouped by their drawing direction (Left/Right/Up/Down)
        self.pinsL = component.filterPins(direction='L')
        self.pinsL_count = len(self.pinsL)

        self.pinsR = component.filterPins(direction='R')
        self.pinsR_count = len(self.pinsR)

        self.pinsU = component.filterPins(direction='U')
        self.pinsU_count = len(self.pinsU)

        self.pinsD = component.filterPins(direction='D')
        self.pinsD_count = len(self.pinsD)

        self.need_fix_L = False
        self.need_fix_R = False
        self.need_fix_U = False
        self.need_fix_D = False

        ## check the prerequisites

        # component has only one rectangle,
        # assumed to be the body of the component
        if len(component.draw['rectangles']) != 1:
            return

        # all pins L and R must have the same length
        lengths = []
        if self.pinsL_count > 0:
            lengths += [pin['length'] for pin in self.pinsL]
        if self.pinsR_count > 0:
            lengths += [pin['length'] for pin in self.pinsR]
        if lengths and lengths.count(lengths[0]) != len(lengths):
            return

        # all pins U and D must have the same length
        lengths = []
        if self.pinsU_count > 0:
            lengths += [pin['length'] for pin in self.pinsU]
        if self.pinsD_count > 0:
            lengths += [pin['length'] for pin in self.pinsD]
        if lengths and lengths.count(lengths[0]) != len(lengths):
            return

        # pin lengths have to be multiples of 50mil
        for pin in component.pins:
            if (int(pin['length']) % 50) != 0:
                return

        # pin posx and posy have to be multiples of 50mil
        for pin in component.pins:
            if (int(pin['posx']) % 50) != 0 or (int(pin['posy']) % 50) != 0:
                return

        # check if at least one pin is off the 100mil grid in each direction
        if self.pinsL_count > 0 and self.pinsR_count > 0:
            for pin in self.pinsL:
                if (int(pin['posx']) % 100) != 0:
                    self.need_fix_L = True
                    break
            for pin in self.pinsR:
                if (int(pin['posx']) % 100) != 0:
                    self.need_fix_R = True
                    break

        if self.pinsU_count > 0 and self.pinsD_count > 0:
            for pin in self.pinsU:
                if (int(pin['posy']) % 100) != 0:
                    self.need_fix_U = True
                    break
            for pin in self.pinsD:
                if (int(pin['posy']) % 100) != 0:
                    self.need_fix_D = True
                    break

        self.prerequisites_ok = True

    def print_header(self):
        """Print the component header once, before the first reported change."""
        if not self.header_printed:
            # BUGFIX: originally printed `component.name`, which silently
            # resolved to the script's global loop variable instead of the
            # wrapped component; use the instance attribute.
            print('\tcomponent: %s' % self.component.name)
            self.header_printed = True

    def resize_pin(self, pin, new_len, pos, new_pos):
        """Report and apply a new length and position (*pos* key) to *pin*, in place."""
        self.print_header()
        print('\t\t[resize] pin: %s (%s), length: %s -> %i, %s: %s -> %i' %
              (pin['name'], pin['num'], pin['length'], new_len, pos, pin[pos], new_pos))
        pin['length'] = str(new_len)
        pin[pos] = str(new_pos)
def resize_component_pins(component):
    """Snap the pins of *component* onto the 100mil grid by growing or
    shrinking each misaligned pin by 50mil, per the six cases listed in
    the module header.

    Returns True when at least one pin was changed (i.e. the component
    header was printed), False otherwise.
    """
    # NOTE: the parameter is deliberately rebound to the CheckComponent
    # wrapper; everything below uses its pre-computed pin groups and flags.
    # NOTE(review): prerequisites_ok is never consulted here, so cases
    # (1)-(4) run even when __init__ bailed out early -- confirm intended.
    component = CheckComponent(component)
    # case (1): only left-facing pins -- shift posx with the length change
    if component.pinsL_count > 0 and component.pinsR_count == 0:
        for pin in component.pinsL:
            posx = int(pin['posx'])
            length = int(pin['length'])
            if (posx % 100) != 0:
                if length <= 100:
                    length += 50
                    posx += 50
                elif length >= 150:
                    length -= 50
                    posx -= 50
                component.resize_pin(pin, length, 'posx', posx)
    # case (2): only right-facing pins -- mirror image of case (1)
    if component.pinsR_count > 0 and component.pinsL_count == 0:
        for pin in component.pinsR:
            posx = int(pin['posx'])
            length = int(pin['length'])
            if (posx % 100) != 0:
                if length <= 100:
                    length += 50
                    posx -= 50
                elif length >= 150:
                    length -= 50
                    posx += 50
                component.resize_pin(pin, length, 'posx', posx)
    # case (3): only up-facing pins -- shift posy with the length change
    if component.pinsU_count > 0 and component.pinsD_count == 0:
        for pin in component.pinsU:
            posy = int(pin['posy'])
            length = int(pin['length'])
            if (posy % 100) != 0:
                if length <= 100:
                    length += 50
                    posy -= 50
                elif length >= 150:
                    length -= 50
                    posy += 50
                component.resize_pin(pin, length, 'posy', posy)
    # case (4): only down-facing pins -- mirror image of case (3)
    if component.pinsD_count > 0 and component.pinsU_count == 0:
        for pin in component.pinsD:
            posy = int(pin['posy'])
            length = int(pin['length'])
            if (posy % 100) != 0:
                if length <= 100:
                    length += 50
                    posy += 50
                elif length >= 150:
                    length -= 50
                    posy -= 50
                component.resize_pin(pin, length, 'posy', posy)
    # case (5): misaligned pins on BOTH left and right sides -- every L/R
    # pin is resized, moving away from or toward the body by 50mil
    if component.need_fix_L and component.need_fix_R:
        for pin in (component.pinsL + component.pinsR):
            posx = int(pin['posx'])
            length = int(pin['length'])
            if length <= 100:
                length += 50
                posx += 50 if posx > 0 else -50
            elif length >= 150:
                length -= 50
                posx += -50 if posx > 0 else 50
            component.resize_pin(pin, length, 'posx', posx)
    # case (6): misaligned pins on BOTH top and bottom sides -- same as
    # case (5) but along the y axis
    if component.need_fix_U and component.need_fix_D:
        for pin in (component.pinsU + component.pinsD):
            posy = int(pin['posy'])
            length = int(pin['length'])
            if length <= 100:
                length += 50
                posy += 50 if posy > 0 else -50
            elif length >= 150:
                length -= 50
                posy += -50 if posy > 0 else 50
            component.resize_pin(pin, length, 'posy', posy)
    # True iff resize_pin() printed the component header at least once
    return component.header_printed
parser = argparse.ArgumentParser()
parser.add_argument('libfiles', nargs='+')
parser.add_argument('-y', '--apply', help='Apply the suggested modifications in the report', action='store_true')
parser.add_argument('-v', '--verbose', help='Print output for all pins - violating or not', action='store_true')
args = parser.parse_args()

# Process every library given on the command line.
# NOTE: the loop variable must stay named `component` -- CheckComponent's
# header printing resolves that name globally.
for libfile in args.libfiles:
    lib = SchLib(libfile)
    print('library: %s' % libfile)
    for component in lib.components:
        was_modified = resize_component_pins(component)
        if not was_modified and args.verbose:
            print('\tcomponent: %s......OK' % component.name)
    if args.apply:
        lib.save()
from __future__ import print_function
from schlib import *
import argparse, sys
# cases covered by this script:
# (1) resize field text sizes that are not 50mils
class CheckComponent(object):
    """Collects the fields and pins of a schematic component whose text sizes
    violate the 50 mil convention, and provides helpers that resize them
    in place (each helper logs the change it makes).
    """

    def __init__(self, component):
        self.component = component
        self.prerequisites_ok = False
        # Set to True the first time a change is reported for this component.
        self.header_printed = False
        self.fieldsToFix = []
        self.pinTextsToFix = []
        self.pinNumsToFix = []
        # Field text sizes have to be exactly 50 mils.
        for field in component.fields:
            if int(field['text_size']) != 50:
                self.fieldsToFix.append(field)
        # Pin name/number text sizes must not exceed 50 mils.
        for pin in component.pins:
            if int(pin['name_text_size']) > 50:
                self.pinTextsToFix.append(pin)
            if int(pin['num_text_size']) > 50:
                self.pinNumsToFix.append(pin)
        self.prerequisites_ok = True

    def print_header(self):
        """Print the component header once, before the first reported change."""
        if not self.header_printed:
            # BUGFIX: previously read the module-level global `component`,
            # which only worked by accident inside the script's main loop and
            # raised NameError elsewhere. Use the stored component instead.
            print('\tcomponent: %s' % self.component.name)
            self.header_printed = True

    def resize_field(self, field):
        """Resize a field's text to 50 mils and report the change."""
        self.print_header()
        print('\t\t[resize] field size: %s -> %i' %
              (field['text_size'], 50))
        field['text_size'] = "50"

    def resize_pin_name_text(self, pin):
        """Resize a pin's name text to 50 mils and report the change."""
        self.print_header()
        print('\t\t[resize] pin name text size: %s -> %i' %
              (pin['name_text_size'], 50))
        pin['name_text_size'] = "50"

    def resize_pin_num_text(self, pin):
        """Resize a pin's number text to 50 mils and report the change."""
        self.print_header()
        print('\t\t[resize] pin num text size: %s -> %i' %
              (pin['num_text_size'], 50))
        pin['num_text_size'] = "50"
def resize_component_fields(component):
    """Resize non-conforming field/pin text sizes of one component to 50 mils.

    Returns True when at least one modification was reported (header printed).
    """
    # Use a distinct name for the checker so the raw component stays visible.
    checker = CheckComponent(component)
    # Case 1: field text size different from 50 mils (size 0 is left alone).
    for field in checker.fieldsToFix:
        if int(field['text_size']) != 0:
            checker.resize_field(field)
    # Case 2: pin name text size over 50 mils.
    for pin in checker.pinTextsToFix:
        if int(pin['name_text_size']) != 0:
            checker.resize_pin_name_text(pin)
    # Case 3: pin number text size over 50 mils.
    for pin in checker.pinNumsToFix:
        if int(pin['num_text_size']) != 0:
            checker.resize_pin_num_text(pin)
    return checker.header_printed
# Command-line entry point: scan each given .lib file, report text-size
# violations, and optionally write the fixes back.
# BUGFIX: the argparse description was copy-pasted from another script
# ("Moves a component symbol between libraries"); this tool fixes text sizes.
parser = argparse.ArgumentParser(description='Fixes field and pin text sizes in schematic library components')
parser.add_argument('libfiles', nargs='+')
parser.add_argument('-y', '--apply', help='Apply the suggested modifications in the report', action='store_true')
parser.add_argument('-v', '--verbose', help='Print output for all pins - violating or not', action='store_true')
args = parser.parse_args()
for libfile in args.libfiles:
    lib = SchLib(libfile)
    print('library: %s' % libfile)
    for component in lib.components:
        # True when resize_component_fields printed a report for the component.
        component_printed = resize_component_fields(component)
        if not component_printed and args.verbose:
            print('\tcomponent: %s......OK' % component.name)
    if args.apply:
        lib.save()
import json
import re
import ssl
import urllib3
from os import environ
from kubernetes import client, config, dynamic, stream
from robot.api import logger
from robot.api.deco import library
from string import digits, ascii_lowercase
from random import choices
from KubeLibrary.exceptions import BearerTokenWithPrefixException
from KubeLibrary.version import version
# supressing SSL warnings when using self-signed certs
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class DynamicClient(dynamic.DynamicClient):
    """Dynamic client wrapper exposing its underlying client as ``api_client``.

    ``_add_api`` tweaks TLS settings through ``<api>.api_client``; the dynamic
    client stores the wrapped ApiClient in ``client``, so alias it here.
    """

    @property
    def api_client(self):
        # Alias the wrapped ApiClient under the name the rest of the library uses.
        return self.client
@library(scope="GLOBAL", version=version, auto_keywords=True)
class KubeLibrary:
"""KubeLibrary is a Robot Framework test library for Kubernetes.
    The approach taken by this library is to provide an easy-to-access representation of kubernetes objects that can
    then be accessed to define high-level keywords for tests.
= Kubeconfigs =
By default ~/.kube/config is used. Kubeconfig location
can also be passed by setting KUBECONFIG environment variable or as Library argument.
| ***** Settings *****
| Library KubeLibrary /path/to/kubeconfig
= Context =
By default current context from kubeconfig is used. Setting multiple contexts in
different test suites allows working on multiple clusters.
| ***** Settings *****
| Library KubeLibrary context=k3d-k3d-cluster2
= Bearer token authentication =
It is possible to authenticate using bearer token by passing API url, bearer token and optionally CA certificate.
| ***** Settings *****
| Library KubeLibrary api_url=%{K8S_API_URL} bearer_token=%{K8S_TOKEN} ca_cert=%{K8S_CA_CRT}
= In cluster execution =
If tests are supposed to be executed from within cluster, KubeLibrary can be configured to use standard
token authentication. Just set incluster parameter to True.
= Auth methods precedence =
If enabled, auth methods takes precedence in following order:
1. Incluster
2. Bearer Token
3. Kubeconfig
| ***** Settings *****
| Library KubeLibrary None True
"""
def __init__(self, kube_config=None, context=None, api_url=None, bearer_token=None,
ca_cert=None, incluster=False, cert_validation=True):
"""KubeLibrary can be configured with several optional arguments.
- ``kube_config``:
Path pointing to kubeconfig of target Kubernetes cluster.
- ``context``:
Active context. If None current_context from kubeconfig is used.
- ``api_url``:
K8s API url, used for bearer token authenticaiton.
- ``bearer_token``:
Bearer token, used for bearer token authenticaiton. Do not include 'Bearer ' prefix.
- ``ca_cert``:
Optional CA certificate file path, used for bearer token authenticaiton.
- ``incuster``:
Default False. Indicates if used from within k8s cluster. Overrides kubeconfig.
- ``cert_validation``:
Default True. Can be set to False for self-signed certificates.
Environment variables:
- INIT_FOR_LIBDOC_ONLY:
Set to '1' to generate keyword documentation and skip to load a kube config..
"""
if "1" == environ.get('INIT_FOR_LIBDOC_ONLY', "0"):
return
self.reload_config(kube_config=kube_config, context=context, api_url=api_url, bearer_token=bearer_token,
ca_cert=ca_cert, incluster=incluster, cert_validation=cert_validation)
@staticmethod
def get_proxy():
return environ.get('https_proxy') or environ.get('HTTPS_PROXY') or environ.get('http_proxy') or environ.get('HTTP_PROXY')
@staticmethod
def get_no_proxy():
return environ.get('no_proxy') or environ.get('NO_PROXY')
@staticmethod
def generate_alphanumeric_str(size):
"""Generates a random alphanumeric string with given size.
Returns a string.
- ``size``:
Desired size of the output string
"""
return "".join(choices(ascii_lowercase + digits, k=size))
@staticmethod
def evaluate_callable_from_k8s_client(attr_name, *args, **kwargs):
"""Evaluates a callable from kubernetes client.
Returns the output of the client callable.
- ``attr_name``:
Callable name
- ``*args``:
Positional arguments for argument forwarding
- ``**kwargs``:
Keyword arguments for argument forwarding
"""
attr = getattr(client, attr_name, None)
assert callable(attr), f"kubernetes.client does not contain {attr_name}!"
return attr(*args, **kwargs)
def get_dynamic_resource(self, api_version, kind):
"""Returns a dynamic resource based on the provided api version and kind.
- ``api_version``:
Api version of the desired kubernetes resource
- ``kind``:
Kind of the desired kubernetes resource
"""
return self.dynamic.resources.get(api_version=api_version, kind=kind)
def get(self, api_version, kind, **kwargs):
"""Retrieves resource instances based on the provided parameters.
Can be optionally given a ``namespace``, ``name``, ``label_selector``, ``body`` and ``field_selector``.
Returns a resource list.
- ``api_version``:
Api version of the desired kubernetes resource
- ``kind``:
Kind of the desired kubernetes resource
- ``**kwargs``:
Keyword arguments for argument forwarding
"""
resource = self.get_dynamic_resource(api_version, kind)
return resource.get(**kwargs)
def create(self, api_version, kind, **kwargs):
"""Creates resource instances based on the provided configuration.
If the resource is namespaced (ie, not cluster-level), then one of ``namespace``, ``label_selector``, or ``field_selector`` is required.
If the resource is cluster-level, then one of ``name``, ``label_selector``, or ``field_selector`` is required.
Can be optionally given a kubernetes manifest (``body``) which respects the above considerations.
- ``api_version``:
Api version of the desired kubernetes resource
- ``kind``:
Kind of the desired kubernetes resource
- ``**kwargs``:
Keyword arguments for argument forwarding
"""
resource = self.get_dynamic_resource(api_version, kind)
resource.create(**kwargs)
def delete(self, api_version, kind, **kwargs):
"""Deletes resource instances based on the provided configuration.
Can be optionally given a ``namespace``, ``name``, ``label_selector``, ``body`` and ``field_selector``.
- ``api_version``:
Api version of the desired kubernetes resource
- ``kind``:
Kind of the desired kubernetes resource
- ``**kwargs``:
Keyword arguments for argument forwarding
"""
resource = self.get_dynamic_resource(api_version, kind)
resource.delete(**kwargs)
def patch(self, api_version, kind, **kwargs):
"""Patches resource instances based on the provided parameters.
Can be optionally given a ``namespace``, ``name``, ``label_selector``, ``body`` and ``field_selector``.
- ``api_version``:
Api version of the desired kubernetes resource
- ``kind``:
Kind of the desired kubernetes resource
- ``**kwargs``:
Keyword arguments for argument forwarding
"""
resource = self.get_dynamic_resource(api_version, kind)
resource.patch(**kwargs)
def replace(self, api_version, kind, **kwargs):
"""Replaces resource instances based on the provided parameters.
Can be optionally given a ``namespace``, ``name``, ``label_selector``, ``body`` and ``field_selector``.
- ``api_version``:
Api version of the desired kubernetes resource
- ``kind``:
Kind of the desired kubernetes resource
- ``**kwargs``:
Keyword arguments for argument forwarding
"""
resource = self.get_dynamic_resource(api_version, kind)
resource.replace(**kwargs)
    def reload_config(self, kube_config=None, context=None, api_url=None, bearer_token=None, ca_cert=None, incluster=False, cert_validation=True):
        """Reload the KubeLibrary to be configured with different optional arguments.
        This can be used to connect to a different cluster during the same test.
        - ``kube_config``:
          Path pointing to kubeconfig of target Kubernetes cluster.
        - ``context``:
          Active context. If None current_context from kubeconfig is used.
        - ``api_url``:
          K8s API url, used for bearer token authentication.
        - ``bearer_token``:
          Bearer token, used for bearer token authentication. Do not include 'Bearer ' prefix.
        - ``ca_cert``:
          Optional CA certificate file path, used for bearer token authentication.
        - ``incluster``:
          Default False. Indicates if used from within k8s cluster. Overrides kubeconfig.
        - ``cert_validation``:
          Default True. Can be set to False for self-signed certificates.
        Environment variables:
        - HTTP_PROXY:
          Proxy URL
        """
        # Auth precedence (see class docs): incluster > bearer token > kubeconfig.
        self.api_client = None
        self.cert_validation = cert_validation
        if incluster:
            try:
                config.load_incluster_config()
            except config.config_exception.ConfigException as e:
                logger.error('Are you sure tests are executed from within k8s cluster?')
                raise e
        elif api_url and bearer_token:
            if bearer_token.startswith('Bearer '):
                raise BearerTokenWithPrefixException
            configuration = client.Configuration()
            # Propagate proxy settings from the environment into this configuration.
            configuration._default.proxy = KubeLibrary.get_proxy()
            configuration._default.no_proxy = KubeLibrary.get_no_proxy()
            configuration.api_key["authorization"] = bearer_token
            configuration.api_key_prefix['authorization'] = 'Bearer'
            configuration.host = api_url
            configuration.ssl_ca_cert = ca_cert
            # Bearer-token path builds its ApiClient explicitly.
            self.api_client = client.ApiClient(configuration)
        else:
            try:
                config.load_kube_config(kube_config, context)
                client.Configuration._default.proxy = KubeLibrary.get_proxy()
                client.Configuration._default.no_proxy = KubeLibrary.get_no_proxy()
            except TypeError:
                # load_kube_config raises TypeError when no config file is found.
                logger.error('Neither KUBECONFIG nor ~/.kube/config available.')
        # Incluster/kubeconfig paths: build the client from the (possibly
        # just-loaded) default configuration.
        if not self.api_client:
            self.api_client = client.ApiClient(configuration=client.Configuration().get_default_copy())
        # Register the API groups used by the keywords below as attributes.
        self._add_api('v1', client.CoreV1Api)
        self._add_api('networkingv1api', client.NetworkingV1Api)
        self._add_api('batchv1', client.BatchV1Api)
        self._add_api('appsv1', client.AppsV1Api)
        # self._add_api('batchv1_beta1', client.BatchV1Api)
        self._add_api('custom_object', client.CustomObjectsApi)
        self._add_api('rbac_authv1_api', client.RbacAuthorizationV1Api)
        self._add_api('autoscalingv1', client.AutoscalingV1Api)
        self._add_api('dynamic', DynamicClient)
def _add_api(self, reference, class_name):
self.__dict__[reference] = class_name(self.api_client)
if not self.cert_validation:
self.__dict__[reference].api_client.rest_client.pool_manager.connection_pool_kw['cert_reqs'] = ssl.CERT_NONE
def k8s_api_ping(self):
"""Performs GET on /api/v1/ for simple check of API availability.
Returns tuple of (response data, response status, response headers). Can be used as prerequisite in tests.
"""
path_params = {}
query_params = []
header_params = {}
auth_settings = ['BearerToken']
resp = self.v1.api_client.call_api('/api/v1/', 'GET',
path_params,
query_params,
header_params,
response_type='str',
auth_settings=auth_settings,
async_req=False,
_return_http_data_only=False)
return resp
def list_namespace(self, label_selector=""):
"""Lists available namespaces.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of namespaces.
"""
ret = self.v1.list_namespace(watch=False, label_selector=label_selector)
return ret.items
def get_namespaces(self, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespace.
Gets a list of available namespaces.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of namespaces names.
"""
return self.filter_names(self.list_namespace(label_selector=label_selector))
def get_healthy_nodes_count(self, label_selector=""):
"""Counts node with KubeletReady and status True.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Can be used to check number of healthy nodes. Can be used as prerequisite in tests.
"""
ret = self.v1.list_node(watch=False, label_selector=label_selector)
healthy_nods = []
for item in ret.items:
for condition in item.status.conditions:
if condition.reason == 'KubeletReady' and condition.status == 'True':
healthy_nods.append(item.metadata.name)
return len(healthy_nods)
def get_pod_names_in_namespace(self, name_pattern, namespace, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_pod_by_pattern.
Gets pod name matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of strings.
- ``name_pattern``:
Pod name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_pod(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern + '.*')
return [item.metadata.name for item in ret.items if r.match(item.metadata.name)]
def list_namespaced_pod_by_pattern(self, name_pattern, namespace, label_selector=""):
"""List pods matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of pods.
- ``name_pattern``:
Pod name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_pod(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
pods = [item for item in ret.items if r.match(item.metadata.name)]
return pods
def get_pods_in_namespace(self, name_pattern, namespace, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_pod_by_pattern.
Gets pods matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of pods.
- ``name_pattern``:
Pod name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_pod(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
pods = [item for item in ret.items if r.match(item.metadata.name)]
return pods
def read_namespaced_pod_log(self, name, namespace, container):
"""Gets container logs of given pod in given namespace.
Returns logs.
- ``name``:
Pod name to check
- ``namespace``:
Namespace to check
- ``container``:
Container to check
"""
pod_logs = self.v1.read_namespaced_pod_log(name=name, namespace=namespace, container=container, follow=False)
return pod_logs
def get_pod_logs(self, name, namespace, container):
"""*DEPRECATED* Will be removed in v1.0.0. Use read_namespaced_pod_log.
Gets container logs of given pod in given namespace.
Returns logs.
- ``name``:
Pod name to check
- ``namespace``:
Namespace to check
- ``container``:
Container to check
"""
pod_logs = self.v1.read_namespaced_pod_log(name=name, namespace=namespace, container=container, follow=False)
return pod_logs
def list_namespaced_config_map_by_pattern(self, name_pattern, namespace, label_selector=""):
"""Lists configmaps matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of configmaps.
- ``name_pattern``:
configmap name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_config_map(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
configmaps = [item for item in ret.items if r.match(item.metadata.name)]
return configmaps
def get_configmaps_in_namespace(self, name_pattern, namespace, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_config_map_by_pattern.
Gets configmaps matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of configmaps.
- ``name_pattern``:
configmap name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_config_map(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
configmaps = [item for item in ret.items if r.match(item.metadata.name)]
return configmaps
def list_namespaced_service_account_by_pattern(self, name_pattern, namespace, label_selector=""):
"""Lists service accounts matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of service accounts.
- ``name_pattern``:
Service Account name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_service_account(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
service_accounts = [item for item in ret.items if r.match(item.metadata.name)]
return service_accounts
def get_service_accounts_in_namespace(self, name_pattern, namespace, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_service_account_by_pattern.
Gets service accounts matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of service accounts.
- ``name_pattern``:
Service Account name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_service_account(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
service_accounts = [item for item in ret.items if r.match(item.metadata.name)]
return service_accounts
def list_namespaced_deployment_by_pattern(self, name_pattern, namespace, label_selector=""):
"""Gets deployments matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of deployments.
- ``name_pattern``:
deployment name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.appsv1.list_namespaced_deployment(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
deployments = [item for item in ret.items if r.match(item.metadata.name)]
return deployments
def get_deployments_in_namespace(self, name_pattern, namespace, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_deployment_by_pattern.
Gets deployments matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of deployments.
- ``name_pattern``:
deployment name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.appsv1.list_namespaced_deployment(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
deployments = [item for item in ret.items if r.match(item.metadata.name)]
return deployments
def list_namespaced_replica_set_by_pattern(self, name_pattern, namespace, label_selector=""):
"""Lists replicasets matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of replicasets.
- ``name_pattern``:
replicaset name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.appsv1.list_namespaced_replica_set(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
replicasets = [item for item in ret.items if r.match(item.metadata.name)]
return replicasets
def get_replicasets_in_namespace(self, name_pattern, namespace, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_replica_set_by_pattern.
Gets replicasets matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of replicasets.
- ``name_pattern``:
replicaset name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.appsv1.list_namespaced_replica_set(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
replicasets = [item for item in ret.items if r.match(item.metadata.name)]
return replicasets
def list_namespaced_job_by_pattern(self, name_pattern, namespace, label_selector=""):
"""Gets jobs matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of jobs.
- ``name_pattern``:
job name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.batchv1.list_namespaced_job(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
jobs = [item for item in ret.items if r.match(item.metadata.name)]
return jobs
def get_jobs_in_namespace(self, name_pattern, namespace, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_job_by_pattern.
Gets jobs matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of jobs.
- ``name_pattern``:
job name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.batchv1.list_namespaced_job(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
jobs = [item for item in ret.items if r.match(item.metadata.name)]
return jobs
def list_namespaced_secret_by_pattern(self, name_pattern, namespace, label_selector=""):
"""Lists secrets matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of secrets.
- ``name_pattern``:
secret name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_secret(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
secrets = [item for item in ret.items if r.match(item.metadata.name)]
return secrets
def get_secrets_in_namespace(self, name_pattern, namespace, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_secret_by_pattern.
Gets secrets matching pattern in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of secrets.
- ``name_pattern``:
secret name pattern to check
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_secret(namespace, watch=False, label_selector=label_selector)
r = re.compile(name_pattern)
secrets = [item for item in ret.items if r.match(item.metadata.name)]
return secrets
def get_namespaced_pod_exec(self, name, namespace, argv_cmd, container=None):
"""Exec command on selected container for POD.
Returns command stdout/stderr
- ``name``:
pod name
- ``namespace``:
namespace to check
- ``argv_cmd``:
command to be executed using argv syntax: ["/bin/sh", "-c", "ls"]
it do not use shell as default!
- ``container``:
container on which we run exec, default: None
"""
if not isinstance(argv_cmd, list) or not len(argv_cmd):
raise TypeError(
f"argv_cmd parameter should be a list and contains values like [\"/bin/bash\", \"-c\", \"ls\"] "
f"not {argv_cmd}")
if not container:
return stream.stream(self.v1.connect_get_namespaced_pod_exec,
name,
namespace,
command=argv_cmd,
stderr=True,
stdin=True,
stdout=True,
tty=False).strip()
else:
return stream.stream(self.v1.connect_get_namespaced_pod_exec,
name,
namespace,
container=container,
command=argv_cmd,
stderr=True,
stdin=True,
stdout=True,
tty=False).strip()
def filter_names(self, objects):
"""Filter .metadata.name for list of k8s objects.
Returns list of strings.
- ``objects``:
List of k8s objects
"""
return [obj.metadata.name for obj in objects]
def filter_by_key(self, objects, key, match):
"""Filter object with key matching value for list of k8s objects.
Returns list of objects.
- ``objects``:
List of k8s objects
- ``key``:
Key to match
- ``match``:
Value of the key based on which objects will be included
"""
return [obj for obj in objects if getattr(obj, key) == match]
def filter_deployments_names(self, deployments):
"""*DEPRECATED* Will be removed in v1.0.0. See examples in TBD.
Returns list of strings.
- ``deployments``:
List of deployments objects
"""
return self.filter_names(deployments)
def filter_replicasets_names(self, replicasets):
"""*DEPRECATED* Will be removed in v1.0.0. See examples in TBD.
Returns list of strings.
- ``replicasets``:
List of replicasets objects
"""
return self.filter_names(replicasets)
def filter_pods_names(self, pods):
"""*DEPRECATED* Will be removed in v1.0.0. See examples in TBD.
Filter pod names for list of pods.
Returns list of strings.
- ``pods``:
List of pods objects
"""
return self.filter_names(pods)
def filter_service_accounts_names(self, service_accounts):
"""*DEPRECATED* Will be removed in v1.0.0. See examples in TBD.
Filter service accounts names for list of service accounts.
Returns list of strings.
- ``service_accounts``:
List of service accounts objects
"""
return self.filter_names(service_accounts)
def filter_configmap_names(self, configmaps):
"""*DEPRECATED* Will be removed in v1.0.0. See examples in TBD.
Filter configmap names for list of configmaps.
Returns list of strings.
- ``configmaps``:
List of configmap objects
"""
return self.filter_names(configmaps)
def filter_endpoints_names(self, endpoints):
"""Filter endpoints names for list of endpoints.
Returns list of strings.
- ``endpoints``:
List of endpoint objects
"""
return self.filter_names(endpoints.items)
@staticmethod
def filter_pods_containers_by_name(pods, name_pattern):
"""Filters pods containers by name for given list of pods.
Returns lists of containers (flattens).
- ``pods``:
List of pods objects
"""
containers = []
r = re.compile(name_pattern)
for pod in pods:
for container in pod.spec.containers:
if r.match(container.name):
containers.append(container)
return containers
@staticmethod
def filter_containers_images(containers):
"""Filters container images for given lists of containers.
Returns list of images.
- ``containers``:
List of containers
"""
return [container.image for container in containers]
@staticmethod
def filter_containers_resources(containers):
"""Filters container resources for given lists of containers.
Returns list of resources.
- ``containers``:
List of containers
"""
return [container.resources for container in containers]
@staticmethod
def filter_pods_containers_statuses_by_name(pods, name_pattern):
"""Filters pods containers statuses by container name for given list of pods.
Returns lists of containers statuses.
- ``pods``:
List of pods objects
"""
container_statuses = []
r = re.compile(name_pattern)
for pod in pods:
for container_status in pod.status.container_statuses:
if r.match(container_status.name):
container_statuses.append(container_status)
return container_statuses
def read_namespaced_pod_status(self, name, namespace):
"""Reads pod status in given namespace.
- ``name``:
Name of pod.
- ``namespace``:
Namespace to check
"""
ret = self.v1.read_namespaced_pod_status(name, namespace)
return ret.status
def get_pod_status_in_namespace(self, name, namespace):
"""*DEPRECATED* Will be removed in v1.0.0. Use read_namespaced_pod_status.
- ``name``:
Name of pod.
- ``namespace``:
Namespace to check
"""
ret = self.v1.read_namespaced_pod_status(name, namespace)
return ret.status.phase
@staticmethod
def assert_pod_has_labels(pod, labels_json):
"""Assert pod has labels.
Returns True/False
- ``pod``:
Pod object.
- ``labels_json``:
JSON representing labels
"""
try:
labels = json.loads(labels_json)
for k, v in labels.items():
if pod.metadata.labels and k in pod.metadata.labels:
if pod.metadata.labels[k] != v:
logger.error(f'Label "{k}" value "{v}" not matching actual "{pod.metadata.labels[k]}"')
return False
else:
logger.error(f'Label "{k}" not found in actual')
return False
return True
except json.JSONDecodeError:
logger.error(f'Failed parsing Pod Labels JSON:{labels_json}')
return False
@staticmethod
def assert_pod_has_annotations(pod, annotations_json):
"""Assert pod has annotations.
Returns True/False
- ``pod``:
Pod object.
- ``annotations_json``:
JSON representing annotations
"""
try:
annotations = json.loads(annotations_json)
for k, v in annotations.items():
if pod.metadata.annotations and k in pod.metadata.annotations:
if pod.metadata.annotations[k] != v:
logger.error(f'Annotation "{k}" value "{v}" not matching actual "{pod.metadata.annotations[k]}"')
return False
else:
logger.error(f'Annotation "{k}" not found in actual')
return False
return True
except json.JSONDecodeError:
logger.error(f'Failed parsing Pod Annotations JSON:{annotations_json}')
return False
@staticmethod
def assert_container_has_env_vars(container, env_vars_json):
"""Assert container has env vars.
Returns True/False
- ``container``:
Container object.
- ``env_var_json``:
JSON representing env vars i.e.: {"EXAMPLE_VAR": "examplevalue"}
"""
try:
env_vars = json.loads(env_vars_json)
for k, v in env_vars.items():
found = False
for ev in container.env:
if k == ev.name and v == ev.value:
found = True
break
elif k == ev.name and v != ev.value:
logger.error(f'Env var "{k}" value "{v}" not matching actual "{ev.value}"')
return False
if not found:
logger.error(f'Env var "{k}" not found in actual')
return False
return True
except json.JSONDecodeError:
logger.error(f'Failed parsing Container Env Var JSON:{env_vars_json}')
return False
def list_namespaced_service(self, namespace, label_selector=""):
"""Gets services in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of strings.
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_service(namespace, watch=False, label_selector=label_selector)
return [item for item in ret.items]
def get_services_in_namespace(self, namespace, label_selector=""):
"""*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_service.
Gets services in given namespace.
Can be optionally filtered by label. e.g. label_selector=label_key=label_value
Returns list of strings.
- ``namespace``:
Namespace to check
"""
ret = self.v1.list_namespaced_service(namespace, watch=False, label_selector=label_selector)
return [item.metadata.name for item in ret.items]
def read_namespaced_service(self, name, namespace):
"""Gets service details in given namespace.
Returns Service object representation. Can be accessed using
| Should Be Equal As integers | ${service_details.spec.ports[0].port} | 8080 |
- ``name``:
Name of service.
- ``namespace``:
Namespace to check
"""
ret = self.v1.read_namespaced_service(name, namespace)
return ret
def get_service_details_in_namespace(self, name, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use read_namespaced_service.

    Returns Service object representation for ``name`` in ``namespace``.
    Can be accessed using
    | Should Be Equal As integers | ${service_details.spec.ports[0].port} | 8080 |
    """
    return self.v1.read_namespaced_service(name, namespace)
def list_namespaced_horizontal_pod_autoscaler(self, namespace, label_selector=""):
    """Gets Horizontal Pod Autoscalers in given namespace.

    Can be optionally filtered by label, e.g. label_selector=label_key=label_value.
    Returns list of V1HorizontalPodAutoscaler objects.

    - ``namespace``:
      Namespace to check
    """
    hpas = self.autoscalingv1.list_namespaced_horizontal_pod_autoscaler(namespace, watch=False, label_selector=label_selector)
    return list(hpas.items)
def get_hpas_in_namespace(self, namespace, label_selector=""):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_horizontal_pod_autoscaler.

    Returns the names (list of strings) of Horizontal Pod Autoscalers in the
    namespace, optionally filtered by label_selector (e.g. label_key=label_value).

    - ``namespace``:
      Namespace to check
    """
    hpas = self.autoscalingv1.list_namespaced_horizontal_pod_autoscaler(namespace, watch=False, label_selector=label_selector)
    return [hpa.metadata.name for hpa in hpas.items]
def read_namespaced_horizontal_pod_autoscaler(self, name, namespace):
    """Gets Horizontal Pod Autoscaler details in given namespace.

    Returns Horizontal Pod Autoscaler object representation. Can be accessed using
    | Should Be Equal As integers | ${hpa_details.spec.target_cpu_utilization_percentage} | 50 |

    - ``name``:
      Name of Horizontal Pod Autoscaler
    - ``namespace``:
      Namespace to check
    """
    return self.autoscalingv1.read_namespaced_horizontal_pod_autoscaler(name, namespace)
def get_hpa_details_in_namespace(self, name, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use read_namespaced_horizontal_pod_autoscaler.

    Returns Horizontal Pod Autoscaler object representation for ``name`` in
    ``namespace``. Can be accessed using
    | Should Be Equal As integers | ${hpa_details.spec.target_cpu_utilization_percentage} | 50 |
    """
    return self.autoscalingv1.read_namespaced_horizontal_pod_autoscaler(name, namespace)
def read_namespaced_endpoints(self, name, namespace):
    """Gets endpoint details in given namespace.

    Returns Endpoint object representation. Can be accessed using
    | Should Match | ${endpoint_details.subsets[0].addresses[0].target_ref.name} | pod-name-123456 |

    - ``name``:
      Name of endpoint.
    - ``namespace``:
      Namespace to check
    """
    return self.v1.read_namespaced_endpoints(name, namespace)
def get_endpoints_in_namespace(self, name, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use read_namespaced_endpoints.

    Returns Endpoint object representation for ``name`` in ``namespace``.
    Can be accessed using
    | Should Match | ${endpoint_details.subsets[0].addresses[0].target_ref.name} | pod-name-123456 |
    """
    return self.v1.read_namespaced_endpoints(name, namespace)
def list_namespaced_persistent_volume_claim(self, namespace, label_selector=""):
    """Gets pvcs in given namespace.

    Can be optionally filtered by label, e.g. label_selector=label_key=label_value.
    Returns list of V1PersistentVolumeClaim objects.

    - ``namespace``:
      Namespace to check
    """
    pvcs = self.v1.list_namespaced_persistent_volume_claim(namespace, watch=False, label_selector=label_selector)
    return list(pvcs.items)
def list_namespaced_persistent_volume_claim_by_pattern(self, name_pattern, namespace, label_selector=""):
    """Gets pvcs in given namespace whose name matches ``name_pattern``.

    Can be optionally filtered by label, e.g. label_selector=label_key=label_value.
    Returns list of V1PersistentVolumeClaim objects.

    - ``namespace``:
      Namespace to check
    - ``name_pattern``:
      pvc name pattern to check
    """
    pvcs = self.v1.list_namespaced_persistent_volume_claim(namespace, watch=False, label_selector=label_selector)
    # re.match anchors at the start of the name only, as in the original API.
    matcher = re.compile(name_pattern)
    return [pvc for pvc in pvcs.items if matcher.match(pvc.metadata.name)]
def list_namespaced_stateful_set(self, namespace, label_selector=""):
    """Lists statefulsets in given namespace.

    Can be optionally filtered by label, e.g. label_selector=label_key=label_value.
    Returns list of statefulsets.

    - ``namespace``:
      Namespace to check
    """
    sets = self.appsv1.list_namespaced_stateful_set(namespace, watch=False, label_selector=label_selector)
    return list(sets.items)
def list_namespaced_stateful_set_by_pattern(self, name_pattern, namespace, label_selector=""):
    """Lists statefulsets matching pattern in given namespace.

    Can be optionally filtered by label, e.g. label_selector=label_key=label_value.
    Returns list of statefulsets.

    - ``namespace``:
      Namespace to check
    - ``name_pattern``:
      statefulset name pattern to check
    """
    sets = self.appsv1.list_namespaced_stateful_set(namespace, watch=False, label_selector=label_selector)
    matcher = re.compile(name_pattern)
    return [s for s in sets.items if matcher.match(s.metadata.name)]
def get_pvc_in_namespace(self, namespace, label_selector=""):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_persistent_volume_claim.

    Returns the names (list of strings) of pvcs in the namespace, optionally
    filtered by label_selector (e.g. label_key=label_value).

    - ``namespace``:
      Namespace to check
    """
    pvcs = self.v1.list_namespaced_persistent_volume_claim(namespace, watch=False, label_selector=label_selector)
    return [pvc.metadata.name for pvc in pvcs.items]
def read_namespaced_persistent_volume_claim(self, name, namespace):
    """Gets PVC details in given namespace.

    Returns PVC object representation. Can be accessed using
    | Should Be Equal As strings | ${pvc.status.capacity.storage} | 1Gi |

    - ``name``:
      Name of PVC.
    - ``namespace``:
      Namespace to check
    """
    return self.v1.read_namespaced_persistent_volume_claim(name, namespace)
def get_pvc_capacity(self, name, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use read_namespaced_persistent_volume_claim.

    Returns PVC object representation for ``name`` in ``namespace``.
    Can be accessed using
    | Should Be Equal As strings | ${pvc.status.capacity.storage} | 1Gi |
    """
    return self.v1.read_namespaced_persistent_volume_claim(name, namespace)
def get_kubelet_version(self, label_selector=""):
    """Gets list of kubelet versions on each node.

    Can be optionally filtered by label, e.g. label_selector=label_key=label_value.
    Returns list of strings.
    """
    nodes = self.v1.list_node(watch=False, label_selector=label_selector)
    return [node.status.node_info.kubelet_version for node in nodes.items]
def create_namespaced_service_account(self, namespace, body):
    """Creates service account in a namespace.

    Returns the created service account.

    - ``body``:
      Service Account object.
    - ``namespace``:
      Namespace to check
    """
    return self.v1.create_namespaced_service_account(namespace=namespace, body=body)
def create_service_account_in_namespace(self, namespace, body):
    """*DEPRECATED* Will be removed in v1.0.0. Use create_namespaced_service_account.

    Creates service account in a namespace; returns the created service account.

    - ``body``:
      Service Account object.
    - ``namespace``:
      Namespace to check
    """
    return self.v1.create_namespaced_service_account(namespace=namespace, body=body)
def delete_namespaced_service_account(self, name, namespace):
    """Deletes service account in a namespace.

    Returns V1Status.

    - ``name``:
      Service Account name
    - ``namespace``:
      Namespace to check
    """
    return self.v1.delete_namespaced_service_account(name=name, namespace=namespace)
def delete_service_account_in_namespace(self, name, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use delete_namespaced_service_account.

    Deletes service account in a namespace; returns V1Status.

    - ``name``:
      Service Account name
    - ``namespace``:
      Namespace to check
    """
    return self.v1.delete_namespaced_service_account(name=name, namespace=namespace)
def get_healthcheck(self, endpoint='/readyz', verbose=False):
    """Performs GET on /readyz or /livez for simple health check.

    Can be used to verify the readiness/current status of the API server.
    Returns tuple of (response data, response status and response headers).

    - ``endpoint``:
      /readyz, /livez or individual endpoints like '/livez/etcd'. Defaults to /readyz.
    - ``verbose``:
      More detailed output.
    https://kubernetes.io/docs/reference/using-api/health-checks
    """
    # Only the documented health endpoints are allowed.
    if not endpoint.startswith(('/readyz', '/livez')):
        raise RuntimeError(f'{endpoint} does not start with "/readyz" or "/livez"')
    if verbose:
        endpoint = endpoint + '?verbose'
    # Raw call_api: the health endpoints are not modelled by the generated client.
    return self.v1.api_client.call_api(
        endpoint, 'GET',
        {},   # path params
        [],   # query params
        {},   # header params
        response_type='str',
        auth_settings=['BearerToken'],
        async_req=False,
        _return_http_data_only=False)
def list_namespaced_ingress(self, namespace, label_selector=""):
    """Gets ingresses in given namespace.

    Can be optionally filtered by label, e.g. label_selector=label_key=label_value.
    Returns list of V1Ingress objects.

    - ``namespace``:
      Namespace to check
    """
    ingresses = self.networkingv1api.list_namespaced_ingress(namespace, watch=False, label_selector=label_selector)
    return list(ingresses.items)
def get_ingresses_in_namespace(self, namespace, label_selector=""):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_ingress.

    Returns the names (list of strings) of ingresses in the namespace,
    optionally filtered by label_selector (e.g. label_key=label_value).

    - ``namespace``:
      Namespace to check
    """
    ingresses = self.networkingv1api.list_namespaced_ingress(namespace, watch=False, label_selector=label_selector)
    return [ing.metadata.name for ing in ingresses.items]
def read_namespaced_ingress(self, name, namespace):
    """Gets ingress details in given namespace.

    Returns Ingress object representation.

    - ``name``:
      Name of ingress.
    - ``namespace``:
      Namespace to check
    """
    ret = self.networkingv1api.read_namespaced_ingress(name, namespace)
    return ret
def get_ingress_details_in_namespace(self, name, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use read_namespaced_ingress.

    Gets ingress details in given namespace.
    Returns Ingress object representation.

    - ``name``:
      Name of ingress.
    - ``namespace``:
      Namespace to check
    """
    ret = self.networkingv1api.read_namespaced_ingress(name, namespace)
    return ret
def list_namespaced_cron_job(self, namespace, label_selector=""):
    """Gets cron jobs in given namespace.

    Can be optionally filtered by label, e.g. label_selector=label_key=label_value.
    Returns list of V1CronJob objects.

    - ``namespace``:
      Namespace to check
    """
    jobs = self.batchv1.list_namespaced_cron_job(namespace, watch=False, label_selector=label_selector)
    return list(jobs.items)
def get_cron_jobs_in_namespace(self, namespace, label_selector=""):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_cron_job.

    Returns the names (list of strings) of cron jobs in the namespace,
    optionally filtered by label_selector (e.g. label_key=label_value).

    - ``namespace``:
      Namespace to check
    """
    jobs = self.batchv1.list_namespaced_cron_job(namespace, watch=False, label_selector=label_selector)
    return [job.metadata.name for job in jobs.items]
def read_namespaced_cron_job(self, name, namespace):
    """Gets cron job details in given namespace.

    Returns Cron job object representation.

    - ``name``:
      Name of cron job.
    - ``namespace``:
      Namespace to check
    """
    return self.batchv1.read_namespaced_cron_job(name, namespace)
def get_cron_job_details_in_namespace(self, name, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use read_namespaced_cron_job.

    Returns Cron job object representation for ``name`` in ``namespace``.

    - ``name``:
      Name of cron job.
    - ``namespace``:
      Namespace to check
    """
    return self.batchv1.read_namespaced_cron_job(name, namespace)
def list_namespaced_daemon_set(self, namespace, label_selector=""):
    """Gets a list of available daemonsets.

    Can be optionally filtered by label, e.g. label_selector=label_key=label_value.
    Returns list of daemonsets.

    - ``namespace``:
      Namespace to check
    """
    daemonsets = self.appsv1.list_namespaced_daemon_set(namespace, watch=False, label_selector=label_selector)
    return list(daemonsets.items)
def get_daemonsets_in_namespace(self, namespace, label_selector=""):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_daemon_set.

    Returns the names (list of strings) of daemonsets in the namespace,
    optionally filtered by label_selector (e.g. label_key=label_value).

    - ``namespace``:
      Namespace to check
    """
    daemonsets = self.appsv1.list_namespaced_daemon_set(namespace, watch=False, label_selector=label_selector)
    return [ds.metadata.name for ds in daemonsets.items]
def read_namespaced_daemon_set(self, name, namespace):
    """Gets daemonset details in given namespace.

    Returns daemonset object representation.

    - ``name``:
      Name of the daemonset
    - ``namespace``:
      Namespace to check
    """
    return self.appsv1.read_namespaced_daemon_set(name, namespace)
def get_daemonset_details_in_namespace(self, name, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use read_namespaced_daemon_set.

    Returns daemonset object representation for ``name`` in ``namespace``.

    - ``name``:
      Name of the daemonset
    - ``namespace``:
      Namespace to check
    """
    return self.appsv1.read_namespaced_daemon_set(name, namespace)
def list_cluster_role(self):
    """Gets a list of cluster_roles.

    Returns list of cluster_role objects.
    """
    roles = self.rbac_authv1_api.list_cluster_role(watch=False)
    return list(roles.items)
def get_cluster_roles(self):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_cluster_role.

    Returns the names (list of strings) of cluster_roles.
    """
    roles = self.rbac_authv1_api.list_cluster_role(watch=False)
    return [role.metadata.name for role in roles.items]
def list_cluster_role_binding(self):
    """Gets a list of cluster_role_bindings.

    Returns list of cluster_role_binding objects.
    """
    bindings = self.rbac_authv1_api.list_cluster_role_binding(watch=False)
    return list(bindings.items)
def get_cluster_role_bindings(self):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_cluster_role_binding.

    Returns the names (list of strings) of cluster_role_bindings.
    """
    bindings = self.rbac_authv1_api.list_cluster_role_binding(watch=False)
    return [binding.metadata.name for binding in bindings.items]
def list_namespaced_role(self, namespace):
    """Gets roles in given namespace.

    Returns list of role objects.

    - ``namespace``:
      Namespace to check
    """
    roles = self.rbac_authv1_api.list_namespaced_role(namespace, watch=False)
    return list(roles.items)
def get_roles_in_namespace(self, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_role.

    Returns the names (list of strings) of roles in the namespace.

    - ``namespace``:
      Namespace to check
    """
    roles = self.rbac_authv1_api.list_namespaced_role(namespace, watch=False)
    return [role.metadata.name for role in roles.items]
def list_namespaced_role_binding(self, namespace):
    """Gets role_bindings in given namespace.

    Returns list of role_binding objects.

    - ``namespace``:
      Namespace to check
    """
    bindings = self.rbac_authv1_api.list_namespaced_role_binding(namespace, watch=False)
    return list(bindings.items)
def get_role_bindings_in_namespace(self, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_namespaced_role_binding.

    Returns the names (list of strings) of role_bindings in the namespace.

    - ``namespace``:
      Namespace to check
    """
    bindings = self.rbac_authv1_api.list_namespaced_role_binding(namespace, watch=False)
    return [binding.metadata.name for binding in bindings.items]
def list_cluster_custom_object(self, group, version, plural):
    """Lists cluster level custom objects.

    Returns an object.

    - ``group``:
      API Group, e.g. 'k8s.cni.cncf.io'
    - ``version``:
      API version, e.g. 'v1'
    - ``plural``:
      e.g. 'network-attachment-definitions'
    As in ``GET /apis/{group}/{version}/{plural}``
    https://github.com/kubernetes-client/python/blob/master/kubernetes/README.md
    """
    # Delegates straight to CustomObjectsApi; returns the raw response object.
    return self.custom_object.list_cluster_custom_object(group, version, plural)
def list_cluster_custom_objects(self, group, version, plural):
    """*DEPRECATED* Will be removed in v1.0.0. Use list_cluster_custom_object.

    Lists cluster level custom objects.
    Returns an object.

    - ``group``:
      API Group, e.g. 'k8s.cni.cncf.io'
    - ``version``:
      API version, e.g. 'v1'
    - ``plural``:
      e.g. 'network-attachment-definitions'
    As in ``GET /apis/{group}/{version}/{plural}``
    https://github.com/kubernetes-client/python/blob/master/kubernetes/README.md
    """
    # Kept as an alias of list_cluster_custom_object for backward compatibility.
    return self.custom_object.list_cluster_custom_object(group, version, plural)
def get_cluster_custom_object(self, group, version, plural, name):
    """Get cluster level custom object.

    Returns an object.

    - ``group``:
      API Group, e.g. 'scheduling.k8s.io'
    - ``version``:
      API version, e.g. 'v1'
    - ``plural``:
      e.g. 'priorityclasses'
    - ``name``:
      e.g. 'system-node-critical'
    As in ``GET /apis/{group}/{version}/{plural}/{name}``
    https://github.com/kubernetes-client/python/blob/master/kubernetes/README.md
    """
    # Delegates straight to CustomObjectsApi; returns the raw response object.
    return self.custom_object.get_cluster_custom_object(group, version, plural, name)
def get_namespaced_custom_object(self, group, version, namespace, plural, name):
    """Get custom object in namespace.

    Returns an object.

    - ``group``:
      API Group, e.g. 'k8s.cni.cncf.io'
    - ``version``:
      API version, e.g. 'v1'
    - ``namespace``:
      Namespace, e.g. 'default'
    - ``plural``:
      e.g. 'network-attachment-definitions'
    - ``name``:
      e.g. 'my-network'
    As in ``GET /apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}``
    https://github.com/kubernetes-client/python/blob/master/kubernetes/README.md
    """
    # Delegates straight to CustomObjectsApi; returns the raw response object.
    return self.custom_object.get_namespaced_custom_object(group, version, namespace, plural, name)
def list_namespaced_custom_object(self, group, version, namespace, plural):
    """List custom objects in namespace.

    Returns an object.

    - ``group``:
      API Group, e.g. 'k8s.cni.cncf.io'
    - ``version``:
      API version, e.g. 'v1'
    - ``namespace``:
      Namespace, e.g. 'default'
    - ``plural``:
      e.g. 'network-attachment-definitions'
    As in ``GET /apis/{group}/{version}/namespaces/{namespace}/{plural}``
    https://github.com/kubernetes-client/python/blob/master/kubernetes/README.md
    """
    # Delegates straight to CustomObjectsApi; returns the raw response object.
    return self.custom_object.list_namespaced_custom_object(group, version, namespace, plural)
def get_custom_object_in_namespace(self, group, version, namespace, plural, name):
    """*DEPRECATED* Will be removed in v1.0.0. Use get_namespaced_custom_object.

    Get custom object in namespace.
    Returns an object.

    - ``group``:
      API Group, e.g. 'k8s.cni.cncf.io'
    - ``version``:
      API version, e.g. 'v1'
    - ``namespace``:
      Namespace, e.g. 'default'
    - ``plural``:
      e.g. 'network-attachment-definitions'
    - ``name``:
      e.g. 'my-network'
    As in ``GET /apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}``
    https://github.com/kubernetes-client/python/blob/master/kubernetes/README.md
    """
    # Kept as an alias of get_namespaced_custom_object for backward compatibility.
    return self.custom_object.get_namespaced_custom_object(group, version, namespace, plural, name)
def create_namespaced_cron_job(self, namespace, body):
    """Creates cron_job in a namespace.

    Returns the created cron_job.

    - ``body``:
      Cron_job object.
    - ``namespace``:
      Namespace to check
    """
    return self.batchv1.create_namespaced_cron_job(namespace=namespace, body=body)
def create_cron_job_in_namespace(self, namespace, body):
    """*DEPRECATED* Will be removed in v1.0.0. Use create_namespaced_cron_job.

    Creates cron_job in a namespace; returns the created cron_job.

    - ``body``:
      Cron_job object.
    - ``namespace``:
      Namespace to check
    """
    return self.batchv1.create_namespaced_cron_job(namespace=namespace, body=body)
def delete_namespaced_cron_job(self, name, namespace):
    """Deletes cron_job in a namespace.

    Returns V1Status.

    - ``name``:
      Cron Job name
    - ``namespace``:
      Namespace to check
    """
    return self.batchv1.delete_namespaced_cron_job(name=name, namespace=namespace)
def delete_cron_job_in_namespace(self, name, namespace):
    """*DEPRECATED* Will be removed in v1.0.0. Use delete_namespaced_cron_job.

    Deletes cron_job in a namespace; returns V1Status.

    - ``name``:
      Cron Job name
    - ``namespace``:
      Namespace to check
    """
    return self.batchv1.delete_namespaced_cron_job(name=name, namespace=namespace)
from .version import VERSION
from .keywords import *
from .utils import LibraryListener
__version__ = VERSION
class LDTPLibrary(LoggingKeywords,
                  RunOnFailureKeywords,
                  LDTPKeywords,
                  ScreenshotKeywords,
                  TableKeywords):
    """
    LDTPLibrary is a gui application testing library for Robot Framework.

    It uses the LDTP (Linux Desktop Test Project) libraries internally to control a gui application.
    See http://ldtp.freedesktop.org/wiki/ for more information on LDTP.

    Author: John.Wang <wywincl@gmail.com>

    Examples:
    | *Settings* | *Value* |
    | Library | LDTPLibrary |

    | *Variables* | *Value* |
    | ${APP_NAME} | gnome-calculator |
    | ${FRM_NAME} | frmCalculator |

    | *Test Cases* | *Action* | *Argument* | *Arguments* |
    | Example_Test | Launch App | ${APP_NAME} | |
    | | Click | ${FRM_NAME} | btn1 |
    """
    # One shared library instance for the whole Robot Framework run.
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LIBRARY_VERSION = VERSION

    def __init__(self,
                 run_on_failure='Nothing',
                 screenshot_root_directory=None,
                 *args):
        """
        LDTPLibrary can be imported with optional arguments.

        :param [run_on_failure]: specifies the name of a keyword (from any available
        libraries) to execute when a LDTPLibrary keyword fails. By default 'Nothing' will be used.
        "Nothing" will disable this feature altogether. See `Register Keywords To Run On Failure` keyword
        for more information about this functionality.

        Examples:
        | Library `|` LDTPLibrary `|` run_on_failure = Log Source | # run `Log Source` on failure |
        | Library `|` LDTPLibrary `|` run_on_failure = Capture Screenshot | # run `Capture Screenshot` on failure |
        | Library `|` LDTPLibrary `|` run_on_failure = Capture Screenshot `|` out_file=/tmp/shot.png | # run `Capture Screenshot` on failure |
        | Library `|` LDTPLibrary `|` run_on_failure = Nothing | # does nothing on failure |
        | Library `|` LDTPLibrary `|` run_on_failure = Capture Windows Screenshot `|` screenshot_root_directory=/tmp/ | # run `Capture Windows Screenshot ` on failure |
        """
        # Initialise every keyword-group mixin explicitly; the mixins do not
        # chain to super().__init__, so cooperative MRO init is not used here.
        for base in LDTPLibrary.__bases__:
            base.__init__(self)
        self.screenshot_root_directory = screenshot_root_directory
        # *args are forwarded as arguments to the run-on-failure keyword.
        self.register_keyword_to_run_on_failure(run_on_failure, *args)
        # Robot Framework listener hook; registered last, after the library
        # is otherwise fully initialised.
        self.ROBOT_LIBRARY_LISTENER = LibraryListener()
import sys
try:
import ldtp
except ImportError:
if sys.platform != "darwin":
raise
import atomac.ldtp as ldtp
from .keywordgroup import KeywordGroup
from ._exception import LdtpError
try:
from ldtp.client_exception import LdtpExecutionError
except ImportError:
if sys.platform != "darwin":
raise
from atomac.ldtp.client_exception import LdtpExecutionError
class TableKeywords(KeywordGroup):
def __init__(self):
    # Bind the platform-appropriate ldtp module resolved at import time
    # (plain ldtp on Linux, atomac.ldtp on macOS).
    self._client = ldtp
def double_click_row_index(self, window_name, object_name, row_index, col_index=0):
    """
    Double click cell at the given row/column index.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_index: Row index to click.
    @param col_index: Column index to click.
    @return: row index matching the text on success.
    @rtype: integer
    """
    self._info("double click row index, col index (%d,%d) of [%s, %s]" % (row_index, col_index,
                                                                          window_name, object_name))
    # NOTE(review): the client call normally raises LdtpExecutionError, which
    # this except clause does not catch — confirm the intended exception type.
    try:
        self._client.doubleclickrowindex(window_name, object_name, row_index, col_index)
    except LdtpError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpError(str(e))
def select_row_partial_match(self, window_name, object_name, row_text):
    """
    Select row by partial text match.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_text: Row text to select.
    @return: 1 on success.
    @rtype: integer
    """
    try:
        self._info("select row partial match (%s, %s, %s) " % (window_name, object_name, row_text))
        return ldtp.selectrowpartialmatch(window_name, object_name, row_text)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def multi_select(self, window_name, object_name, row_text_list,
                 partial_match=False):
    """
    Select multiple rows.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_text_list: Row list with matching text to select.
    @param partial_match: currently unused; kept for interface compatibility.
    @return: 1 on success.
    @rtype: integer
    """
    try:
        self._info("multi select (%s, %s, %s) " % (window_name, object_name, row_text_list))
        # NOTE(review): partial_match is accepted but never forwarded to ldtp.
        return ldtp.multiselect(window_name, object_name, row_text_list)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def multi_remove(self, window_name, object_name, row_text_list,
                 partial_match=False):
    """
    Remove (deselect) multiple rows.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_text_list: Row list with matching text to deselect.
    @param partial_match: currently unused; kept for interface compatibility.
    @return: 1 on success.
    @rtype: integer
    """
    try:
        self._info("multi remove (%s, %s, %s) " % (window_name, object_name, row_text_list))
        # NOTE(review): partial_match is accepted but never forwarded to ldtp.
        return ldtp.multiremove(window_name, object_name, row_text_list)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def select_row_index(self, window_name, object_name, row_index):
    """
    Select row by index.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_index: Row index to select.
    @return: 1 on success.
    @rtype: integer
    """
    try:
        self._info("select row index (%s, %s, %d)" % (window_name, object_name, row_index))
        return ldtp.selectrowindex(window_name, object_name, row_index)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def select_last_row(self, window_name, object_name):
    """
    Select the last row of the table.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @return: 1 on success.
    @rtype: integer
    """
    try:
        self._info("select last row of (%s, %s)" % (window_name, object_name))
        return ldtp.selectlastrow(window_name, object_name)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def set_cell_value(self, window_name, object_name, row_index,
                   column=0, data=None):
    """
    Set cell value.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_index: Row index to set.
    @param column: Column index to set, default value 0.
    @param data: value to set; default None (used for toggle button cells).
    @return: 1 on success.
    @rtype: integer
    """
    try:
        self._info("set cell value (%s, %s, data=%s)" % (window_name, object_name, data))
        return ldtp.setcellvalue(window_name, object_name, row_index, column, data)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def get_cell_value(self, window_name, object_name, row_index, column=0):
    """
    Get cell value.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_index: Row index to get.
    @param column: Column index to get, default value 0.
    @return: cell value on success.
    @rtype: string
    """
    try:
        self._info("get cell value (%s, %s, %d)" % (window_name, object_name, row_index))
        return ldtp.getcellvalue(window_name, object_name, row_index, column)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def get_cell_size(self, window_name, object_name, row_index, column=0):
    """
    Get cell size.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_index: Row index to get.
    @param column: Column index to get, default value 0.
    @return: x, y, width, height on success.
    @rtype: list
    """
    try:
        self._info("get cell size (%s, %s, %d)" % (window_name, object_name, row_index))
        return ldtp.getcellsize(window_name, object_name, row_index, column)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def right_click(self, window_name, object_name, row_text):
    """
    Right click on table cell matching the given text.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_text: Row text to click.
    @return: 1 on success.
    @rtype: integer
    """
    try:
        self._info("right click row object (%s, %s, %s)" % (window_name, object_name, row_text))
        return ldtp.rightclick(window_name, object_name, row_text)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def check_row(self, window_name, object_name, row_index, column=0):
    """
    Check (tick) the checkbox cell at the given row/column.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_index: Row index to check.
    @param column: Column index to check, default value 0.
    @return: 1 on success.
    @rtype: integer
    """
    try:
        self._info("check row at (%s, %s, %d)" % (window_name, object_name, row_index))
        return ldtp.checkrow(window_name, object_name, row_index, column)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def expand_table_cell(self, window_name, object_name, row_index, column=0):
    """
    Expand or contract table cell.

    @param window_name: Window name (full name, LDTP convention, or Unix glob).
    @param object_name: Object name (full name, LDTP convention, or Unix glob).
    @param row_index: Row index to expand.
    @param column: Column index to expand, default value 0.
    @return: 1 on success.
    @rtype: integer
    """
    try:
        self._info("expand table cell at (%s, %s, %d)" % (window_name, object_name, row_index))
        # Bug fix: the LDTP API is expandtablecell; the previous call to
        # ldtp.expendtablecell raised AttributeError on every invocation.
        return ldtp.expandtablecell(window_name, object_name, row_index, column)
    except LdtpExecutionError as e:
        # Python 3 removed BaseException.message; str(e) works on 2 and 3.
        raise LdtpExecutionError(str(e))
def uncheck_row(self, window_name, object_name, row_index, column=0):
"""
Check row
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_index: Row index to get
@type row_index: integer
@param column: Column index to get, default value 0
@type column: integer
@return: 1 on success.
@rtype: integer
"""
try:
self._info("uncheck row at (%s, %s, %d)" % (window_name, object_name, row_index))
return ldtp.uncheckrow(window_name, object_name, row_index, column)
except LdtpExecutionError as e:
raise LdtpExecutionError(e.message)
def get_table_row_index(self, window_name, object_name, row_text):
"""
Get table row index matching given text
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_text: Row text to select
@type row_text: string
@return: row index matching the text on success.
@rtype: integer
"""
try:
self._info("get table row with index (%s, %s, %s)" % (window_name, object_name, row_text))
return ldtp.gettablerowindex(window_name, object_name, row_text)
except LdtpExecutionError as e:
raise LdtpExecutionError(e.message)
def single_click_row(self, window_name, object_name, row_text):
"""
Single click row matching given text
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_text: Row text to select
@type row_text: string
@return: row index matching the text on success.
@rtype: integer
"""
try:
self._info("single click row (%s, %s, %s)" % (window_name, object_name, row_text))
return ldtp.singleclickrow(window_name, object_name, row_text)
except LdtpExecutionError as e:
raise LdtpExecutionError(e.message)
def double_click_row(self, window_name, table_name, row_text):
"""
Double click row matching given text
:param window_name:
:param table_name:
:param row_text:
:return:
"""
try:
self._info("double click row matching given text")
return ldtp.doubleclickrow(window_name, table_name, row_text)
except LdtpExecutionError:
raise LdtpExecutionError("Double click row failed")
def verify_table_cell(self, window_name, object_name, row_index,
column_index, row_text):
"""
Verify table cell value with given text
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_index: Row index to get
@type row_index: integer
@param column_index: Column index to get, default value 0
@type column_index: integer
@param row_text: Row text to match
@type row_text: string
@return: 1 on success 0 on failure.
@rtype: integer
"""
try:
self._info("verify table cell text ")
return ldtp.verifytablecell(window_name, object_name, row_index, column_index, row_text)
except LdtpExecutionError as e:
raise LdtpExecutionError(e.message)
def does_row_exist(self, window_name, object_name, row_text,
partial_match=False):
"""
Verify table cell value with given text
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_text: Row text to match
@type row_text: string
@param partial_match: Find partial match strings
@type partial_match:boolean
@return: 1 on success 0 on failure.
@rtype: integer
"""
try:
self._info("Does row exist (%s, %s, %s)" % (window_name, object_name, row_text))
return ldtp.doesrowexist(window_name, object_name, row_text, partial_match)
except LdtpExecutionError as e:
raise LdtpExecutionError(e.message)
def verify_partial_table_cell(self, window_name, object_name, row_index,
column_index, row_text):
"""
Verify partial table cell value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_index: Row index to get
@type row_index: integer
@param column_index: Column index to get, default value 0
@type column_index: integer
@param row_text: Row text to match
@type row_text: string
@return: 1 on success 0 on failure.
@rtype: integer
"""
try:
self._info("verify partial table cell ...")
return ldtp.verifypartialtablecell(window_name, object_name, row_index, column_index, row_text)
except LdtpExecutionError as e:
raise LdtpExecutionError(e.message) | /robotframework-ldtplibrary-10.2.2.tar.gz/robotframework-ldtplibrary-10.2.2/src/LDTPLibrary/keywords/_table.py | 0.486819 | 0.153011 | _table.py | pypi |
import os
import errno
import robot
from LDTPLibrary import utils
from .keywordgroup import KeywordGroup
class ScreenshotKeywords(KeywordGroup):
    """Keywords for capturing screenshots and embedding them into the RF log."""

    def __init__(self):
        # filename pattern -> running index so each {index} placeholder
        # increments independently per pattern.
        self._screenshot_index = {}
        # Stack of previous root dirs, restored when a non-persistent
        # set_screenshot_directory scope ends.
        self._screenshot_path_stack = []
        self.screenshot_root_directory = None

    # Public

    def set_screenshot_directory(self, path, persist=False):
        """Sets the root output directory for captured screenshots.
        ``path`` argument specifies the absolute path where the screenshots
        should be written to. If the specified ``path`` does not exist,
        it will be created. Setting ``persist`` specifies that the given
        ``path`` should be used for the rest of the test execution, otherwise
        the path will be restored at the end of the currently executing scope.
        """
        path = os.path.abspath(path)
        self._create_directory(path)
        if persist is False:
            self._screenshot_path_stack.append(self.screenshot_root_directory)
            # Restore after current scope ends
            utils.events.on('scope_end', 'current',
                            self._restore_screenshot_directory)
        self.screenshot_root_directory = path

    def capture_windows_screenshot(self, window_name=None,
                                   filename='ldtp-screenshot-{index}.png'):
        """Takes a screenshot of the current windows and embeds it into the log.
        if window_name is none ,it will takes a screenshot of the whole desktop.
        ``filename`` argument specifies the name of the file to write the
        screenshot into. If no ``filename`` is given, the screenshot is saved
        into file ldtp-screenshot-{index}.png_ under the directory where
        the Robot Framework log file is written into. The ``filename`` is
        also considered relative to the same directory, if it is not
        given in absolute format. If an absolute or relative path is given
        but the path does not exist it will be created.
        Starting from LDTPLibrary 10.1.6 if ``filename`` contains _{index}_
        characters, it will be automatically replaced with running index.
        The running index is unique for each different filename. The absolute
        path of the saved screenshot is always returned and it does not depend
        does the ``filename`` contain _{index}_. See example 1 and 2 for more
        details.
        The _{index}_ is replaced with the actual index by using Python's
        [https://docs.python.org/2/library/stdtypes.html#str.format|
        str.format] method, and it can be formatted using the standard
        [https://docs.python.org/2/library/string.html#format-string-syntax|
        format string syntax]. The example 3 shows this by setting the width and
        the fill character.
        If there is a need to write literal _{index}_ or if ``filename``
        contains _{_ or _}_ characters, then the braces must be doubled.
        Example 1:
        | ${file1} = | Capture Windows Screenshot |
        | File Should Exist | ${OUTPUTDIR}${/}ldtp-screenshot-1.png |
        | Should Be Equal | ${file1} | ${OUTPUTDIR}${/}ldtp-screenshot-1.png |
        | ${file2} = | Capture Windows Screenshot |
        | File Should Exist | ${OUTPUTDIR}${/}ldtp-screenshot-2.png |
        | Should Be Equal | ${file2} | ${OUTPUTDIR}${/}ldtp-screenshot-2.png |
        Example 2:
        | ${file1} = | Capture Windows Screenshot | ${OTHER_DIR}${/}other-{index}-name.png |
        | ${file2} = | Capture Windows Screenshot | ${OTHER_DIR}${/}some-other-name-{index}.png |
        | ${file3} = | Capture Windows Screenshot | ${OTHER_DIR}${/}other-{index}-name.png |
        | File Should Exist | ${OTHER_DIR}${/}other-1-name.png |
        | Should Be Equal | ${file1} | ${OTHER_DIR}${/}other-1-name.png |
        | File Should Exist | ${OTHER_DIR}${/}some-other-name-1.png |
        | Should Be Equal | ${file2} | ${OTHER_DIR}${/}some-other-name-1.png |
        | File Should Exist | ${OTHER_DIR}${/}other-2-name.png |
        | Should Be Equal | ${file3} | ${OTHER_DIR}${/}other-2-name.png |
        Example 3:
        | Capture Windows Screenshot | ${OTHER_DIR}${/}sc-{index:06}.png |
        | File Should Exist | ${OTHER_DIR}${/}sc-000001.png |
        """
        path, link = self._get_screenshot_paths(filename)
        self._create_directory(path)
        # NOTE(review): the original branched on
        # hasattr(self._client, 'imagecapture') but both branches executed the
        # exact same call, so the dead duplication was collapsed.
        if not self._client.imagecapture(window_name=window_name, out_file=path):
            raise RuntimeError('Failed to save screenshot ' + link)
        # Image is shown on its own row and thus prev row is closed on purpose
        self._html('</td></tr><tr><td colspan="3"><a href="%s">'
                   '<img src="%s" width="800px"></a>' % (link, link))
        return path

    # Private

    def _create_directory(self, path):
        """Create the parent directory of ``path`` if it does not exist yet."""
        target_dir = os.path.dirname(path)
        if not os.path.exists(target_dir):
            try:
                os.makedirs(target_dir)
            except OSError as exc:
                # Race-safe: another process may have created the directory
                # between the exists() check and makedirs().
                if exc.errno == errno.EEXIST and os.path.isdir(target_dir):
                    pass
                else:
                    raise

    def _get_screenshot_directory(self):
        # Use screenshot root directory if set
        if self.screenshot_root_directory is not None:
            return self.screenshot_root_directory
        # Otherwise use RF's log directory
        return self._get_log_dir()

    # should only be called by set_screenshot_directory
    def _restore_screenshot_directory(self):
        self.screenshot_root_directory = self._screenshot_path_stack.pop()

    def _get_screenshot_paths(self, filename):
        """Return (absolute file path, log-relative link) for ``filename``."""
        filename = filename.format(
            index=self._get_screenshot_index(filename))
        filename = filename.replace('/', os.sep)
        screenshotdir = self._get_screenshot_directory()
        logdir = self._get_log_dir()
        path = os.path.join(screenshotdir, filename)
        link = robot.utils.get_link_path(path, logdir)
        return path, link

    def _get_screenshot_index(self, filename):
        """Return the next 1-based running index for ``filename``."""
        if filename not in self._screenshot_index:
            self._screenshot_index[filename] = 0
        self._screenshot_index[filename] += 1
        return self._screenshot_index[filename]
from encodings import utf_8
import os
import sys
import shutil
import glob
import argparse
from datetime import datetime
import robot.libdoc
def toc(links, timestamp, home_page_path, template_file=""):
    """
    Returns a HTML source code for TOC (table of contents) page, based on the template and including
    the provided `links`, generation `timestamp` and the `home_page_path` HTML file as a landing page.
    """
    if template_file == "":
        template_file = os.path.join(os.path.dirname(__file__), "toc_template.html")
    with open(template_file, encoding="utf8") as template:
        raw_html = template.read()
    # Escape every brace in the template, then turn the (now doubled) empty
    # placeholders back into real format slots. This lets the template contain
    # literal CSS/JS braces alongside {} placeholders.
    escaped = raw_html.replace('{', '{{').replace('}', '}}').replace('{{}}', '{}')
    return escaped.format(home_page_path, links, timestamp)
def homepage(timestamp, template_file=""):
    """
    Returns a HTML source code for a landing page, based on the template and including the provided `timestamp`.
    """
    if template_file == "":
        template_file = os.path.join(os.path.dirname(__file__), "homepage_template.html")
    with open(template_file, encoding="utf_8") as template:
        raw_html = template.read()
    # Unlike toc(), the homepage template is formatted directly, so it must
    # only contain the single {} placeholder for the timestamp.
    return raw_html.format(timestamp)
def read_config(config_file):
    """
    Parses the content of the `config_file` and returns a dictionary `{"paths":[values], "libs":[values]}`.
    The `paths` values are glob patterns, which can be resolved in real paths and used for generating docs using `libdoc`.
    The `libs` values are names of Robot Framework libraries with necessary import params - in the way to be also used for docs generation using `libdoc`.
    The config file must be formatted like this:
    ```
    # Comments starting with # are ignored
    [Paths]
    *.resource
    **/my_subfolder/*.py
    [Libs]
    SeleniumLibrary
    SomeLibrary::some_import_param
    ```
    """
    sections = {
        "paths": {"markers": ["[paths]"], "values": []},
        "libs": {"markers": ["[libs]", "[libraries]"], "values": []},
    }
    current_section = ""
    with open(config_file, encoding="utf8") as f:
        for raw_line in f:
            entry = raw_line.strip()
            # Blank lines and # comments are ignored everywhere.
            if not entry or entry.startswith('#'):
                continue
            # A section marker switches the target list; markers are
            # case-insensitive.
            matched = None
            for section_name, section_content in sections.items():
                if entry.lower() in section_content["markers"]:
                    matched = section_name
                    break
            if matched is not None:
                current_section = matched
            elif current_section:
                # Values before the first marker are silently dropped.
                sections[current_section]["values"].append(entry)
    return {"paths": sections["paths"]["values"], "libs": sections["libs"]["values"]}
def add_files_from_folder(folder, base_dir_path, root=True):
    """
    Creates a HTML source code with links to all HTML files in the `folder` and all it's subfolders.
    The links contain file paths relative to the `base_dir_path`.
    The `root` parameter is needed for internal usage only - it's set to False during deeper recursive calls.
    """
    result_str = ""
    if not root:  # not in the root - wrap this folder's links in a collapsible
        result_str += """<button class="collapsible">{}</button>
        """.format(os.path.basename(folder))
        result_str += """<div class="collapsible_content">
        """
    # Sort for a deterministic, alphabetical TOC (os.listdir order is arbitrary).
    for item in sorted(os.listdir(folder)):
        item_path = os.path.abspath(os.path.join(folder, item))
        if item.endswith(".html"):
            # splitext keeps dots inside the name ("a.b.html" -> "a.b");
            # the original split('.')[0] truncated such names to "a".
            name_without_ext = os.path.splitext(item)[0]
            result_str += """<a class="link_not_selected" href="{}" target="targetFrame">{}</a>
            """.format(os.path.relpath(item_path, base_dir_path), name_without_ext)
        elif os.path.isdir(item_path):
            result_str += add_files_from_folder(item_path, base_dir_path, root=False)
    if not root:
        # end of the "collapsible_content"
        result_str += """</div>
        """
    return result_str
def create_docs_for_dir(resource_dir, output_dir, config_file):
    """
    Creates HTML docs using Robot Framework module `libdoc` for all resources and libraries in the `resource_dir`.
    Generated files are placed inside the `output_dir`, keeping the original subfolder tree structure.
    Paths of resource/python files and libraries, which the docs should be generated for, are configured using the `config_file`.
    The `config_file` must be formatted like this:
    ```
    # Comments starting with # are ignored
    [Paths]
    *.resource
    **/my_subfolder/*.py
    [Libs]
    SeleniumLibrary
    SomeLibrary::some_import_param
    ```
    """
    target_dir = os.path.join(os.path.abspath(output_dir), os.path.basename(resource_dir))
    doc_config = read_config(config_file)

    def run_libdoc(header, source, target_path):
        # Shared libdoc invocation: print progress, fail loudly on errors.
        print(header)
        return_code = robot.libdoc.libdoc(source, target_path)
        if return_code > 0:
            raise Exception(f"Libdoc error! Return code: {return_code}")
        print("")

    # Resource / python files, matched by the configured glob patterns.
    for path_pattern in doc_config["paths"]:
        full_pattern = os.path.join(resource_dir, path_pattern)
        for real_path in glob.glob(full_pattern, recursive=True):
            relative_path = os.path.relpath(real_path, resource_dir)
            target_path = os.path.join(target_dir, relative_path.rpartition('.')[0] + ".html")
            run_libdoc(f">> Generating docs for resource: {relative_path}",
                       real_path, target_path)

    # Installed libraries; "Name::param" specs keep only the name for the file.
    for lib in doc_config["libs"]:
        lib_str_with_resolved_vars = os.path.expandvars(lib)
        target_path = os.path.join(
            target_dir, lib_str_with_resolved_vars.partition("::")[0] + ".html")
        run_libdoc(f">> Generating docs for library: {lib_str_with_resolved_vars}",
                   lib_str_with_resolved_vars, target_path)
def create_toc(html_docs_dir, toc_file="keyword_docs.html", homepage_file="homepage.html", toc_template="", homepage_template=""):
    """
    Generates a `toc_file` (Table of Contents) HTML page with links to all HTML files inside the `html_docs_dir` and all it's subfolders.
    The navigation tree structure in the TOC repeats the folder tree structure.
    It also creates a `homepage_file` shown as a landing page when opening the TOC.
    All the content of the `html_docs_dir` will be moved in the new `src` subfolder, leaving only the `toc_file` directly inside.
    """
    print(f">>> Creating TOC in: {os.path.abspath(html_docs_dir)}")
    # move all subfolders and files into "src"
    src_subdir = os.path.join(html_docs_dir, "src")
    os.makedirs(src_subdir, exist_ok=True)
    all_docs = os.listdir(html_docs_dir)
    for doc_element in all_docs:
        # Skip "src" itself: moving it into itself would fail (and it may
        # already exist from a previous run).
        if doc_element == "src":
            continue
        src = os.path.join(html_docs_dir, doc_element)
        target = os.path.join(src_subdir, doc_element)
        shutil.move(src, target)
    # create homepage in "src"
    homepage_path = os.path.join(src_subdir, homepage_file)
    current_date_time = datetime.now().strftime('%d.%m.%Y %H:%M:%S')
    # Collect links BEFORE the homepage file is written, so the homepage
    # itself does not appear in the navigation tree.
    doc_files_links = add_files_from_folder(src_subdir, os.path.abspath(html_docs_dir))
    with open(homepage_path, 'w', encoding="utf8") as f:
        f.write(homepage(current_date_time, homepage_template))
    # create TOC (written at the top level, next to "src")
    toc_file_path = os.path.join(html_docs_dir, toc_file)
    with open(toc_file_path, 'w', encoding="utf8") as f:
        f.write(toc(doc_files_links, current_date_time, os.path.relpath(homepage_path, os.path.abspath(html_docs_dir)), toc_template))
    print("Finished. Output file: {}".format(os.path.abspath(toc_file_path)))
def main():
    """CLI entry point: generate libdoc HTML for configured resources, then a TOC."""
    parser = argparse.ArgumentParser(description="Generates keyword docs using libdoc based on config files in direct subfolders of the resources dir and creates a TOC")
    parser.add_argument("resources_dir", help="Folder with resources and keywords files")
    parser.add_argument("-d", "--output_dir", default="docs", help="Folder to create the docs in")
    parser.add_argument("--config_file", default=".libtoc", help="File in each folder with docs generation configs")
    parser.add_argument("--toc_file", default="keyword_docs.html", help="Name of the TOC file generated")
    parser.add_argument("--toc_template", default="", help = "Custom HTML template for the TOC file")
    parser.add_argument("--homepage_template", default="", help = "Custom HTML template for the homepage file")
    args = parser.parse_args()

    print(f"Creating docs for: {os.path.abspath(args.resources_dir)}")
    # Start from a clean output dir so stale docs never survive a rerun.
    if os.path.isdir(args.output_dir):
        print(f"Output dir already exists, deleting it: {args.output_dir}")
        shutil.rmtree(args.output_dir)

    # Each direct subfolder with a config file gets its own docs subtree;
    # a config file directly in resources_dir documents resources_dir itself.
    for entry in os.listdir(args.resources_dir):
        entry_path = os.path.join(args.resources_dir, entry)
        if os.path.isdir(entry_path):
            config_path = os.path.join(entry_path, args.config_file)
            if os.path.isfile(config_path):
                create_docs_for_dir(entry_path, args.output_dir, os.path.abspath(config_path))
        elif entry == args.config_file:
            create_docs_for_dir(args.resources_dir, args.output_dir,
                                os.path.abspath(entry_path))

    if os.path.isdir(args.output_dir):
        create_toc(args.output_dir, args.toc_file, toc_template=args.toc_template, homepage_template=args.homepage_template)
    else:
        print("No docs were created!")
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
from rflint.common import SuiteRule, ERROR, WARNING, normalize_name
from rflint.parser import SettingTable
import re
class PeriodInSuiteName(SuiteRule):
    '''Warn about periods in the suite name

    Since robot uses "." as a path separator, using a "." in a suite
    name can lead to ambiguity.
    '''
    severity = WARNING

    def apply(self, suite):
        suite_name = suite.name
        if "." in suite_name:
            self.report(suite, "'.' in suite name '%s'" % suite_name, 0)
class InvalidTable(SuiteRule):
    '''Verify that there are no invalid table headers

    Parameter robot_level to be set to 'robot3' (default) or 'robot2'.'''
    valid_tables_re = None
    default_robot_level = "robot3"

    def configure(self, robot_level):
        # Table names accepted by robot 3; robot 2 had a few extra aliases.
        patterns = ['comments?', 'settings?', 'tasks?', 'test cases?',
                    'keywords?', 'variables?']
        if robot_level == "robot2":
            patterns = patterns + ['cases?', 'metadata', 'user keywords?']
        self.valid_tables_re = re.compile('^(%s)$' % '|'.join(patterns), re.I)

    def apply(self, suite):
        # Lazily configure with the default robot level on first use.
        if self.valid_tables_re is None:
            self.configure(self.default_robot_level)
        for table in suite.tables:
            if not self.valid_tables_re.match(table.name):
                self.report(suite, "Unknown table name '%s'" % table.name,
                            table.linenumber)
class DuplicateKeywordNames(SuiteRule):
    '''Verify that no keywords have a name of an existing keyword in the same file'''
    severity = ERROR

    def apply(self, suite):
        # A set gives O(1) membership checks; the original list made
        # this quadratic in the number of keywords.
        seen = set()
        for keyword in suite.keywords:
            # normalize the name, so we catch things like
            # Smoke Test vs Smoke_Test, vs SmokeTest, which
            # robot thinks are all the same
            name = normalize_name(keyword.name)
            if name in seen:
                self.report(suite, "Duplicate keyword name '%s'" % keyword.name, keyword.linenumber)
            seen.add(name)
class DuplicateTestNames(SuiteRule):
    '''Verify that no tests have a name of an existing test in the same suite'''
    severity = ERROR

    def apply(self, suite):
        # A set gives O(1) membership checks; the original list made
        # this quadratic in the number of test cases.
        seen = set()
        for testcase in suite.testcases:
            # normalize the name, so we catch things like
            # Smoke Test vs Smoke_Test, vs SmokeTest, which
            # robot thinks are all the same
            name = normalize_name(testcase.name)
            if name in seen:
                self.report(suite, "Duplicate testcase name '%s'" % testcase.name, testcase.linenumber)
            seen.add(name)
class RequireSuiteDocumentation(SuiteRule):
    '''Verify that a test suite has documentation'''
    severity = WARNING

    def apply(self, suite):
        settings_tables = [table for table in suite.tables
                           if isinstance(table, SettingTable)]
        # Any "Documentation" row in a settings table satisfies the rule.
        for table in settings_tables:
            for row in table.rows:
                if row[0].lower() == "documentation":
                    return
        # No documentation found: report at the first line of the first
        # settings table, defaulting to the first line of the file.
        if settings_tables:
            linenum = settings_tables[0].linenumber + 1
        else:
            linenum = 1
        self.report(suite, "No suite documentation", linenum)
class TooManyTestCases(SuiteRule):
    '''
    Should not have too many tests in one suite.

    The exception is if they are data-driven.
    https://code.google.com/p/robotframework/wiki/HowToWriteGoodTestCases#Test_suite_structure
    You can configure the maximum number of tests. The default is 10.
    '''
    severity = WARNING
    max_allowed = 10

    def configure(self, max_allowed):
        self.max_allowed = int(max_allowed)

    def apply(self, suite):
        # Data-driven suites (those with a "Test Template" setting) are exempt.
        uses_template = any(
            row[0].lower() == "test template"
            for table in suite.tables if isinstance(table, SettingTable)
            for row in table.rows)
        if uses_template:
            return
        testcases = list(suite.testcases)
        if len(testcases) > self.max_allowed:
            self.report(
                suite, "Too many test cases (%s > %s) in test suite"
                % (len(testcases), self.max_allowed), testcases[self.max_allowed].linenumber
            )
from rflint.common import SuiteRule, ResourceRule, ERROR, normalize_name
def check_duplicates(report_duplicate, table,
                     permitted_dups=None, normalize_itemname=normalize_name):
    """Call `report_duplicate(row, prev_row)` for every row whose normalized
    first cell was already seen in `table`.

    `table` is a SettingsTable or a VariableTable; either contains rows,
    but only VariableTable also contains statements.
    """
    first_occurrence = {}
    for row in table.rows:
        item = normalize_itemname(row[0])
        # Ignore blank lines, comments and "..." continuation rows.
        if item == "" or item.startswith("#") or item.startswith("..."):
            continue
        # Some tables allow certain entries to repeat legally.
        if permitted_dups and item in permitted_dups:
            continue
        previous = first_occurrence.setdefault(item, row)
        if previous is not row:
            report_duplicate(row, previous)
class DuplicateSettingsCommon(object):
    '''Verify that settings are not repeated in a Settings table

    This has been made an error in Robot3.0
    https://github.com/robotframework/robotframework/issues/2204'''
    severity = ERROR

    def apply(self, suite):
        def report_duplicate_setting(setting, prev_setting):
            message = "Setting '%s' used multiple times (previously used line %d)" % \
                (setting[0], prev_setting.linenumber)
            self.report(suite, message, setting.linenumber)

        # Imports may legally appear more than once.
        permitted = ["library", "resource", "variables"]
        for table in suite.tables:
            if table.name == "Settings":
                check_duplicates(report_duplicate_setting, table,
                                 permitted_dups=permitted)
# Suite-file variant of the shared duplicate-settings check.
class DuplicateSettingsInSuite(DuplicateSettingsCommon, SuiteRule):
    pass
# Resource-file variant of the shared duplicate-settings check.
class DuplicateSettingsInResource(DuplicateSettingsCommon, ResourceRule):
    pass
def strip_variable_name(varname):
    """Strip the ${...} decoration and any trailing '=' / spaces from a
    variable-table cell, leaving the bare variable name."""
    without_prefix = varname.lstrip("${")
    return without_prefix.rstrip("}= ")
def normalize_variable_name(varname):
    # Strip the ${...} decoration, then apply rflint's generic name
    # normalization so variable names compare the way robot compares them.
    return normalize_name(strip_variable_name(varname))
class DuplicateVariablesCommon(object):
    '''Verify that variables are not defined twice in the same table

    This is not an error, but leads to surprising result (first definition
    wins, later is ignored).'''

    def apply(self, suite):
        def report_duplicate_variable(variable, prev_variable):
            message = "Variable '%s' defined twice, previous definition line %d" % \
                (strip_variable_name(variable[0]), prev_variable.linenumber)
            self.report(suite, message, variable.linenumber)

        for table in suite.tables:
            if table.name == "Variables":
                check_duplicates(report_duplicate_variable, table,
                                 normalize_itemname=normalize_variable_name)
# Suite-file variant of the shared duplicate-variables check.
class DuplicateVariablesInSuite(DuplicateVariablesCommon, SuiteRule):
    pass
# Resource-file variant of the shared duplicate-variables check.
class DuplicateVariablesInResource(DuplicateVariablesCommon, ResourceRule):
    pass
from rflint.common import TestRule, KeywordRule, GeneralRule, ERROR, WARNING
import re
class LineTooLong(GeneralRule):
    '''Check that a line is not too long (configurable; default=100)'''
    severity = WARNING
    maxchars = 100

    def configure(self, maxchars):
        self.maxchars = int(maxchars)

    def apply(self, robot_file):
        for linenumber, line in enumerate(robot_file.raw_text.splitlines(), start=1):
            if len(line) > self.maxchars:
                self.report(robot_file,
                            "Line is too long (exceeds %s characters)" % self.maxchars,
                            linenumber, self.maxchars)
class TrailingBlankLines(GeneralRule):
    '''Check for multiple blank lines at the end of a file

    This is a configurable. The default value is 2.
    '''
    severity = WARNING
    max_allowed = 2
    def configure(self, max_allowed):
        self.max_allowed=int(max_allowed)
    def apply(self, robot_file):
        # I realize I'm making two full passes over the data, but
        # python is plenty fast enough. Even processing a file with
        # over six thousand lines, this takes a couple of
        # milliseconds. Plenty fast enough for the intended use case,
        # since most files should be about two orders of magnitude
        # smaller than that.
        # Grab the run of whitespace at the very end of the file; without
        # re.MULTILINE, '$' anchors at (just before) the end of the data.
        match=re.search(r'(\s*)$', robot_file.raw_text)
        if match:
            # Number of newlines inside the trailing whitespace run, i.e.
            # how many trailing lines are blank.
            count = len(re.findall(r'\n', match.group(0)))
            if count > self.max_allowed:
                numlines = len(robot_file.raw_text.splitlines())
                message = "Too many trailing blank lines"
                # numlines-count is the last non-blank line; adding
                # max_allowed points the report at the first EXCESS blank line.
                linenumber = numlines-count
                self.report(robot_file, message, linenumber+self.max_allowed, 0)
class TrailingWhitespace(GeneralRule):
    severity = WARNING

    def apply(self, robot_file):
        # A line with trailing whitespace differs from its rstripped form.
        for linenumber, line in enumerate(robot_file.raw_text.splitlines(), start=1):
            if line != line.rstrip():
                self.report(robot_file, "Line has trailing whitespace", linenumber)
class FileTooLong(GeneralRule):
    '''Verify the file has fewer lines than a given threshold.

    You can configure the maximum number of lines. The default is 300.
    '''
    severity = WARNING
    max_allowed = 300

    def configure(self, max_allowed):
        self.max_allowed = int(max_allowed)

    def apply(self, robot_file):
        line_count = len(robot_file.raw_text.splitlines())
        if line_count > self.max_allowed:
            # Report at the first line past the limit.
            self.report(robot_file,
                        "File has too many lines (%s)" % line_count,
                        self.max_allowed + 1, 0)
from __future__ import print_function
import re
class RobotStatements(object):
    """Mixin providing statement/step/setting views over a table's rows.

    NOTE(review): assumes subclasses provide ``self.rows`` (a list of Row)
    and ``self.parent`` — neither is initialized here; confirm in subclasses.
    """
    def append(self, linenumber, raw_text, cells):
        """Add another row of data from a test suite"""
        self.rows.append(Row(linenumber, raw_text, cells))
    @property
    def path(self):
        # this property exists so that the linter doesn't
        # have to have this logic
        return self.parent.path
    @property
    def steps(self):
        """Return a list of steps (statements that are not settings or comments)"""
        steps = []
        for statement in self.statements:
            if ((not statement.is_comment()) and
                (not statement.is_setting())):
                steps.append(statement)
        return steps
    @property
    def settings(self):
        """Return a list of settings (statements whose second cell looks like [...])

        Note: this returns any statement that *looks* like a setting. If you have
        a misspelled or completely bogus setting, it'll return that too
        (eg: | | [Blockumentation] | hello, world)
        """
        return [statement for statement in self.statements
                if (statement.is_setting() and not statement.is_comment())]
    @property
    def statements(self):
        """Return a list of statements

        This is done by joining together any rows that
        have continuations (rows whose second cell is "...").
        """
        # FIXME: no need to do this every time; we should cache the
        # result
        if len(self.rows) == 0:
            return []
        # Seed the first statement with the first row, then fold each
        # following row either into the current statement (continuation)
        # or start a new one, tracking start/end line numbers as we go.
        current_statement = Statement(self.rows[0])
        current_statement.startline = self.rows[0].linenumber
        current_statement.endline = self.rows[0].linenumber
        statements = []
        for row in self.rows[1:]:
            if len(row) > 1 and row[0] == "" and row[1] == "...":
                # we found a continuation: append its payload cells (past
                # the "..." marker) and extend the statement's end line
                current_statement += row[2:]
                current_statement.endline = row.linenumber
            else:
                if len(current_statement) > 0:
                    # append current statement to the list of statements...
                    statements.append(current_statement)
                # start a new statement
                current_statement = Statement(row)
                current_statement.startline = row.linenumber
                current_statement.endline = row.linenumber
        if len(current_statement) > 0:
            statements.append(current_statement)
        return statements
# TODO: make Row and Statement more similar -- either
# both should inherit from list, or neither should.
class Row(object):
    """A row is made up of a list of cells plus metadata: the 1-based line
    number it came from and the raw text of that line."""

    def __init__(self, linenumber, raw_text, cells):
        self.linenumber = linenumber
        self.raw_text = raw_text
        self.cells = cells

    def dump(self):
        """Print the row in pipe-separated form (debugging aid)."""
        print("|" + " | ".join([cell.strip() for cell in self.cells]))

    def __len__(self):
        return len(self.cells)

    def __setitem__(self, key, value):
        # The original ended with a dead "return self.cells[key]";
        # __setitem__ return values are ignored by the interpreter.
        self.cells[key] = value

    def __getitem__(self, key):
        return self.cells[key]

    def __repr__(self):
        return "<line: %s cells: %s>" % (self.linenumber, str(self.cells))

    def __contains__(self, key):
        return key in self.cells
# Marker subclass: a Row holding a comment line.
class Comment(Row):
    # this isn't entirely correct or well thought out.
    # I need a way to capture comments rather than
    # throw them away (mainly so I can recreate the original
    # file from the parsed data)
    pass
class Statement(list):
    """A Statement is a list of cells, plus metadata: the start and end
    line numbers of the (possibly continued) rows it was built from."""
    startline = None
    endline = None
    # Compiled once at class level instead of rebuilding per is_setting() call.
    _setting_re = re.compile(r'\[.*?\]')

    def is_setting(self):
        """Return True if the second cell looks like a [Setting] marker."""
        return len(self) > 1 and bool(self._setting_re.match(self[1]))

    def is_comment(self):
        '''Return True if the first non-empty cell starts with "#"'''
        # Iterate self directly; the original's self[:] made a needless copy.
        for cell in self:
            if cell == "":
                continue
            # this is the first non-empty cell: it decides the answer
            return cell.lstrip().startswith("#")
        return False

    def __repr__(self):
        return "(%.4s-%.4s)%s" % (self.startline, self.endline, list.__repr__(self))
from robot.api import SuiteVisitor
import os
import json
from typing import Set, Dict, Optional
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.basic import normalize_filename
log = get_logger(__name__)
class FilteringTestsSuiteVisitor(SuiteVisitor):
    """Prerun modifier that keeps only the tests selected by include/exclude
    filters (passed directly or via the RFLS_PRERUN_FILTER_TESTS env var).

    Filters map a normalized source filename to a set of test names; the
    special test name "*" matches all tests, and an entry for a parent
    directory selects everything below it (see _contains_uncached).
    """

    def __init__(
        self, tests_filtering: Optional[Dict[str, Dict[str, Set[str]]]] = None
    ) -> None:
        # NOTE(review): the declared type above does not appear to match the
        # usage below, which iterates (source, test_name) pairs taken from
        # tests_filtering["include"] / ["exclude"] -- confirm intended shape.
        log.info("Initializing FilteringTestsSuiteVisitor")
        super().__init__()
        # filename -> test names
        self.include: Dict[str, Set[str]] = {}
        self.exclude: Dict[str, Set[str]] = {}
        # (source, test_name) -> bool memos for _contains() lookups.
        self._include_contains_cache: dict = {}
        self._exclude_contains_cache: dict = {}
        if tests_filtering is not None:
            log.info(
                "FilteringTestsSuiteVisitor initial tests_filtering: %s",
                tests_filtering,
            )
        else:
            # No explicit filter given: try to load one from the environment.
            s = os.getenv("RFLS_PRERUN_FILTER_TESTS", None)
            if s is None:
                log.info(
                    "RFLS_PRERUN_FILTER_TESTS not specified in environment variables."
                )
            elif not s:
                log.info("RFLS_PRERUN_FILTER_TESTS empty in environment variables.")
            else:
                log.info("RFLS_PRERUN_FILTER_TESTS environment variable value: %s", s)
                try:
                    tests_filtering = json.loads(s)
                except:
                    log.exception("Error parsing RFLS_PRERUN_FILTER_TESTS as json")

        def add(tup, container):
            # Register one (source, test_name) pair in include/exclude.
            source, test_name = tup
            source = self._normalize(source)
            s = container.get(source)
            if s is None:
                s = container[source] = set()
            s.add(test_name)

        if tests_filtering:
            for tup in tests_filtering.get("include", []):
                add(tup, self.include)
            for tup in tests_filtering.get("exclude", []):
                add(tup, self.exclude)

    def _normalize(self, source):
        # Normalize the filename so include/exclude lookups compare
        # consistently with the sources reported by robot.
        return normalize_filename(source)

    def _contains(
        self, container: dict, source: str, test_name: str, cache: dict
    ) -> bool:
        """Cached lookup: is (source, test_name) selected by *container*?"""
        # Note: we have a cache because _contains_uncached will always check
        # the parent structure for hits and whenever we find a hit we
        # can skip it.
        key = (source, test_name)
        ret = cache.get(key)
        if ret is not None:
            return ret

        ret = self._contains_uncached(container, source, test_name, cache)
        cache[key] = ret
        return ret

    def _contains_uncached(
        self, container: dict, source: str, test_name: str, cache: dict
    ) -> bool:
        # Try to check for the test directly
        test_names = container.get(source)
        if not test_names:
            # No entry for this file: walk up the directory tree -- an entry
            # for a parent directory selects everything below it.
            dirname = os.path.dirname(source)
            if dirname == source or not dirname:
                return False

            return self._contains(
                container,
                dirname,
                "*",  # at a parent level the test name doesn't matter
                cache,
            )

        if "*" in test_names:
            return True

        if test_name != "*":
            return test_name in test_names
        return False

    def start_suite(self, suite) -> None:
        """Filter suite.tests in place: keep tests passing include then exclude."""
        new_tests = []
        for t in suite.tests:
            source = self._normalize(t.source)

            if self.include:
                if not self._contains(
                    self.include, source, t.name, self._include_contains_cache
                ):
                    log.debug("Test not in includes: %s - %s", t.source, t.name)
                    continue

            # If we got here it's included, now, check excludes.
            if self.exclude:
                if self._contains(
                    self.exclude, source, t.name, self._exclude_contains_cache
                ):
                    log.debug("Test in excludes: %s - %s", t.source, t.name)
                    continue

            new_tests.append(t)
        suite.tests = new_tests

    def end_suite(self, suite):
        # We don't want to keep empty suites.
        suite.suites = [s for s in suite.suites if s.test_count > 0]
from typing import Set, Optional
import os
from robocorp_ls_core.robotframework_log import get_logger
from robotframework_ls.impl.text_utilities import normalize_robot_name
from robocorp_ls_core.options import is_true_in_env
import sys
from robotframework_ls.impl.robot_version import get_robot_major_version
log = get_logger(__name__)
class IgnoreFailuresInStack:
    """
    Tracks the stack of currently executing keywords and answers whether a
    failure raised at this point should be ignored (i.e.: not reported as an
    error and not stopped at by the debugger).

    We load the contents from environment variables:

    RFLS_IGNORE_FAILURES_IN_KEYWORDS:

        A (json-formatted) list of keywords where failures should be ignored.

        Note: ignored means they won't be reported as errors and the debugger
        won't break on them.

        The list below is always ignored by default (so using RFLS_IGNORE_FAILURES_IN_KEYWORDS
        it's possible to add other items to that list).

        [
            "run keyword and continue on failure",
            "run keyword and expect error",
            "run keyword and ignore error",
            "run keyword and warn on failure",
            "wait until keyword succeeds",
            "try..except",
        ]

    It's also possible to set `RFLS_IGNORE_FAILURES_IN_KEYWORDS_OVERRIDE=1` to provide
    all the items if one of those shouldn't be there.
    """

    def __init__(self):
        import json
        from collections import deque

        # Names of the keywords currently being executed (outermost first).
        self._stack: "deque[str]" = deque()
        # Normalized names of keywords inside which failures are ignored.
        self.ignore_failures_inside: Set[str] = set()

        # Add default excludes.
        for entry in (
            "run keyword and continue on failure",
            "run keyword and expect error",
            "run keyword and ignore error",
            "run keyword and warn on failure",
            "wait until keyword succeeds",
            "run keyword and return status",
            "try..except",
        ):
            self.ignore_failures_inside.add(normalize_robot_name(entry))

        if is_true_in_env("RFLS_IGNORE_FAILURES_IN_KEYWORDS_OVERRIDE"):
            # The user opted to fully replace the defaults with their own list.
            self.ignore_failures_inside.clear()

        # Load additional excludes from the environment.
        ignore_failures_inside_in_env = os.getenv("RFLS_IGNORE_FAILURES_IN_KEYWORDS")
        if ignore_failures_inside_in_env:
            try:
                loaded = json.loads(ignore_failures_inside_in_env)
            except Exception:
                # Fix: was a bare ``except:``, which would also swallow
                # SystemExit/KeyboardInterrupt.
                log.exception(
                    "Error: unable to load RFLS_IGNORE_FAILURES_IN_KEYWORDS (%s) as a json.",
                    ignore_failures_inside_in_env,
                )
            else:
                if not isinstance(loaded, list):
                    log.critical(
                        "Expected RFLS_IGNORE_FAILURES_IN_KEYWORDS to be a json list of strings. Found: %s",
                        type(loaded),
                    )
                else:
                    for entry in loaded:
                        self.ignore_failures_inside.add(normalize_robot_name(entry))

    def ignore(self) -> bool:
        """Return True if a failure raised right now should be ignored."""
        from types import FrameType

        for name in self._stack:
            normalized = normalize_robot_name(name)
            if normalized in self.ignore_failures_inside:
                return True

        if get_robot_major_version() >= 5:
            # Allow for try..except in RF 5.
            if "try..except" in self.ignore_failures_inside:
                curframe: Optional[FrameType] = sys._getframe()
                while curframe is not None:
                    # RF makes the try..except invisible for us.
                    # The listener specifically skips it in
                    # robot.output.listeners.Listeners.start_keyword
                    # So, our approach is search whether we're inside some try..except
                    # using the current stack.
                    if curframe.f_code.co_name == "_run_try":
                        return True
                    curframe = curframe.f_back

        return False

    def push(self, name: str):
        """Register that keyword *name* started executing."""
        self._stack.append(name)

    def pop(self):
        """Register that the most recently pushed keyword finished."""
        try:
            self._stack.pop()
        except IndexError:
            # Fix: was a bare ``except:``; an empty deque is the only
            # expected failure mode here.
            log.exception("Error in IgnoreFailuresInStack.pop()")
import sys
from typing import TypeVar, List, Union, Any, Optional, Iterable
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StackFrame
from robocorp_ls_core.protocols import IFuture
# typing.Protocol only exists on Python 3.8+; fall back to a plain base
# class on older interpreters (structural checking is then unavailable).
if sys.version_info[:2] < (3, 8):

    class Protocol(object):
        pass

else:
    from typing import Protocol
# Generic type variables used by the protocol definitions below.
T = TypeVar("T")
# Covariant variant (suitable for "producer"-style protocols).
Y = TypeVar("Y", covariant=True)
class IEvaluationInfo(Protocol):
    # Future holding the (eventual) result of an asynchronous evaluation.
    future: IFuture[Any]
class IRobotBreakpoint(Protocol):
    """Protocol describing a breakpoint set on a robot source file."""

    # 1-based line for the breakpoint.
    lineno: int
    # NOTE(review): the three fields below presumably mirror the DAP
    # SourceBreakpoint fields (condition / hitCondition / logMessage) --
    # confirm against the implementing class.
    condition: Optional[str]
    hit_condition: Optional[int]
    log_message: Optional[str]
    # Number of times this breakpoint was hit so far.
    hits: int
class IBusyWait(Protocol):
    """Protocol for the wait/proceed synchronization used while paused."""

    # Presumably callables run before each wait -- confirm in implementation.
    before_wait: List[Any]
    # Counters for how many times wait()/proceed() ran.
    waited: int
    proceeded: int

    def pre_wait(self) -> None:
        pass

    def wait(self) -> None:
        pass

    def proceed(self) -> None:
        pass
class IRobotDebugger(Protocol):
    """Protocol with the operations the robot debugger must provide."""

    busy_wait: IBusyWait

    def reset(self):
        pass

    def evaluate(
        self, frame_id: int, expression: str, context: str = "watch"
    ) -> IEvaluationInfo:
        """
        Asks something to be evaluated.

        This is an asynchronous operation and returns an _EvaluationInfo (to get
        the result, access _EvaluationInfo.future.result())
        """

    # Stepping commands (names presumably mirror the debug adapter protocol
    # continue/stepIn/next/stepOut requests -- confirm in implementation).
    def step_continue(self) -> None:
        pass

    def step_in(self) -> None:
        pass

    def step_next(self) -> None:
        pass

    def step_out(self) -> None:
        pass

    def set_breakpoints(
        self,
        filename: str,
        breakpoints: Union[IRobotBreakpoint, Iterable[IRobotBreakpoint]],
    ) -> None:
        """
        :param str filename:
        :param list(RobotBreakpoint) breakpoints:
            A single breakpoint or an iterable of breakpoints for *filename*.
        """

    def get_frames(self, thread_id) -> Optional[List[StackFrame]]:
        pass

    def iter_frame_ids(self, thread_id) -> Iterable[int]:
        pass

    def get_current_thread_id(self, thread=None) -> int:
        pass

    def write_message(self, msg):
        pass

    def enable_no_debug_mode(self):
        pass
class INextId(Protocol):
    """Callable protocol: each call produces the next id (of type T)."""

    def __call__(self) -> T:
        pass
import abc
# borrowed from from six
def _with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # Trick borrowed from six: return a throwaway class whose metaclass,
    # when the interpreter builds the *subclass*, asks *meta* to construct
    # the real class with the requested *bases* instead.

    class metaclass(meta):

        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)

    return type.__new__(metaclass, 'temporary_class', (), {})
# =======================================================================================================================
# AbstractResolver
# =======================================================================================================================
class _AbstractResolver(_with_metaclass(abc.ABCMeta)):
    """
    This class exists only for documentation purposes to explain how to create a resolver.

    Some examples on how to resolve things:
    - list: get_dictionary could return a dict with index->item and use the index to resolve it later
    - set: get_dictionary could return a dict with id(object)->object and reiterate in that array to resolve it later
    - arbitrary instance: get_dictionary could return dict with attr_name->attr and use getattr to resolve it later
    """

    @abc.abstractmethod
    def resolve(self, var, attribute):
        """
        In this method, we'll resolve some child item given the string representation of the item in the key
        representing the previously asked dictionary.

        @param var: this is the actual variable to be resolved.
        @param attribute: this is the string representation of a key previously returned in get_dictionary.
        @return: the resolved child object for *attribute*.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_dictionary(self, var):
        """
        @param var: this is the variable that should have its children gotten.

        @return: a dictionary where each pair key, value should be shown to the user as children items
        in the variables view for the given var.
        """
        raise NotImplementedError
class _AbstractProvider(_with_metaclass(abc.ABCMeta)):
    """Base for providers which declare whether they can handle a given type."""

    @abc.abstractmethod
    def can_provide(self, type_object, type_name):
        # Return a truthy value if this provider handles the given type.
        raise NotImplementedError
# =======================================================================================================================
# API CLASSES:
# =======================================================================================================================
class TypeResolveProvider(_AbstractResolver, _AbstractProvider):
    """
    Implement this in an extension to provide a custom resolver, see _AbstractResolver
    (combines resolving with the can_provide() type check from _AbstractProvider).
    """
class StrPresentationProvider(_AbstractProvider):
    """
    Implement this in an extension to provide a str presentation for a type
    """

    @abc.abstractmethod
    def get_str(self, val):
        # Return the string shown to the user for *val*.
        raise NotImplementedError
class DebuggerEventHandler(_with_metaclass(abc.ABCMeta)):
    """
    Implement this to receive lifecycle events from the debugger
    """

    def on_debugger_modules_loaded(self, **kwargs):
        """
        This method invoked after all debugger modules are loaded. Useful for importing and/or patching debugger
        modules at a safe time
        :param kwargs: This is intended to be flexible dict passed from the debugger.
        Currently passes the debugger version
        """
from _pydevd_bundle.pydevd_constants import ForkSafeLock, get_global_debugger
import os
import sys
from contextlib import contextmanager
class IORedirector:
    '''
    Wraps a stream (stdout/stderr) so that anything written to it is also
    forwarded to an additional redirection target.
    '''

    def __init__(self, original, new_redirect, wrap_buffer=False):
        '''
        :param stream original:
            The stream to be wrapped (usually stdout/stderr, but could be None).

        :param stream new_redirect:
            Usually IOBuf (below).

        :param bool wrap_buffer:
            Whether to create a buffer attribute (needed to mimick python 3
            stdout/stderr which has a buffer to write binary data).
        '''
        self._lock = ForkSafeLock(rlock=True)
        self._writing = False
        self._redirect_to = (original, new_redirect)
        if wrap_buffer and hasattr(original, 'buffer'):
            self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)

    def write(self, s):
        # A write to one target may itself fail (closed stream, wrong type)
        # or trigger a reentrant write; the _writing flag guards against the
        # latter while the lock serializes concurrent writers.
        with self._lock:
            if self._writing:
                return
            self._writing = True
            try:
                for target in self._redirect_to:
                    if hasattr(target, 'write'):
                        target.write(s)
            finally:
                self._writing = False

    def isatty(self):
        # Delegate to the first target that knows; default to False.
        for target in self._redirect_to:
            if hasattr(target, 'isatty'):
                return target.isatty()
        return False

    def flush(self):
        for target in self._redirect_to:
            if hasattr(target, 'flush'):
                target.flush()

    def __getattr__(self, name):
        # Unknown attributes are looked up on the wrapped streams.
        for target in self._redirect_to:
            if hasattr(target, name):
                return getattr(target, name)
        raise AttributeError(name)
class RedirectToPyDBIoMessages(object):
    '''
    A stream replacement that turns every write into a debugger io message
    (or hands the text to a custom callback when one is given).
    '''

    def __init__(self, out_ctx, wrap_stream, wrap_buffer, on_write=None):
        '''
        :param out_ctx:
            1=stdout and 2=stderr

        :param wrap_stream:
            Either sys.stdout or sys.stderr.

        :param bool wrap_buffer:
            If True the buffer attribute (which wraps writing bytes) should be
            wrapped.

        :param callable(str) on_write:
            May be a custom callable to be called when to write something.
            If not passed the default implementation will create an io message
            and send it through the debugger.
        '''
        stream_encoding = getattr(wrap_stream, 'encoding', None)
        if not stream_encoding:
            stream_encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')
        self.encoding = stream_encoding
        self._out_ctx = out_ctx
        if wrap_buffer:
            # Mimic the text stream's .buffer attribute for binary writes.
            self.buffer = RedirectToPyDBIoMessages(out_ctx, wrap_stream, wrap_buffer=False, on_write=on_write)
        self._on_write = on_write

    def get_pydb(self):
        # Note: separate method for mocking on tests.
        return get_global_debugger()

    def flush(self):
        pass  # no-op here

    def write(self, s):
        # Custom callback takes precedence over the debugger path.
        if self._on_write is not None:
            self._on_write(s)
            return

        if not s:
            return

        # Need s in str
        if isinstance(s, bytes):
            s = s.decode(self.encoding, errors='replace')

        py_db = self.get_pydb()
        if py_db is None:
            return

        # Note that the actual message contents will be a xml with utf-8,
        # although the entry is str on py3 and bytes on py2.
        cmd = py_db.cmd_factory.make_io_message(s, self._out_ctx)
        if py_db.writer is not None:
            py_db.writer.add_command(cmd)
class IOBuf:
    '''In-memory replacement for stdout/stderr.

    Accumulates everything written to it; getvalue() returns the text
    collected so far and resets the buffer, so consecutive calls never
    report the same contents twice.
    '''

    def __init__(self):
        self.buflist = []
        import os
        self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')

    def getvalue(self):
        # Grab-and-reset so the next call starts from an empty buffer.
        contents, self.buflist = self.buflist, []
        return ''.join(contents)

    def write(self, s):
        if isinstance(s, bytes):
            s = s.decode(self.encoding, errors='replace')
        self.buflist.append(s)

    def isatty(self):
        return False

    def flush(self):
        pass

    def empty(self):
        return not self.buflist
class _RedirectInfo(object):
def __init__(self, original, redirect_to):
self.original = original
self.redirect_to = redirect_to
class _RedirectionsHolder:
    """Module-global state for the redirection helpers below."""

    # Guards all manipulation of the redirection stacks/attributes below.
    _lock = ForkSafeLock(rlock=True)
    # Stacks of _RedirectInfo, one per redirected std stream.
    _stack_stdout = []
    _stack_stderr = []
    # _RedirectInfo currently forwarding the stream to pydb io messages
    # (None when that redirection is not active).
    _pydevd_stdout_redirect_ = None
    _pydevd_stderr_redirect_ = None
def start_redirect(keep_original_redirection=False, std='stdout', redirect_to=None):
    '''
    Redirect sys.stdout/sys.stderr (or both) to *redirect_to*.

    @param keep_original_redirection: if True, writes are forwarded to the
        original stream as well (via an IORedirector wrapper).
    @param std: 'stdout', 'stderr', or 'both'
    @param redirect_to: target stream; a fresh IOBuf is created when None.
    @return: the stream writes are being redirected to.
    '''
    with _RedirectionsHolder._lock:
        if redirect_to is None:
            redirect_to = IOBuf()

        if std == 'both':
            config_stds = ['stdout', 'stderr']
        else:
            config_stds = [std]

        for std in config_stds:
            original = getattr(sys, std)
            stack = getattr(_RedirectionsHolder, '_stack_%s' % std)

            if keep_original_redirection:
                wrap_buffer = True if hasattr(redirect_to, 'buffer') else False
                new_std_instance = IORedirector(getattr(sys, std), redirect_to, wrap_buffer=wrap_buffer)
                setattr(sys, std, new_std_instance)
            else:
                new_std_instance = redirect_to
                setattr(sys, std, redirect_to)

            # Remember the original stream so end_redirect() can restore it.
            stack.append(_RedirectInfo(original, new_std_instance))

        return redirect_to
def end_redirect(std='stdout'):
    '''
    Undo the most recent redirection of the given stream(s), restoring the
    stream recorded by the matching start_redirect() call.

    @param std: 'stdout', 'stderr', or 'both'
    '''
    with _RedirectionsHolder._lock:
        targets = ['stdout', 'stderr'] if std == 'both' else [std]
        for name in targets:
            stack = getattr(_RedirectionsHolder, '_stack_%s' % name)
            restored = stack.pop()
            setattr(sys, name, restored.original)
def redirect_stream_to_pydb_io_messages(std):
    '''
    Start forwarding the given std stream to the debugger as io messages
    (keeping the original stream working as well).

    :param std:
        'stdout' or 'stderr'

    :return: True if the redirection was activated by this call, False if it
        was already active.
    '''
    with _RedirectionsHolder._lock:
        redirect_to_name = '_pydevd_%s_redirect_' % (std,)
        if getattr(_RedirectionsHolder, redirect_to_name) is None:
            wrap_buffer = True
            original = getattr(sys, std)

            redirect_to = RedirectToPyDBIoMessages(1 if std == 'stdout' else 2, original, wrap_buffer)
            start_redirect(keep_original_redirection=True, std=std, redirect_to=redirect_to)

            # Remember which stack entry belongs to this redirection so it
            # can be deactivated later.
            stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
            setattr(_RedirectionsHolder, redirect_to_name, stack[-1])
            return True

        return False
def stop_redirect_stream_to_pydb_io_messages(std):
    '''
    Stop forwarding the given std stream to the debugger, restoring the
    previous stream if the forwarding wrapper is still the active one.

    :param std:
        'stdout' or 'stderr'
    '''
    with _RedirectionsHolder._lock:
        redirect_to_name = '_pydevd_%s_redirect_' % (std,)
        redirect_info = getattr(_RedirectionsHolder, redirect_to_name)
        if redirect_info is not None:  # :type redirect_info: _RedirectInfo
            setattr(_RedirectionsHolder, redirect_to_name, None)

            stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
            # NOTE(review): the popped entry is presumably the same one as
            # redirect_info (pushed by redirect_stream_to_pydb_io_messages)
            # -- confirm; it is discarded without further checks.
            prev_info = stack.pop()

            # Only restore the stream if nobody else replaced it meanwhile.
            curr = getattr(sys, std)
            if curr is redirect_info.redirect_to:
                setattr(sys, std, redirect_info.original)
@contextmanager
def redirect_stream_to_pydb_io_messages_context():
    # Context-manager variant: forwards stdout/stderr to the debugger for
    # the duration of the with-block, undoing only the redirections this
    # call actually started (streams already forwarded are left alone).
    with _RedirectionsHolder._lock:
        redirecting = []
        for std in ('stdout', 'stderr'):
            if redirect_stream_to_pydb_io_messages(std):
                redirecting.append(std)

    try:
        yield
    finally:
        for std in redirecting:
            stop_redirect_stream_to_pydb_io_messages(std)
from _pydevd_bundle.pydevd_constants import DebugInfoHolder, \
get_global_debugger, GetGlobalDebugger, set_global_debugger # Keep for backward compatibility @UnusedImport
from _pydevd_bundle.pydevd_utils import quote_smart as quote, to_string
from _pydevd_bundle.pydevd_comm_constants import ID_TO_MEANING, CMD_EXIT
from _pydevd_bundle.pydevd_constants import HTTP_PROTOCOL, HTTP_JSON_PROTOCOL, \
get_protocol, IS_JYTHON, ForkSafeLock
import json
from _pydev_bundle import pydev_log
class _BaseNetCommand(object):
    """Base class for commands exchanged with the client/IDE."""

    # Command id. Should be set in instance.
    id = -1

    # Dict representation of the command to be set in instance. Only set for json commands.
    as_dict = None

    def send(self, *args, **kwargs):
        # Default: no payload, nothing to send (subclasses override).
        pass
class _NullNetCommand(_BaseNetCommand):
    # Payload-less command whose send() is a no-op (see NULL_NET_COMMAND).
    pass
class _NullExitCommand(_NullNetCommand):
    # Null command identified as CMD_EXIT; used internally, never sent out.
    id = CMD_EXIT
# Constant (singleton) meant to be passed to the writer when the command is meant to be ignored.
NULL_NET_COMMAND = _NullNetCommand()

# Exit command -- only internal (we don't want/need to send this to the IDE).
NULL_EXIT_COMMAND = _NullExitCommand()
class NetCommand(_BaseNetCommand):
    """
    Commands received/sent over the network.

    Command can represent command received from the debugger,
    or one to be sent by daemon.
    """

    next_seq = 0  # sequence numbers
    # NOTE(review): next_seq is class-level state mutated on construction
    # without a visible lock -- presumably command creation is serialized by
    # callers; confirm.

    _showing_debug_info = 0
    _show_debug_info_lock = ForkSafeLock(rlock=True)

    def __init__(self, cmd_id, seq, text, is_json=False):
        """
        If sequence is 0, new sequence will be generated (otherwise, this was the response
        to a command from the client).
        """
        protocol = get_protocol()
        self.id = cmd_id
        if seq == 0:
            # Locally generated sequences advance by 2 (presumably so they
            # never collide with the peer's numbering -- confirm).
            NetCommand.next_seq += 2
            seq = NetCommand.next_seq

        self.seq = seq

        if is_json:
            # Json payloads also carry the command id/sequence inline.
            if hasattr(text, 'to_dict'):
                as_dict = text.to_dict(update_ids_to_dap=True)
            else:
                assert isinstance(text, dict)
                as_dict = text
            as_dict['pydevd_cmd_id'] = cmd_id
            as_dict['seq'] = seq
            self.as_dict = as_dict
            text = json.dumps(as_dict)

        assert isinstance(text, str)

        if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
            self._show_debug_info(cmd_id, seq, text)

        if is_json:
            msg = text
        else:
            # Legacy wire format: tab-separated id/seq/payload; the payload
            # is percent-quoted (and newline-terminated) unless an HTTP-like
            # protocol frames messages by length.
            if protocol not in (HTTP_PROTOCOL, HTTP_JSON_PROTOCOL):
                encoded = quote(to_string(text), '/<>_=" \t')
                msg = '%s\t%s\t%s\n' % (cmd_id, seq, encoded)

            else:
                msg = '%s\t%s\t%s' % (cmd_id, seq, text)

        if isinstance(msg, str):
            msg = msg.encode('utf-8')

        assert isinstance(msg, bytes)
        as_bytes = msg
        self._as_bytes = as_bytes

    def send(self, sock):
        """Serialize this command onto *sock* (HTTP-ish protocols get a
        Content-Length header first)."""
        as_bytes = self._as_bytes
        try:
            if get_protocol() in (HTTP_PROTOCOL, HTTP_JSON_PROTOCOL):
                sock.sendall(('Content-Length: %s\r\n\r\n' % len(as_bytes)).encode('ascii'))
            sock.sendall(as_bytes)
        except:
            if IS_JYTHON:
                # Ignore errors in sock.sendall in Jython (seems to be common for Jython to
                # give spurious exceptions at interpreter shutdown here).
                pass
            else:
                raise

    @classmethod
    def _show_debug_info(cls, cmd_id, seq, text):
        # Trace the outgoing command (best-effort; printing may itself write
        # through a redirected stream, hence the reentrancy guard below).
        with cls._show_debug_info_lock:
            # Only one thread each time (rlock).
            if cls._showing_debug_info:
                # avoid recursing in the same thread (just printing could create
                # a new command when redirecting output).
                return

            cls._showing_debug_info += 1
            try:
                out_message = 'sending cmd (%s) --> ' % (get_protocol(),)
                out_message += "%20s" % ID_TO_MEANING.get(str(cmd_id), 'UNKNOWN')
                out_message += ' '
                out_message += text.replace('\n', ' ')
                try:
                    pydev_log.critical('%s\n', out_message)
                except:
                    pass

            finally:
                cls._showing_debug_info -= 1
import sys
import traceback
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__
# Compiler flags for every feature that can be enabled via a __future__
# import; Compile (below) accumulates these as it sees such imports.
_features = [getattr(__future__, fname)
             for fname in __future__.all_feature_names]

__all__ = ["compile_command", "Compile", "CommandCompiler"]

PyCF_DONT_IMPLY_DEDENT = 0x200  # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
err = err1 = err2 = None
code = code1 = code2 = None
try:
code = compiler(source, filename, symbol)
except SyntaxError as err:
pass
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError as err1:
pass
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError as err2:
pass
if code:
return code
if not code1 and repr(err1) == repr(err2):
raise SyntaxError(err1)
def _compile(source, filename, symbol):
    # Compile without implying a trailing DEDENT: the interactive loop feeds
    # in blocks that may still be continued on the next line.
    return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
def compile_command(source, filename="<input>", symbol="single"):
    r"""Compile a command and determine whether it is incomplete.

    Arguments:

    source -- the source string; may contain \n characters
    filename -- optional filename from which source was read; default
                "<input>"
    symbol -- optional grammar start symbol; "single" (default) or "eval"

    Return value / exceptions raised:

    - Return a code object if the command is complete and valid
    - Return None if the command is incomplete
    - Raise SyntaxError, ValueError or OverflowError if the command is a
      syntax error (OverflowError and ValueError can be produced by
      malformed literals).
    """
    # One-shot compilation (no __future__ memory; use CommandCompiler for that).
    return _maybe_compile(_compile, source, filename, symbol)
class Compile:
    """Instances of this class behave much like the built-in compile
    function, but if one is used to compile text containing a future
    statement, it "remembers" and compiles all subsequent program texts
    with the statement in force."""

    def __init__(self):
        # Accumulated compiler flags; starts with just DONT_IMPLY_DEDENT.
        self.flags = PyCF_DONT_IMPLY_DEDENT

    def __call__(self, source, filename, symbol):
        codeob = compile(source, filename, symbol, self.flags, 1)
        # If the compiled code enabled any __future__ feature, remember it
        # so later compilations see the same feature set.
        for feature in _features:
            if codeob.co_flags & feature.compiler_flag:
                self.flags |= feature.compiler_flag
        return codeob
class CommandCompiler:
    """Instances of this class have __call__ methods identical in
    signature to compile_command; the difference is that if the
    instance compiles program text containing a __future__ statement,
    the instance 'remembers' and compiles all subsequent program texts
    with the statement in force."""

    def __init__(self,):
        self.compiler = Compile()

    def __call__(self, source, filename="<input>", symbol="single"):
        r"""Compile a command and determine whether it is incomplete.

        Arguments:

        source -- the source string; may contain \n characters
        filename -- optional filename from which source was read;
                    default "<input>"
        symbol -- optional grammar start symbol; "single" (default) or
                  "eval"

        Return value / exceptions raised:

        - Return a code object if the command is complete and valid
        - Return None if the command is incomplete
        - Raise SyntaxError, ValueError or OverflowError if the command is a
          syntax error (OverflowError and ValueError can be produced by
          malformed literals).
        """
        # Unlike compile_command(), self.compiler carries over __future__
        # flags seen in earlier successfully compiled sources.
        return _maybe_compile(self.compiler, source, filename, symbol)
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# Public API of the interactive-console portion of this module.
# NOTE(review): this rebinds (clobbers) the __all__ defined earlier for the
# vendored codeop portion above.
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
           "compile_command"]
def softspace(file, newvalue):
    """Set *file*.softspace to *newvalue* and return the previous value
    (0 when the attribute was missing; the write is silently skipped for
    attribute-less or read-only objects)."""
    try:
        oldvalue = file.softspace
    except AttributeError:
        oldvalue = 0
    try:
        file.softspace = newvalue
    except (AttributeError, TypeError):
        # "attribute-less object" or "read-only attributes"
        pass
    return oldvalue
class InteractiveInterpreter:
    """Base class for InteractiveConsole.

    This class deals with parsing and interpreter state (the user's
    namespace); it doesn't deal with input buffering or prompting or
    input file naming (the filename is always passed in explicitly).
    """

    def __init__(self, locals=None):
        """Constructor.

        The optional 'locals' argument specifies the dictionary in
        which code will be executed; it defaults to a newly created
        dictionary with key "__name__" set to "__console__" and key
        "__doc__" set to None.
        """
        if locals is None:
            locals = {"__name__": "__console__", "__doc__": None}
        self.locals = locals
        self.compile = CommandCompiler()

    def runsource(self, source, filename="<input>", symbol="single"):
        """Compile and run some source in the interpreter.

        Arguments are as for compile_command().

        One several things can happen:

        1) The input is incorrect; compile_command() raised an
        exception (SyntaxError or OverflowError).  A syntax traceback
        will be printed by calling the showsyntaxerror() method.

        2) The input is incomplete, and more input is required;
        compile_command() returned None.  Nothing happens.

        3) The input is complete; compile_command() returned a code
        object.  The code is executed by calling self.runcode() (which
        also handles run-time exceptions, except for SystemExit).

        The return value is True in case 2, False in the other cases (unless
        an exception is raised).  The return value can be used to
        decide whether to use sys.ps1 or sys.ps2 to prompt the next
        line.
        """
        try:
            code = self.compile(source, filename, symbol)
        except (OverflowError, SyntaxError, ValueError):
            # Case 1
            self.showsyntaxerror(filename)
            return False

        if code is None:
            # Case 2
            return True

        # Case 3
        self.runcode(code)
        return False

    def runcode(self, code):
        """Execute a code object.

        When an exception occurs, self.showtraceback() is called to
        display a traceback.  All exceptions are caught except
        SystemExit, which is reraised.

        A note about KeyboardInterrupt: this exception may occur
        elsewhere in this code, and may not always be caught.  The
        caller should be prepared to deal with it.
        """
        try:
            exec(code, self.locals)
        except SystemExit:
            raise
        except BaseException:  # deliberately broad per the docstring above
            self.showtraceback()
        else:
            if softspace(sys.stdout, 0):
                sys.stdout.write('\n')

    def showsyntaxerror(self, filename=None):
        """Display the syntax error that just occurred.

        This doesn't display a stack trace because there isn't one.

        If a filename is given, it is stuffed in the exception instead
        of what was there before (because Python's parser always uses
        "<string>" when reading from a string).

        The output is written by self.write(), below.
        """
        type, value, tb = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        sys.last_traceback = tb
        if filename and type is SyntaxError:
            # Work hard to stuff the correct filename in the exception.
            # Bug fix: the original tried to unpack the exception object
            # itself (a Python 2 idiom that always fails on Python 3);
            # unpack value.args instead.
            try:
                msg, (dummy_filename, lineno, offset, line) = value.args
            except (ValueError, TypeError):
                # Not the format we expect; leave it alone
                pass
            else:
                # Stuff in the right filename
                value = SyntaxError(msg, (filename, lineno, offset, line))
                sys.last_value = value
        lines = traceback.format_exception_only(type, value)
        # Bug fix: was ``map(self.write, lines)`` which is lazy on Python 3
        # and therefore never wrote anything.
        for line in lines:
            self.write(line)

    def showtraceback(self, *args, **kwargs):
        """Display the exception that just occurred.

        We remove the first stack item because it is our own code.

        The output is written by self.write(), below.
        """
        try:
            type, value, tb = sys.exc_info()
            sys.last_type = type
            sys.last_value = value
            sys.last_traceback = tb
            tblist = traceback.extract_tb(tb)
            del tblist[:1]  # drop our own frame
            lines = traceback.format_list(tblist)
            if lines:
                lines.insert(0, "Traceback (most recent call last):\n")
            lines[len(lines):] = traceback.format_exception_only(type, value)
        finally:
            tblist = tb = None
        # Bug fix: was ``map(self.write, list)`` which is lazy on Python 3
        # and therefore never wrote anything.
        for line in lines:
            self.write(line)

    def write(self, data):
        """Write a string.

        The base implementation writes to sys.stderr; a subclass may
        replace this with a different implementation.
        """
        sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
    """Closely emulate the behavior of the interactive Python interpreter.

    Builds on InteractiveInterpreter, adding prompting via the familiar
    sys.ps1 / sys.ps2 and input buffering for multi-line statements.
    """

    def __init__(self, locals=None, filename="<console>"):
        """Constructor.

        :param locals: forwarded to the InteractiveInterpreter base class.
        :param filename: name of the input stream; shows up in tracebacks.
        """
        InteractiveInterpreter.__init__(self, locals)
        self.filename = filename
        self.resetbuffer()

    def resetbuffer(self):
        """Discard any buffered (incomplete) input."""
        self.buffer = []

    def interact(self, banner=None):
        """Run a read-eval-print loop, closely emulating the real console.

        :param banner: text printed before the first prompt.  When None, a
            banner similar to the stock interpreter's is printed, with this
            class name appended so it cannot be confused with the real one.
        """
        if not hasattr(sys, "ps1"):
            sys.ps1 = ">>> "
        if not hasattr(sys, "ps2"):
            sys.ps2 = "... "
        cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
        if banner is None:
            self.write("Python %s on %s\n%s\n(%s)\n" %
                       (sys.version, sys.platform, cprt,
                        self.__class__.__name__))
        else:
            self.write("%s\n" % str(banner))
        needs_more = False
        while True:
            try:
                prompt = sys.ps2 if needs_more else sys.ps1
                try:
                    line = self.raw_input(prompt)
                    # sys.stdin may have been replaced by an object whose
                    # readline returns bytes; decode in that case.
                    encoding = getattr(sys.stdin, "encoding", None)
                    if encoding and not isinstance(line, str):
                        line = self.push(line.decode(encoding)) if False else line.decode(encoding)
                except EOFError:
                    self.write("\n")
                    break
                else:
                    needs_more = self.push(line)
            except KeyboardInterrupt:
                self.write("\nKeyboardInterrupt\n")
                self.resetbuffer()
                needs_more = False

    def push(self, line):
        """Append *line* (no trailing newline) to the buffer and try to run it.

        The buffered lines are joined and handed to runsource().  When the
        command is complete or invalid the buffer is reset; when it is
        incomplete the buffer is left as-is.  Returns a true value if more
        input is required, a false value otherwise (same as runsource()).
        """
        self.buffer.append(line)
        source = "\n".join(self.buffer)
        needs_more = self.runsource(source, self.filename)
        if not needs_more:
            self.resetbuffer()
        return needs_more

    def raw_input(self, prompt=""):
        """Write *prompt* and read one line (without the trailing newline).

        Raises EOFError when the user enters the EOF key sequence; the base
        implementation delegates to the built-in input(); subclasses may
        replace it.
        """
        return input(prompt)
def interact(banner=None, readfunc=None, local=None):
    """Closely emulate the interactive Python interpreter.

    Backwards-compatible convenience wrapper around InteractiveConsole.
    When *readfunc* is not given, GNU readline is enabled if available.

    :param banner: passed to InteractiveConsole.interact()
    :param readfunc: if not None, replaces InteractiveConsole.raw_input()
    :param local: passed to InteractiveInterpreter.__init__()
    """
    console = InteractiveConsole(local)
    if readfunc is None:
        # Best effort only: readline hooks itself into input() on import.
        try:
            import readline  # noqa
        except ImportError:
            pass
    else:
        console.raw_input = readfunc
    console.interact(banner)
if __name__ == '__main__':
    import pdb
    pdb.run("interact()\n")
import bisect
from _pydevd_bundle.pydevd_constants import NULL, KeyifyList
import pydevd_file_utils
class SourceMappingEntry(object):
    """One source-mapping range: lines [line, end_line] of a source file map
    onto lines starting at runtime_line inside runtime_source (something
    as <ipython-cell-xxx>)."""

    __slots__ = ['source_filename', 'line', 'end_line', 'runtime_line', 'runtime_source']

    def __init__(self, line, end_line, runtime_line, runtime_source):
        """
        :param line: first mapped line in the source file (inclusive).
        :param end_line: last mapped line in the source file (inclusive).
        :param runtime_line: line in the runtime source where the range starts.
        :param runtime_source: runtime filename (something as <ipython-cell-xxx>).
        """
        assert isinstance(runtime_source, str)
        self.line = int(line)
        self.end_line = int(end_line)
        self.runtime_line = int(runtime_line)
        self.runtime_source = runtime_source

        # Should be set after translated to server (absolute_source_filename).
        # This is what's sent to the client afterwards (so, its case should not be normalized).
        self.source_filename = None

    def contains_line(self, i):
        """Return True if source line *i* falls inside this entry."""
        return self.line <= i <= self.end_line

    def contains_runtime_line(self, i):
        """Return True if runtime line *i* falls inside this entry.

        BUG FIX: the runtime range spans the same number of lines as the
        source range, i.e. (end_line - line).  The original code *added*
        the two line numbers, which made the runtime range far too wide and
        could claim runtime lines belonging to later mappings.
        """
        line_count = self.end_line - self.line
        runtime_end_line = self.runtime_line + line_count
        return self.runtime_line <= i <= runtime_end_line

    def __str__(self):
        return 'SourceMappingEntry(%s)' % (
            ', '.join('%s=%r' % (attr, getattr(self, attr)) for attr in self.__slots__))

    __repr__ = __str__
class SourceMapping(object):
    """Bidirectional line mapping between client source files and runtime
    sources (e.g. IPython cells such as <ipython-cell-xxx>).

    File mappings are 1:N (one file may map to several runtime sources, but
    each runtime source belongs to exactly one file).  Translation results
    are memoized in self._cache and invalidated whenever mappings change.
    """

    def __init__(self, on_source_mapping_changed=NULL):
        # :param on_source_mapping_changed: callable invoked (no args) after
        #     the mappings change; NULL is a no-op placeholder.
        self._mappings_to_server = {}  # dict(normalized(file.py) to [SourceMappingEntry])
        self._mappings_to_client = {}  # dict(<cell> to File.py)
        self._cache = {}  # memoized translations; cleared on every change
        self._on_source_mapping_changed = on_source_mapping_changed

    def set_source_mapping(self, absolute_filename, mapping):
        '''
        Install (replacing any previous) source mapping for a file.

        :param str absolute_filename:
            The filename for the source mapping.

        :param list(SourceMappingEntry) mapping:
            A list with the source mapping entries to be applied to the given filename.

        :return str:
            An error message if it was not possible to set the mapping or an empty string if
            everything is ok.
        '''
        # Let's first validate if it's ok to apply that mapping.
        # File mappings must be 1:N, not M:N (i.e.: if there's a mapping from file1.py to <cell1>,
        # there can be no other mapping from any other file to <cell1>).
        # This is a limitation to make it easier to remove existing breakpoints when new breakpoints are
        # set to a file (so, any file matching that breakpoint can be removed instead of needing to check
        # which lines are corresponding to that file).
        for map_entry in mapping:
            existing_source_filename = self._mappings_to_client.get(map_entry.runtime_source)
            if existing_source_filename and existing_source_filename != absolute_filename:
                return 'Cannot apply mapping from %s to %s (it conflicts with mapping: %s to %s)' % (
                    absolute_filename, map_entry.runtime_source, existing_source_filename, map_entry.runtime_source)

        try:
            # Drop the reverse entries contributed by the previous mapping of
            # this file before installing the new one (sorted by .line so
            # that map_to_server can bisect).
            absolute_normalized_filename = pydevd_file_utils.normcase(absolute_filename)
            current_mapping = self._mappings_to_server.get(absolute_normalized_filename, [])
            for map_entry in current_mapping:
                del self._mappings_to_client[map_entry.runtime_source]

            self._mappings_to_server[absolute_normalized_filename] = sorted(mapping, key=lambda entry:entry.line)

            for map_entry in mapping:
                self._mappings_to_client[map_entry.runtime_source] = absolute_filename
        finally:
            # Always invalidate memoized translations and notify listeners,
            # even if installing the mapping failed part-way.
            self._cache.clear()
            self._on_source_mapping_changed()

        return ''

    def map_to_client(self, runtime_source_filename, lineno):
        # Translate (<cell>, lineno) into (client_filename, lineno, changed);
        # when no entry covers the line the inputs are returned unchanged
        # with changed=False (and that negative result is cached too).
        key = (lineno, 'client', runtime_source_filename)
        try:
            return self._cache[key]
        except KeyError:
            for _, mapping in list(self._mappings_to_server.items()):
                for map_entry in mapping:
                    if map_entry.runtime_source == runtime_source_filename:  # <cell1>
                        if map_entry.contains_runtime_line(lineno):  # matches line range
                            self._cache[key] = (map_entry.source_filename, map_entry.line + (lineno - map_entry.runtime_line), True)
                            return self._cache[key]

            self._cache[key] = (runtime_source_filename, lineno, False)  # Mark that no translation happened in the cache.
            return self._cache[key]

    def has_mapping_entry(self, runtime_source_filename):
        '''
        Return True if any installed mapping targets the given runtime source.

        :param runtime_source_filename:
            Something as <ipython-cell-xxx>
        '''
        # Note that we're not interested in the line here, just on knowing if a given filename
        # (from the server) has a mapping for it.
        key = ('has_entry', runtime_source_filename)
        try:
            return self._cache[key]
        except KeyError:
            for _absolute_normalized_filename, mapping in list(self._mappings_to_server.items()):
                for map_entry in mapping:
                    if map_entry.runtime_source == runtime_source_filename:
                        self._cache[key] = True
                        return self._cache[key]

            self._cache[key] = False
            return self._cache[key]

    def map_to_server(self, absolute_filename, lineno):
        '''
        Convert something as 'file1.py' at line 10 to '<ipython-cell-xxx>' at line 2.

        Note that the name should be already normalized at this point.

        :return: tuple(filename, lineno, changed) -- the inputs unchanged
            (changed=False) when no mapping entry covers the line.
        '''
        absolute_normalized_filename = pydevd_file_utils.normcase(absolute_filename)

        changed = False
        mappings = self._mappings_to_server.get(absolute_normalized_filename)
        if mappings:
            # Entries are sorted by .line, so the covering entry (if any) is
            # at index i or i-1 of the insertion point found by bisect.
            i = bisect.bisect(KeyifyList(mappings, lambda entry:entry.line), lineno)
            if i >= len(mappings):
                i -= 1

            if i == 0:
                entry = mappings[i]

            else:
                entry = mappings[i - 1]

            if not entry.contains_line(lineno):
                # The candidate below the insertion point missed; try the
                # one at the insertion point before giving up.
                entry = mappings[i]
                if not entry.contains_line(lineno):
                    entry = None

            if entry is not None:
                # Offset the line into the runtime source's coordinates.
                lineno = entry.runtime_line + (lineno - entry.line)

                absolute_filename = entry.runtime_source
                changed = True

        return absolute_filename, lineno, changed
def is_variable_to_translate(cls_name, var_name):
    """Return True when *var_name* of schema class *cls_name* holds a DAP id
    that must be translated between debugger and client id spaces."""
    if var_name in ('variablesReference', 'frameId', 'threadId'):
        return True
    # 'id' is only a translatable reference on StackFrame (it's frameId
    # everywhere else) and on Thread (threadId everywhere else).
    if var_name == 'id' and cls_name in ('StackFrame', 'Thread'):
        return True
    return False
def _get_noqa_for_var(prop_name):
return ' # noqa (assign to builtin)' if prop_name in ('type', 'format', 'id', 'hex', 'breakpoint', 'filter') else ''
class _OrderedSet(object):
# Not a good ordered set (just something to be small without adding any deps)
def __init__(self, initial_contents=None):
self._contents = []
self._contents_as_set = set()
if initial_contents is not None:
for x in initial_contents:
self.add(x)
def add(self, x):
if x not in self._contents_as_set:
self._contents_as_set.add(x)
self._contents.append(x)
def discard(self, x):
if x in self._contents_as_set:
self._contents_as_set.remove(x)
self._contents.remove(x)
def copy(self):
return _OrderedSet(self._contents)
def update(self, contents):
for x in contents:
self.add(x)
def __iter__(self):
return iter(self._contents)
def __contains__(self, item):
return item in self._contents_as_set
def __len__(self):
return len(self._contents)
def set_repr(self):
if len(self) == 0:
return 'set()'
lst = [repr(x) for x in self]
return 'set([' + ', '.join(lst) + '])'
class Ref(object):
    """A resolved '$ref' from the JSON schema: the referenced class name
    plus the class-to-generate dict it points at."""

    def __init__(self, ref, ref_data):
        self.ref = ref  # name of the referenced definition
        self.ref_data = ref_data  # the dict describing that definition

    def __str__(self):
        # Renders as the plain class name inside generated code.
        return self.ref
def load_schema_data():
    """Return the parsed DAP schema dict, downloading debugProtocol.json
    next to this file on first use.
    """
    import os.path
    import json

    json_file = os.path.join(os.path.dirname(__file__), 'debugProtocol.json')
    if not os.path.exists(json_file):
        import requests
        # Robustness fix: without a timeout a stuck connection would hang
        # the generation run forever (requests has no default timeout).
        req = requests.get(
            'https://raw.githubusercontent.com/microsoft/debug-adapter-protocol/gh-pages/debugAdapterProtocol.json',
            timeout=60)
        assert req.status_code == 200
        with open(json_file, 'wb') as stream:
            stream.write(req.content)

    with open(json_file, 'rb') as json_contents:
        json_schema_data = json.loads(json_contents.read())
    return json_schema_data
def load_custom_schema_data():
    """Return the parsed pydevd-specific schema additions from
    debugProtocolCustom.json (located next to this file)."""
    import os.path
    import json

    json_file = os.path.join(os.path.dirname(__file__), 'debugProtocolCustom.json')
    with open(json_file, 'rb') as json_contents:
        return json.loads(json_contents.read())
def create_classes_to_generate_structure(json_schema_data):
    """Convert the raw schema 'definitions' into class-generation dicts.

    Each resulting dict carries: name, properties, base_definitions (from
    'allOf' $refs), description, required (an _OrderedSet of names) and the
    enum information for string-enum definitions.
    """
    definitions = json_schema_data['definitions']
    classes_to_generate = {}
    for name, definition in definitions.items():
        all_of = definition.get('allOf')
        description = definition.get('description')

        is_enum = definition.get('type') == 'string' and 'enum' in definition
        enum_values = definition['enum'] if is_enum else None

        properties = dict(definition.get('properties', {}))
        required = _OrderedSet(definition.get('required', _OrderedSet()))
        base_definitions = []

        if all_of is not None:
            # Each 'allOf' part is either a $ref to a base definition or an
            # inline extension carrying more properties/required names.
            for part in all_of:
                ref = part.get('$ref')
                if ref is not None:
                    assert ref.startswith('#/definitions/')
                    base_definitions.append(ref[len('#/definitions/'):])
                else:
                    if not description:
                        description = part.get('description')
                    properties.update(part.get('properties', {}))
                    required.update(_OrderedSet(part.get('required', _OrderedSet())))

        if isinstance(description, (list, tuple)):
            description = '\n'.join(description)

        if name == 'ModulesRequest':  # Hack to accept modules request without arguments (ptvsd: 2050).
            required.discard('arguments')

        classes_to_generate[name] = dict(
            name=name,
            properties=properties,
            base_definitions=base_definitions,
            description=description,
            required=required,
            is_enum=is_enum,
            enum_values=enum_values)
    return classes_to_generate
def collect_bases(curr_class, classes_to_generate, memo=None):
    """Return the transitive base-definition names of *curr_class*.

    Bases are listed in discovery order: direct bases first, each followed
    by its own bases (depth-first).

    :param memo: dict of already-visited base names (shared across the
        recursion to avoid duplicates).

    BUG FIX: *memo* was checked but never populated, so in diamond-shaped
    hierarchies the same base was visited (and returned) repeatedly.  Each
    base is now recorded in *memo* when first seen.
    """
    ret = []
    if memo is None:
        memo = {}

    for base_definition in curr_class['base_definitions']:
        if base_definition not in memo:
            memo[base_definition] = True
            ret.append(base_definition)
            ret.extend(collect_bases(classes_to_generate[base_definition], classes_to_generate, memo))
    return ret
def fill_properties_and_required_from_base(classes_to_generate):
    """Merge each class's inherited 'properties' and 'required' sets.

    Bases are applied first so the resulting property order starts with the
    base attributes and the class's own entries override inherited ones.
    """
    # Now, resolve properties based on refs
    for class_to_generate in classes_to_generate.values():
        merged_properties = {}
        merged_required = _OrderedSet()

        # Note: go from base to current so that the initial order of the
        # properties has that same order.
        for base_definition in reversed(collect_bases(class_to_generate, classes_to_generate)):
            base = classes_to_generate[base_definition]
            merged_properties.update(base.get('properties', {}))
            merged_required.update(base.get('required', _OrderedSet()))

        merged_properties.update(class_to_generate['properties'])
        class_to_generate['properties'] = merged_properties

        merged_required.update(class_to_generate['required'])
        class_to_generate['required'] = merged_required

    return class_to_generate
def update_class_to_generate_description(class_to_generate):
    """Re-wrap 'description' at 100 columns and indent every line with four
    spaces so it can be embedded in the generated class docstring.

    FIX: the indentation literals are reconstructed as four spaces; in the
    reviewed source the runs of spaces inside the string literals appeared
    collapsed to a single space (extraction artifact), which would produce
    mis-indented generated docstrings.
    """
    import textwrap

    description = class_to_generate['description']
    lines = []
    for line in description.splitlines():
        # Wrap each original line at 100 chars, keeping a blank separator
        # between original lines.
        wrapped = textwrap.wrap(line.strip(), 100)
        lines.extend(wrapped)
        lines.append('')

    # Drop the trailing blank lines introduced by the loop above.
    while lines and lines[-1] == '':
        lines = lines[:-1]

    class_to_generate['description'] = '    ' + ('\n    '.join(lines))
def update_class_to_generate_type(classes_to_generate, class_to_generate):
    """Resolve '$ref' property types into Ref objects.

    After this call every property either keeps its primitive 'type' value
    or carries a Ref pointing at the referenced class-to-generate dict.
    """
    for _prop_name, prop_val in class_to_generate.get('properties').items():
        if prop_val.get('type', ''):
            continue
        ref = prop_val.pop('$ref', '')
        if ref:
            assert ref.startswith('#/definitions/')
            target = ref[len('#/definitions/'):]
            prop_val['type'] = Ref(target, classes_to_generate[target])
def update_class_to_generate_register_dec(classes_to_generate, class_to_generate):
    """Compute the decorator lines for the generated class.

    Every class gets '@register'; request/response/event classes whose
    command (or event) name is a single-valued enum additionally get a
    '@register_<msg_type>(<name>)' line in 'register_request'.
    """
    # Defaults for classes that are not messages.
    class_to_generate['register_request'] = ''
    class_to_generate['register_dec'] = '@register'

    properties = class_to_generate.get('properties')
    enum_type = properties.get('type', {}).get('enum')
    if not enum_type or len(enum_type) != 1:
        return
    msg_type = next(iter(enum_type))
    if msg_type not in ("request", "response", "event"):
        return

    if msg_type == 'response':
        # The actual command is typed in the request
        response_name = class_to_generate['name']
        request_name = response_name[:-len('Response')] + 'Request'
        if request_name in classes_to_generate:
            command = classes_to_generate[request_name]['properties'].get('command')
        elif response_name == 'ErrorResponse':
            command = {'enum': ['error']}
        else:
            raise AssertionError('Unhandled: %s' % (response_name,))
    elif msg_type == 'request':
        command = properties.get('command')
    else:  # msg_type == 'event'
        command = properties.get('event')

    if command:
        enum = command.get('enum')
        if enum and len(enum) == 1:
            class_to_generate['register_request'] = '@register_%s(%r)\n' % (msg_type, enum[0])
def extract_prop_name_and_prop(class_to_generate):
    """Return the (name, prop) pairs of the class, required entries first.

    Among the required entries, 'seq' sorts after the others so that the
    generated __init__ can give it a default of -1.
    """
    properties = class_to_generate.get('properties')
    required = _OrderedSet(class_to_generate.get('required', _OrderedSet()))

    def sort_key(item):
        name = item[0]
        if name not in required:
            return 1
        # seq when required is after the other required keys (to have a default of -1).
        return 0.5 if name == 'seq' else 0

    # sorted() is stable, so entries with equal keys keep their dict order.
    return sorted(properties.items(), key=sort_key)
def update_class_to_generate_to_json(class_to_generate):
    """Build the source of the generated to_dict() method, plus the
    update_dict_ids_from_dap()/update_dict_ids_to_dap() classmethods when
    the class has properties holding translatable DAP ids.

    NOTE(review): the indentation inside the generated-code string literals
    below appears collapsed to single spaces (likely an extraction
    artifact); verify against upstream pydevd before relying on the
    generated output.
    """
    required = _OrderedSet(class_to_generate.get('required', _OrderedSet()))
    prop_name_and_prop = extract_prop_name_and_prop(class_to_generate)

    to_dict_body = ['def to_dict(self, update_ids_to_dap=False): # noqa (update_ids_to_dap may be unused)']

    # Properties whose values are DAP ids (frameId, threadId, ...) need
    # translation between the debugger's and the client's id spaces.
    translate_prop_names = []
    for prop_name, prop in prop_name_and_prop:
        if is_variable_to_translate(class_to_generate['name'], prop_name):
            translate_prop_names.append(prop_name)

    # Copy each property into a local, serializing arrays of schema objects
    # element-wise via their own to_dict().
    for prop_name, prop in prop_name_and_prop:
        namespace = dict(prop_name=prop_name, noqa=_get_noqa_for_var(prop_name))
        to_dict_body.append(' %(prop_name)s = self.%(prop_name)s%(noqa)s' % namespace)

        if prop.get('type') == 'array':
            to_dict_body.append(' if %(prop_name)s and hasattr(%(prop_name)s[0], "to_dict"):' % namespace)
            to_dict_body.append(' %(prop_name)s = [x.to_dict() for x in %(prop_name)s]' % namespace)

    if translate_prop_names:
        to_dict_body.append(' if update_ids_to_dap:')
        for prop_name in translate_prop_names:
            namespace = dict(prop_name=prop_name, noqa=_get_noqa_for_var(prop_name))
            to_dict_body.append(' if %(prop_name)s is not None:' % namespace)
            to_dict_body.append(' %(prop_name)s = self._translate_id_to_dap(%(prop_name)s)%(noqa)s' % namespace)

    # Generated classmethod translating ids in a plain dict (client -> debugger).
    if not translate_prop_names:
        update_dict_ids_from_dap_body = []
    else:
        update_dict_ids_from_dap_body = ['', '', '@classmethod', 'def update_dict_ids_from_dap(cls, dct):']
        for prop_name in translate_prop_names:
            namespace = dict(prop_name=prop_name)
            update_dict_ids_from_dap_body.append(' if %(prop_name)r in dct:' % namespace)
            update_dict_ids_from_dap_body.append(' dct[%(prop_name)r] = cls._translate_id_from_dap(dct[%(prop_name)r])' % namespace)
        update_dict_ids_from_dap_body.append(' return dct')

    class_to_generate['update_dict_ids_from_dap'] = _indent_lines('\n'.join(update_dict_ids_from_dap_body))

    # The dct literal holds the required entries; optional ones are added
    # afterwards (only when not None).
    to_dict_body.append(' dct = {')
    first_not_required = False

    for prop_name, prop in prop_name_and_prop:
        # Refs (except enums) are serialized via their own to_dict().
        use_to_dict = prop['type'].__class__ == Ref and not prop['type'].ref_data.get('is_enum', False)
        is_array = prop['type'] == 'array'
        ref_array_cls_name = ''
        if is_array:
            ref = prop['items'].get('$ref')
            if ref is not None:
                ref_array_cls_name = ref.split('/')[-1]

        namespace = dict(prop_name=prop_name, ref_array_cls_name=ref_array_cls_name)
        if prop_name in required:
            if use_to_dict:
                to_dict_body.append(' %(prop_name)r: %(prop_name)s.to_dict(update_ids_to_dap=update_ids_to_dap),' % namespace)
            else:
                if ref_array_cls_name:
                    to_dict_body.append(' %(prop_name)r: [%(ref_array_cls_name)s.update_dict_ids_to_dap(o) for o in %(prop_name)s] if (update_ids_to_dap and %(prop_name)s) else %(prop_name)s,' % namespace)
                else:
                    to_dict_body.append(' %(prop_name)r: %(prop_name)s,' % namespace)
        else:
            # First optional entry closes the dict literal of required ones.
            if not first_not_required:
                first_not_required = True
                to_dict_body.append(' }')
            to_dict_body.append(' if %(prop_name)s is not None:' % namespace)
            if use_to_dict:
                to_dict_body.append(' dct[%(prop_name)r] = %(prop_name)s.to_dict(update_ids_to_dap=update_ids_to_dap)' % namespace)
            else:
                if ref_array_cls_name:
                    to_dict_body.append(' dct[%(prop_name)r] = [%(ref_array_cls_name)s.update_dict_ids_to_dap(o) for o in %(prop_name)s] if (update_ids_to_dap and %(prop_name)s) else %(prop_name)s' % namespace)
                else:
                    to_dict_body.append(' dct[%(prop_name)r] = %(prop_name)s' % namespace)

    # All properties were required: close the dict literal here instead.
    if not first_not_required:
        first_not_required = True
        to_dict_body.append(' }')

    to_dict_body.append(' dct.update(self.kwargs)')
    to_dict_body.append(' return dct')

    class_to_generate['to_dict'] = _indent_lines('\n'.join(to_dict_body))

    # Generated classmethod translating ids in a plain dict (debugger -> client).
    if not translate_prop_names:
        update_dict_ids_to_dap_body = []
    else:
        update_dict_ids_to_dap_body = ['', '', '@classmethod', 'def update_dict_ids_to_dap(cls, dct):']
        for prop_name in translate_prop_names:
            namespace = dict(prop_name=prop_name)
            update_dict_ids_to_dap_body.append(' if %(prop_name)r in dct:' % namespace)
            update_dict_ids_to_dap_body.append(' dct[%(prop_name)r] = cls._translate_id_to_dap(dct[%(prop_name)r])' % namespace)
        update_dict_ids_to_dap_body.append(' return dct')

    class_to_generate['update_dict_ids_to_dap'] = _indent_lines('\n'.join(update_dict_ids_to_dap_body))
def update_class_to_generate_init(class_to_generate):
    """Build the source of the generated __init__ for the class.

    Required properties become positional parameters ('seq' gets a -1
    default); optional ones default to None.  Ref-typed properties are
    coerced into their schema class and DAP ids are translated when
    update_ids_from_dap is true.

    NOTE(review): the indentation inside the generated-code string literals
    below appears collapsed to single spaces (likely an extraction
    artifact); verify against upstream pydevd before relying on the
    generated output.
    """
    args = []
    init_body = []
    docstring = []

    required = _OrderedSet(class_to_generate.get('required', _OrderedSet()))
    prop_name_and_prop = extract_prop_name_and_prop(class_to_generate)

    translate_prop_names = []
    for prop_name, prop in prop_name_and_prop:
        if is_variable_to_translate(class_to_generate['name'], prop_name):
            translate_prop_names.append(prop_name)

        enum = prop.get('enum')
        if enum and len(enum) == 1:
            # Single-valued enums are fixed constants, not parameters.
            init_body.append(' self.%(prop_name)s = %(enum)r' % dict(prop_name=prop_name, enum=next(iter(enum))))
        else:
            if prop_name in required:
                if prop_name == 'seq':
                    args.append(prop_name + '=-1')
                else:
                    args.append(prop_name)
            else:
                args.append(prop_name + '=None')

            if prop['type'].__class__ == Ref:
                ref = prop['type']
                ref_data = ref.ref_data
                if ref_data.get('is_enum', False):
                    # Enum refs: just validate against VALID_VALUES.
                    init_body.append(' if %s is not None:' % (prop_name,))
                    init_body.append(' assert %s in %s.VALID_VALUES' % (prop_name, str(ref)))
                    init_body.append(' self.%(prop_name)s = %(prop_name)s' % dict(
                        prop_name=prop_name))
                else:
                    # Object refs: coerce dicts into the schema class.
                    namespace = dict(
                        prop_name=prop_name,
                        ref_name=str(ref)
                    )
                    init_body.append(' if %(prop_name)s is None:' % namespace)
                    init_body.append(' self.%(prop_name)s = %(ref_name)s()' % namespace)
                    init_body.append(' else:')
                    init_body.append(' self.%(prop_name)s = %(ref_name)s(update_ids_from_dap=update_ids_from_dap, **%(prop_name)s) if %(prop_name)s.__class__ != %(ref_name)s else %(prop_name)s' % namespace
                    )

            else:
                init_body.append(' self.%(prop_name)s = %(prop_name)s' % dict(prop_name=prop_name))

                if prop['type'] == 'array':
                    ref = prop['items'].get('$ref')
                    if ref is not None:
                        # Arrays of schema objects: translate ids element-wise.
                        ref_array_cls_name = ref.split('/')[-1]
                        init_body.append(' if update_ids_from_dap and self.%(prop_name)s:' % dict(prop_name=prop_name))
                        init_body.append(' for o in self.%(prop_name)s:' % dict(prop_name=prop_name))
                        init_body.append(' %(ref_array_cls_name)s.update_dict_ids_from_dap(o)' % dict(ref_array_cls_name=ref_array_cls_name))

        prop_type = prop['type']
        prop_description = prop.get('description', '')

        if isinstance(prop_description, (list, tuple)):
            prop_description = '\n '.join(prop_description)

        docstring.append(':param %(prop_type)s %(prop_name)s: %(prop_description)s' % dict(
            prop_type=prop_type, prop_name=prop_name, prop_description=prop_description))

    if translate_prop_names:
        init_body.append(' if update_ids_from_dap:')
        for prop_name in translate_prop_names:
            init_body.append(' self.%(prop_name)s = self._translate_id_from_dap(self.%(prop_name)s)' % dict(prop_name=prop_name))

    docstring = _indent_lines('\n'.join(docstring))
    init_body = '\n'.join(init_body)

    # Actually bundle the whole __init__ from the parts.
    args = ', '.join(args)
    if args:
        args = ', ' + args

    # Note: added kwargs because some messages are expected to be extended by the user (so, we'll actually
    # make all extendable so that we don't have to worry about which ones -- we loose a little on typing,
    # but may be better than doing a allow list based on something only pointed out in the documentation).
    class_to_generate['init'] = '''def __init__(self%(args)s, update_ids_from_dap=False, **kwargs): # noqa (update_ids_from_dap may be unused)
"""
%(docstring)s
"""
%(init_body)s
self.kwargs = kwargs
''' % dict(args=args, init_body=init_body, docstring=docstring)

    class_to_generate['init'] = _indent_lines(class_to_generate['init'])
def update_class_to_generate_props(class_to_generate):
    """Render the __props__ dict literal for the generated class."""
    import json

    def default(o):
        # Refs serialize as the name of the class they point at.
        if isinstance(o, Ref):
            return o.ref
        raise AssertionError('Unhandled: %s' % (o,))

    rendered = json.dumps(class_to_generate['properties'], indent=4, default=default)
    class_to_generate['props'] = ' __props__ = %s' % _indent_lines(rendered).strip()
def update_class_to_generate_refs(class_to_generate):
    """Render the __refs__ set literal: the property names whose values are
    Ref objects (i.e. nested schema classes)."""
    properties = class_to_generate['properties']
    ref_names = _OrderedSet(
        prop_name for (prop_name, prop) in properties.items()
        if prop['type'].__class__ == Ref)
    class_to_generate['refs'] = ' __refs__ = %s' % ref_names.set_repr()
def update_class_to_generate_enums(class_to_generate):
    """Render class-level enum constants plus VALID_VALUES for enum types;
    non-enum classes get an empty 'enums' string."""
    if not class_to_generate.get('is_enum', False):
        class_to_generate['enums'] = ''
        return

    parts = []
    for enum in class_to_generate['enum_values']:
        parts.append(' %s = %r\n' % (enum.upper(), enum))
    parts.append('\n')
    parts.append(' VALID_VALUES = %s\n\n' % _OrderedSet(class_to_generate['enum_values']).set_repr())
    class_to_generate['enums'] = ''.join(parts)
def update_class_to_generate_objects(classes_to_generate, class_to_generate):
    """Replace inline 'object' properties with synthetic helper classes.

    For each property typed 'object' a new class named <Owner><Prop> is
    added to *classes_to_generate* and the property's type becomes a Ref to
    it (recursing into nested objects).  Properties with no 'type' at all
    get the placeholder 'TypeNA'.
    """
    properties = class_to_generate['properties']
    for key, val in properties.items():
        if 'type' not in val:
            val['type'] = 'TypeNA'
            continue

        if val['type'] == 'object':
            create_new = val.copy()
            create_new.update({
                'name': '%s%s' % (class_to_generate['name'], key.title()),
                'description': ' "%s" of %s' % (key, class_to_generate['name'])
            })
            if 'properties' not in create_new:
                create_new['properties'] = {}

            # Synthetic names must not collide with real schema definitions.
            assert create_new['name'] not in classes_to_generate
            classes_to_generate[create_new['name']] = create_new

            update_class_to_generate_type(classes_to_generate, create_new)
            update_class_to_generate_props(create_new)

            # Update nested object types
            update_class_to_generate_objects(classes_to_generate, create_new)

            val['type'] = Ref(create_new['name'], classes_to_generate[create_new['name']])
            val.pop('properties', None)
def gen_debugger_protocol():
    """Entry point: regenerate pydevd_schema.py from the DAP schema plus
    the pydevd-specific custom schema.

    Requires Python 3.6+ because the attribute order of the generated file
    relies on dict insertion order.
    """
    import os.path
    import sys

    if sys.version_info[:2] < (3, 6):
        raise AssertionError('Must be run with Python 3.6 onwards (to keep dict order).')

    classes_to_generate = create_classes_to_generate_structure(load_schema_data())
    classes_to_generate.update(create_classes_to_generate_structure(load_custom_schema_data()))

    class_to_generate = fill_properties_and_required_from_base(classes_to_generate)

    # First pass: resolve descriptions/types and expand inline objects
    # (which may add new entries to classes_to_generate).
    for class_to_generate in list(classes_to_generate.values()):
        update_class_to_generate_description(class_to_generate)
        update_class_to_generate_type(classes_to_generate, class_to_generate)
        update_class_to_generate_props(class_to_generate)
        update_class_to_generate_objects(classes_to_generate, class_to_generate)

    # Second pass: render the per-class code fragments.
    for class_to_generate in classes_to_generate.values():
        update_class_to_generate_refs(class_to_generate)
        update_class_to_generate_init(class_to_generate)
        update_class_to_generate_enums(class_to_generate)
        update_class_to_generate_to_json(class_to_generate)
        update_class_to_generate_register_dec(classes_to_generate, class_to_generate)

    # NOTE(review): the runs of spaces inside this template literal appear
    # collapsed (likely an extraction artifact); verify the indentation
    # against upstream pydevd before regenerating.
    class_template = '''
%(register_request)s%(register_dec)s
class %(name)s(BaseSchema):
"""
%(description)s
Note: automatically generated code. Do not edit manually.
"""
%(enums)s%(props)s
%(refs)s
__slots__ = list(__props__.keys()) + ['kwargs']
%(init)s%(update_dict_ids_from_dap)s
%(to_dict)s%(update_dict_ids_to_dap)s
'''

    contents = []
    contents.append('# coding: utf-8')
    contents.append('# Automatically generated code.')
    contents.append('# Do not edit manually.')
    contents.append('# Generated by running: %s' % os.path.basename(__file__))
    contents.append('from .pydevd_base_schema import BaseSchema, register, register_request, register_response, register_event')
    contents.append('')
    for class_to_generate in classes_to_generate.values():
        contents.append(class_template % class_to_generate)

    parent_dir = os.path.dirname(__file__)
    schema = os.path.join(parent_dir, 'pydevd_schema.py')
    with open(schema, 'w', encoding='utf-8') as stream:
        stream.write('\n'.join(contents))
def _indent_lines(lines, indent=' '):
out_lines = []
for line in lines.splitlines(keepends=True):
out_lines.append(indent + line)
return ''.join(out_lines)
if __name__ == '__main__':
    gen_debugger_protocol()
# NOTE(review): the methods below are auto-generated completion stubs for
# Django's Manager API -- docstring-only bodies with no runtime behavior.
def __unicode__(self):
    """
    Return "app_label.model_label.manager_name".
    """

def _copy_to_model(self, model):
    """
    Makes a copy of the manager and assigns it to 'model', which should be
    a child of the existing model (used when inheriting a manager from an
    abstract base class).
    """

def _db(self):
    """
    """

def _get_queryset_methods(cls, queryset_class):
    """
    """

def _hints(self):
    """
    dict() -> new empty dictionary
    dict(mapping) -> new dictionary initialized from a mapping object's
    (key, value) pairs
    dict(iterable) -> new dictionary initialized as if via:
    d = {}
    for k, v in iterable:
    d[k] = v
    dict(**kwargs) -> new dictionary initialized with the name=value pairs
    in the keyword argument list. For example: dict(one=1, two=2)
    """

def _inherited(self):
    """
    """

def _insert(self, *args, **kwargs):
    """
    Inserts a new record for the given model. This provides an interface to
    the InsertQuery class and is how Model.save() is implemented.
    """

def _queryset_class(self):
    """
    Represents a lazy database lookup for a set of objects.
    """

def _set_creation_counter(self):
    """
    Sets the creation counter value for this instance and increments the
    class-level copy.
    """

def _update(self, *args, **kwargs):
    """
    A version of update that accepts field objects instead of field names.
    Used primarily for model saving and not intended for use by general
    code (it requires too much poking around at model internals to be
    useful at that level).
    """
# NOTE(review): auto-generated completion stubs for Django's Manager API
# (docstring-only bodies, no runtime behavior).
def aggregate(self, *args, **kwargs):
    """
    Returns a dictionary containing the calculations (aggregation)
    over the current queryset

    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """

def all(self):
    """
    @rtype: django.db.models.query.QuerySet
    """

def annotate(self, *args, **kwargs):
    """
    Return a query set in which the returned objects have been annotated
    with data aggregated from related fields.
    """

def bulk_create(self, *args, **kwargs):
    """
    Inserts each of the instances into the database. This does *not* call
    save() on each of the instances, does not send any pre/post save
    signals, and does not set the primary key attribute if it is an
    autoincrement field.
    """

def check(self, **kwargs):
    """
    """

def complex_filter(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with filter_obj added to the filters.

    filter_obj can be a Q object (or anything with an add_to_query()
    method) or a dictionary of keyword lookup arguments.

    This exists to support framework features such as 'limit_choices_to',
    and usually it will be more natural to use other methods.

    @rtype: django.db.models.query.QuerySet
    """

def contribute_to_class(self, model, name):
    """
    """

def count(self, *args, **kwargs):
    """
    Performs a SELECT COUNT() and returns the number of records as an
    integer.

    If the QuerySet is already fully cached this simply returns the length
    of the cached results set to avoid multiple SELECT COUNT(*) calls.
    """

def create(self, *args, **kwargs):
    """
    Creates a new object with the given kwargs, saving it to the database
    and returning the created object.
    """

def creation_counter(self):
    """
    """

def dates(self, *args, **kwargs):
    """
    Returns a list of date objects representing all available dates for
    the given field_name, scoped to 'kind'.
    """

def datetimes(self, *args, **kwargs):
    """
    Returns a list of datetime objects representing all available
    datetimes for the given field_name, scoped to 'kind'.
    """

def db(self):
    """
    """

def db_manager(self, using=None, hints=None):
    """
    """

def defer(self, *args, **kwargs):
    """
    Defers the loading of data for certain fields until they are accessed.
    The set of fields to defer is added to any existing set of deferred
    fields. The only exception to this is if None is passed in as the only
    parameter, in which case all deferrals are removed (None acts as a
    reset option).
    """

def distinct(self, *args, **kwargs):
    """
    Returns a new QuerySet instance that will select only distinct results.

    @rtype: django.db.models.query.QuerySet
    """

def earliest(self, *args, **kwargs):
    """
    """
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
@rtype: django.db.models.query.QuerySet
"""
def exists(self, *args, **kwargs):
"""
"""
def extra(self, *args, **kwargs):
"""
Adds extra SQL fragments to the query.
"""
def filter(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with the args ANDed to the existing
    set.
    @rtype: django.db.models.query.QuerySet
    """
def first(self, *args, **kwargs):
    """
    Returns the first object of a query, returns None if no match is found.
    """
def from_queryset(cls, queryset_class, class_name=None):
    """
    Stub; docstring intentionally empty in the original.
    """
def get(self, *args, **kwargs):
    """
    Performs the query and returns a single object matching the given
    keyword arguments.
    """
def get_or_create(self, *args, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.
    """
def get_queryset(self):
    """
    Returns a new QuerySet object. Subclasses can override this method to
    easily customize the behavior of the Manager.
    @rtype: django.db.models.query.QuerySet
    """
def in_bulk(self, *args, **kwargs):
    """
    Returns a dictionary mapping each of the given IDs to the object with
    that ID.
    """
def iterator(self, *args, **kwargs):
    """
    An iterator over the results from applying this QuerySet to the
    database.
    """
def last(self, *args, **kwargs):
    """
    Returns the last object of a query, returns None if no match is found.
    """
def latest(self, *args, **kwargs):
    """
    Stub; docstring intentionally empty in the original.
    """
def model(self):
    """
    MyModel(id)
    """
def none(self, *args, **kwargs):
    """
    Returns an empty QuerySet.
    @rtype: django.db.models.query.QuerySet
    """
def only(self, *args, **kwargs):
    """
    Essentially, the opposite of defer. Only the fields passed into this
    method and that are not already specified as deferred are loaded
    immediately when the queryset is evaluated.
    """
def order_by(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with the ordering changed.
    @rtype: django.db.models.query.QuerySet
    """
def prefetch_related(self, *args, **kwargs):
    """
    Returns a new QuerySet instance that will prefetch the specified
    Many-To-One and Many-To-Many related objects when the QuerySet is
    evaluated.
    When prefetch_related() is called more than once, the list of lookups to
    prefetch is appended to. If prefetch_related(None) is called, the list
    is cleared.
    @rtype: django.db.models.query.QuerySet
    """
def raw(self, *args, **kwargs):
    """
    Stub; docstring intentionally empty in the original.
    """
def reverse(self, *args, **kwargs):
    """
    Reverses the ordering of the QuerySet.
    @rtype: django.db.models.query.QuerySet
    """
def select_for_update(self, *args, **kwargs):
    """
    Returns a new QuerySet instance that will select objects with a
    FOR UPDATE lock.
    @rtype: django.db.models.query.QuerySet
    """
def select_related(self, *args, **kwargs):
    """
    Returns a new QuerySet instance that will select related objects.
    If fields are specified, they must be ForeignKey fields and only those
    related objects are included in the selection.
    If select_related(None) is called, the list is cleared.
    @rtype: django.db.models.query.QuerySet
    """
def update(self, *args, **kwargs):
    """
    Updates all elements in the current QuerySet, setting all the given
    fields to the appropriate values.
    """
def update_or_create(self, *args, **kwargs):
    """
    Looks up an object with the given kwargs, updating one with defaults
    if it exists, otherwise creates a new one.
    Returns a tuple (object, created), where created is a boolean
    specifying whether an object was created.
    """
def using(self, *args, **kwargs):
    """
    Selects which database this QuerySet should execute its query against.
    @rtype: django.db.models.query.QuerySet
    """
def values(self, *args, **kwargs):
    """
    Stub; docstring intentionally empty in the original.
    """
def values_list(self, *args, **kwargs):
    """
    Stub; docstring intentionally empty in the original.
    """
import sys
from enum import IntFlag
from _pydevd_frame_eval.vendored import bytecode as _bytecode
class CompilerFlags(IntFlag):
    """Possible values of the co_flags attribute of Code object.

    Note: We do not rely on inspect values here as some of them are missing and
    furthermore would be version dependent.
    """

    OPTIMIZED = 0x00001  # noqa
    NEWLOCALS = 0x00002  # noqa
    VARARGS = 0x00004  # noqa
    VARKEYWORDS = 0x00008  # noqa
    NESTED = 0x00010  # noqa
    GENERATOR = 0x00020  # noqa
    NOFREE = 0x00040  # noqa
    # New in Python 3.5
    # Used for coroutines defined using async def ie native coroutine
    COROUTINE = 0x00080  # noqa
    # Used for coroutines defined as a generator and then decorated using
    # types.coroutine
    ITERABLE_COROUTINE = 0x00100  # noqa
    # New in Python 3.6
    # Generator defined in an async def function
    ASYNC_GENERATOR = 0x00200  # noqa

    # __future__ flags
    # future flags changed in Python 3.9, so the numeric value of these two
    # members depends on the interpreter this module is imported under.
    if sys.version_info < (3, 9):
        FUTURE_GENERATOR_STOP = 0x80000  # noqa
        if sys.version_info > (3, 6):
            FUTURE_ANNOTATIONS = 0x100000
    else:
        FUTURE_GENERATOR_STOP = 0x800000  # noqa
        FUTURE_ANNOTATIONS = 0x1000000
def infer_flags(bytecode, is_async=None):
    """Infer the proper flags for a bytecode based on the instructions.

    Because the bytecode does not have enough context to guess if a function
    is asynchronous the algorithm tries to be conservative and will never turn
    a previously async code into a sync one.

    Parameters
    ----------
    bytecode : Bytecode | ConcreteBytecode | ControlFlowGraph
        Bytecode for which to infer the proper flags
    is_async : bool | None, optional
        Force the code to be marked as asynchronous if True, prevent it from
        being marked as asynchronous if False and simply infer the best
        solution based on the opcode and the existing flag if None.

    Returns
    -------
    CompilerFlags
        The inferred flags.

    Raises
    ------
    ValueError
        If *bytecode* is not a supported type, or if the requested sync/async
        behavior contradicts opcodes found in the bytecode.
    """
    flags = CompilerFlags(0)
    if not isinstance(
        bytecode,
        (_bytecode.Bytecode, _bytecode.ConcreteBytecode, _bytecode.ControlFlowGraph),
    ):
        msg = (
            "Expected a Bytecode, ConcreteBytecode or ControlFlowGraph "
            "instance not %s"
        )
        raise ValueError(msg % bytecode)

    instructions = (
        bytecode.get_instructions()
        if isinstance(bytecode, _bytecode.ControlFlowGraph)
        else bytecode
    )
    # Pseudo-instructions (line markers, jump labels) carry no opcode name.
    instr_names = {
        i.name
        for i in instructions
        if not isinstance(i, (_bytecode.SetLineno, _bytecode.Label))
    }

    # Identify optimized code: no name-scope (STORE_NAME/LOAD_NAME) access.
    if not (instr_names & {"STORE_NAME", "LOAD_NAME", "DELETE_NAME"}):
        flags |= CompilerFlags.OPTIMIZED

    # Check for free variables
    if not (
        instr_names
        & {
            "LOAD_CLOSURE",
            "LOAD_DEREF",
            "STORE_DEREF",
            "DELETE_DEREF",
            "LOAD_CLASSDEREF",
        }
    ):
        flags |= CompilerFlags.NOFREE

    # Copy flags for which we cannot infer the right value
    flags |= bytecode.flags & (
        CompilerFlags.NEWLOCALS
        | CompilerFlags.VARARGS
        | CompilerFlags.VARKEYWORDS
        | CompilerFlags.NESTED
    )

    sure_generator = instr_names & {"YIELD_VALUE"}
    maybe_generator = instr_names & {"YIELD_VALUE", "YIELD_FROM"}
    sure_async = instr_names & {
        "GET_AWAITABLE",
        "GET_AITER",
        "GET_ANEXT",
        "BEFORE_ASYNC_WITH",
        "SETUP_ASYNC_WITH",
        "END_ASYNC_FOR",
    }

    # If performing inference or forcing an async behavior, first inspect
    # the flags since this is the only way to identify iterable coroutines
    if is_async in (None, True):
        if bytecode.flags & CompilerFlags.COROUTINE:
            if sure_generator:
                flags |= CompilerFlags.ASYNC_GENERATOR
            else:
                flags |= CompilerFlags.COROUTINE
        elif bytecode.flags & CompilerFlags.ITERABLE_COROUTINE:
            if sure_async:
                # BUGFIX: the first two fragments previously concatenated to
                # "bytecode thatcan" (missing separating space).
                msg = (
                    "The ITERABLE_COROUTINE flag is set but bytecode that "
                    "can only be used in async functions have been "
                    "detected. Please unset that flag before performing "
                    "inference."
                )
                raise ValueError(msg)
            flags |= CompilerFlags.ITERABLE_COROUTINE
        elif bytecode.flags & CompilerFlags.ASYNC_GENERATOR:
            if not sure_generator:
                flags |= CompilerFlags.COROUTINE
            else:
                flags |= CompilerFlags.ASYNC_GENERATOR
        # If the code was not asynchronous before determine if it should now be
        # asynchronous based on the opcode and the is_async argument.
        else:
            if sure_async:
                # YIELD_FROM is not allowed in async generator
                if sure_generator:
                    flags |= CompilerFlags.ASYNC_GENERATOR
                else:
                    flags |= CompilerFlags.COROUTINE
            elif maybe_generator:
                if is_async:
                    if sure_generator:
                        flags |= CompilerFlags.ASYNC_GENERATOR
                    else:
                        flags |= CompilerFlags.COROUTINE
                else:
                    flags |= CompilerFlags.GENERATOR
            elif is_async:
                flags |= CompilerFlags.COROUTINE
    # If the code should not be asynchronous, check first it is possible and
    # next set the GENERATOR flag if relevant
    else:
        if sure_async:
            raise ValueError(
                "The is_async argument is False but bytecodes "
                "that can only be used in async functions have "
                "been detected."
            )
        if maybe_generator:
            flags |= CompilerFlags.GENERATOR

    flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP
    return flags
from __future__ import absolute_import
import collections
import sys
__version__ = "1.1.0"
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
VERSION = sys.version_info
__all__ = ['PY2', 'PY3', 'lru_cache', 'apply_changes_to_python_environment']
if PY3:
    # Python 3: builtin input() already behaves the way this module needs,
    # and the default encoding is already UTF-8, so nothing to patch.
    input = input

    def apply_changes_to_python_environment():
        """No-op on Python 3."""
        pass
else:
    input = raw_input  # noqa: F821

    # Guard so the (expensive, global) patch below runs at most once.
    python_environment_changes_applied = False

    import sys

    # Keep references to the original streams: reload(sys) below would
    # otherwise reset them.
    stdout = sys.stdout
    stderr = sys.stderr

    def apply_changes_to_python_environment():
        """Force the Python 2 default encoding to UTF-8 (idempotent).

        Uses the classic reload(sys)/setdefaultencoding hack; the saved
        stdout/stderr are restored because reload replaces them.
        """
        global python_environment_changes_applied
        if python_environment_changes_applied or sys.getdefaultencoding() == 'utf-8':
            python_environment_changes_applied = True
            return

        try:
            reload(sys)
            sys.stdout = stdout
            sys.stderr = stderr
            sys.setdefaultencoding('utf-8')
        except NameError:  # Python 3 — reload() is not a builtin; unreachable here
            sys.exit('This should not happen!')

        python_environment_changes_applied = True
if sys.version_info < (3, 2):
try:
from threading import Lock
except ImportError:
from dummy_threading import Lock
from functools import wraps
_CacheInfo = collections.namedtuple("CacheInfo", "hits misses maxsize currsize")
def lru_cache(maxsize=100):
    """Least-recently-used cache decorator.

    Taking from: https://github.com/MiCHiLU/python-functools32/blob/master/functools32/functools32.py
    with slight modifications.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
    f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See: https://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    def decorating_function(user_function, tuple=tuple, sorted=sorted, len=len, KeyError=KeyError):
        hits, misses = [0], [0]      # lists so the closures can mutate them
        kwd_mark = (object(),)       # separates positional and keyword args
        lock = Lock()
        # BUGFIX: the bounded branch previously tested `if cached:` after a
        # `.get(key, None)`, so falsy results (0, '', None, False) were never
        # treated as hits and were recomputed on every call. A unique
        # sentinel distinguishes "missing" from "cached falsy value".
        _missing = object()

        if maxsize is None:
            CACHE = {}

            @wraps(user_function)
            def wrapper(*args, **kwds):
                key = args
                if kwds:
                    key += kwd_mark + tuple(sorted(kwds.items()))
                try:
                    result = CACHE[key]
                    hits[0] += 1
                    return result
                except KeyError:
                    pass
                result = user_function(*args, **kwds)
                CACHE[key] = result
                misses[0] += 1
                return result
        else:
            CACHE = collections.OrderedDict()

            @wraps(user_function)
            def wrapper(*args, **kwds):
                key = args
                if kwds:
                    key += kwd_mark + tuple(sorted(kwds.items()))
                with lock:
                    cached = CACHE.get(key, _missing)
                    if cached is not _missing:
                        # Re-insert to record recent use of this key.
                        del CACHE[key]
                        CACHE[key] = cached
                        hits[0] += 1
                        return cached
                result = user_function(*args, **kwds)
                with lock:
                    CACHE[key] = result  # record recent use of this key
                    misses[0] += 1
                    while len(CACHE) > maxsize:
                        CACHE.popitem(last=False)
                return result

        def cache_info():
            """Report CACHE statistics."""
            with lock:
                return _CacheInfo(hits[0], misses[0], maxsize, len(CACHE))

        def cache_clear():
            """Clear the CACHE and CACHE statistics."""
            with lock:
                CACHE.clear()
                hits[0] = misses[0] = 0

        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return wrapper

    return decorating_function
else:
from functools import lru_cache | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_debug_adapter/vendored/vendored_pydevd/third_party/isort_container/isort/pie_slice.py | 0.430626 | 0.167797 | pie_slice.py | pypi |
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import functools
import glob
import os
import re
import sys
import setuptools
from isort import SortImports, __version__
from isort.settings import DEFAULT_SECTIONS, WrapModes, default, from_path, should_skip
INTRO = r"""
/#######################################################################\
`sMMy`
.yyyy- `
##soos## ./o.
` ``..-..` ``...`.`` ` ```` ``-ssso```
.s:-y- .+osssssso/. ./ossss+:so+:` :+o-`/osso:+sssssssso/
.s::y- osss+.``.`` -ssss+-.`-ossso` ssssso/::..::+ssss:::.
.s::y- /ssss+//:-.` `ssss+ `ssss+ sssso` :ssss`
.s::y- `-/+oossssso/ `ssss/ sssso ssss/ :ssss`
.y-/y- ````:ssss` ossso. :ssss: ssss/ :ssss.
`/so:` `-//::/osss+ `+ssss+-/ossso: /sso- `osssso/.
\/ `-/oooo++/- .:/++:/++/-` .. `://++/.
isort your Python imports for you so you don't have to
VERSION {0}
\########################################################################/
""".format(__version__)
shebang_re = re.compile(br'^#!.*\bpython[23w]?\b')


def is_python_file(path):
    """Return True if *path* looks like a Python source file.

    Decision order: a ``.py``/``.pyi`` extension is always Python; ``.pex``
    archives and editor backup files (trailing ``~``) never are; anything
    else is sniffed by reading the start of the file and matching a Python
    shebang line. Unreadable files are reported as not Python.
    """
    extension = os.path.splitext(path)[1]
    if extension in ('.py', '.pyi'):
        return True
    if extension == '.pex':
        return False
    # Skip editor backup files.
    if path.endswith('~'):
        return False
    try:
        with open(path, 'rb') as handle:
            first_bytes = handle.readline(100)
    except IOError:
        return False
    return bool(shebang_re.match(first_bytes))
class SortAttempt(object):
    """Result of one sort_imports() run: mis-sorted flag plus skipped flag."""

    def __init__(self, incorrectly_sorted, skipped):
        self.skipped = skipped
        self.incorrectly_sorted = incorrectly_sorted
def sort_imports(file_name, **arguments):
    """Sort one file; return a SortAttempt, or None if the file is unreadable."""
    try:
        sorter = SortImports(file_name, **arguments)
    except IOError as e:
        print("WARNING: Unable to parse file {0} due to {1}".format(file_name, e))
        return None
    return SortAttempt(sorter.incorrectly_sorted, sorter.skipped)
def iter_source_code(paths, config, skipped):
    """Iterate over all Python source files defined in paths.

    Yields file paths; directory entries are walked recursively. Skipped
    names are appended to the *skipped* list as a side channel.
    NOTE(review): mutates *config* in place — 'skip' is rewritten when
    'not_skip' is present.
    """
    if 'not_skip' in config:
        config['skip'] = list(set(config['skip']).difference(config['not_skip']))

    for path in paths:
        if os.path.isdir(path):
            for dirpath, dirnames, filenames in os.walk(path, topdown=True, followlinks=True):
                # Prune skipped directories in place: with topdown=True,
                # removing entries from dirnames stops os.walk descending.
                for dirname in list(dirnames):
                    if should_skip(dirname, config, dirpath):
                        skipped.append(dirname)
                        dirnames.remove(dirname)
                for filename in filenames:
                    filepath = os.path.join(dirpath, filename)
                    if is_python_file(filepath):
                        relative_file = os.path.relpath(filepath, path)
                        if should_skip(relative_file, config, path):
                            skipped.append(filename)
                        else:
                            yield filepath
        else:
            # Non-directory arguments are yielded as-is.
            yield path
class ISortCommand(setuptools.Command):
    """The :class:`ISortCommand` class is used by setuptools to perform
    imports checks on registered modules.
    """

    description = "Run isort on modules registered in setuptools"
    user_options = []

    def initialize_options(self):
        """Seed this command's attributes from isort's default settings."""
        default_settings = default.copy()
        for key, value in default_settings.items():
            setattr(self, key, value)

    def finalize_options(self):
        "Get options from config files."
        self.arguments = {}
        computed_settings = from_path(os.getcwd())
        for key, value in computed_settings.items():
            self.arguments[key] = value

    def distribution_files(self):
        """Find distribution packages."""
        # This is verbatim from flake8
        if self.distribution.packages:
            package_dirs = self.distribution.package_dir or {}
            for package in self.distribution.packages:
                pkg_dir = package
                if package in package_dirs:
                    pkg_dir = package_dirs[package]
                elif '' in package_dirs:
                    pkg_dir = package_dirs[''] + os.path.sep + pkg_dir
                yield pkg_dir.replace('.', os.path.sep)

        if self.distribution.py_modules:
            for filename in self.distribution.py_modules:
                yield "%s.py" % filename
        # Don't miss the setup.py file itself
        yield "setup.py"

    def run(self):
        """Check every distributed .py file; exit(1) if any is mis-sorted."""
        arguments = self.arguments
        wrong_sorted_files = False
        arguments['check'] = True  # check-only mode: never rewrite files here
        for path in self.distribution_files():
            for python_file in glob.iglob(os.path.join(path, '*.py')):
                try:
                    incorrectly_sorted = SortImports(python_file, **arguments).incorrectly_sorted
                    if incorrectly_sorted:
                        wrong_sorted_files = True
                except IOError as e:
                    print("WARNING: Unable to parse file {0} due to {1}".format(python_file, e))
        if wrong_sorted_files:
            sys.exit(1)
def parse_args(argv=None):
    """Parse isort's command-line options.

    Returns a dict containing only the options the user actually set to a
    truthy value (falsy values are filtered out below so config-file
    defaults are not overridden by unset CLI flags).
    """
    parser = argparse.ArgumentParser(description='Sort Python import definitions alphabetically '
                                                 'within logical sections. Run with no arguments to run '
                                                 'interactively. Run with `-` as the first argument to read from '
                                                 'stdin. Otherwise provide a list of files to sort.')
    # -sl/-nis are mutually exclusive: single-line forcing vs. no inline sort.
    inline_args_group = parser.add_mutually_exclusive_group()
    parser.add_argument('-a', '--add-import', dest='add_imports', action='append',
                        help='Adds the specified import line to all files, '
                             'automatically determining correct placement.')
    parser.add_argument('-ac', '--atomic', dest='atomic', action='store_true',
                        help="Ensures the output doesn't save if the resulting file contains syntax errors.")
    parser.add_argument('-af', '--force-adds', dest='force_adds', action='store_true',
                        help='Forces import adds even if the original file is empty.')
    parser.add_argument('-b', '--builtin', dest='known_standard_library', action='append',
                        help='Force sortImports to recognize a module as part of the python standard library.')
    parser.add_argument('-c', '--check-only', action='store_true', dest="check",
                        help='Checks the file for unsorted / unformatted imports and prints them to the '
                             'command line without modifying the file.')
    parser.add_argument('-ca', '--combine-as', dest='combine_as_imports', action='store_true',
                        help="Combines as imports on the same line.")
    parser.add_argument('-cs', '--combine-star', dest='combine_star', action='store_true',
                        help="Ensures that if a star import is present, nothing else is imported from that namespace.")
    parser.add_argument('-d', '--stdout', help='Force resulting output to stdout, instead of in-place.',
                        dest='write_to_stdout', action='store_true')
    parser.add_argument('-df', '--diff', dest='show_diff', action='store_true',
                        help="Prints a diff of all the changes isort would make to a file, instead of "
                             "changing it in place")
    parser.add_argument('-ds', '--no-sections', help='Put all imports into the same section bucket', dest='no_sections',
                        action='store_true')
    parser.add_argument('-dt', '--dont-order-by-type', dest='dont_order_by_type',
                        action='store_true', help='Only order imports alphabetically, do not attempt type ordering')
    parser.add_argument('-e', '--balanced', dest='balanced_wrapping', action='store_true',
                        help='Balances wrapping to produce the most consistent line length possible')
    parser.add_argument('-f', '--future', dest='known_future_library', action='append',
                        help='Force sortImports to recognize a module as part of the future compatibility libraries.')
    parser.add_argument('-fas', '--force-alphabetical-sort', action='store_true', dest="force_alphabetical_sort",
                        help='Force all imports to be sorted as a single section')
    # NOTE(review): -fass shares dest with -fas, so the two flags are
    # indistinguishable once parsed — presumably intentional; verify upstream.
    parser.add_argument('-fass', '--force-alphabetical-sort-within-sections', action='store_true',
                        dest="force_alphabetical_sort", help='Force all imports to be sorted alphabetically within a '
                                                             'section')
    parser.add_argument('-ff', '--from-first', dest='from_first',
                        help="Switches the typical ordering preference, showing from imports first then straight ones.")
    parser.add_argument('-fgw', '--force-grid-wrap', nargs='?', const=2, type=int, dest="force_grid_wrap",
                        help='Force number of from imports (defaults to 2) to be grid wrapped regardless of line '
                             'length')
    parser.add_argument('-fss', '--force-sort-within-sections', action='store_true', dest="force_sort_within_sections",
                        help='Force imports to be sorted by module, independent of import_type')
    parser.add_argument('-i', '--indent', help='String to place for indents defaults to "    " (4 spaces).',
                        dest='indent', type=str)
    parser.add_argument('-j', '--jobs', help='Number of files to process in parallel.',
                        dest='jobs', type=int)
    parser.add_argument('-k', '--keep-direct-and-as', dest='keep_direct_and_as_imports', action='store_true',
                        help="Turns off default behavior that removes direct imports when as imports exist.")
    parser.add_argument('-l', '--lines', help='[Deprecated] The max length of an import line (used for wrapping '
                                              'long imports).',
                        dest='line_length', type=int)
    parser.add_argument('-lai', '--lines-after-imports', dest='lines_after_imports', type=int)
    parser.add_argument('-lbt', '--lines-between-types', dest='lines_between_types', type=int)
    parser.add_argument('-le', '--line-ending', dest='line_ending',
                        help="Forces line endings to the specified value. If not set, values will be guessed per-file.")
    parser.add_argument('-ls', '--length-sort', help='Sort imports by their string length.',
                        dest='length_sort', action='store_true')
    parser.add_argument('-m', '--multi-line', dest='multi_line_output', type=int, choices=range(len(WrapModes)),
                        help='Multi line output (0-grid, 1-vertical, 2-hanging, 3-vert-hanging, 4-vert-grid, '
                             '5-vert-grid-grouped, 6-vert-grid-grouped-no-comma).')
    inline_args_group.add_argument('-nis', '--no-inline-sort', dest='no_inline_sort', action='store_true',
                                   help='Leaves `from` imports with multiple imports \'as-is\' (e.g. `from foo import a, c ,b`).')
    parser.add_argument('-nlb', '--no-lines-before', help='Sections which should not be split with previous by empty lines',
                        dest='no_lines_before', action='append')
    parser.add_argument('-ns', '--dont-skip', help='Files that sort imports should never skip over.',
                        dest='not_skip', action='append')
    parser.add_argument('-o', '--thirdparty', dest='known_third_party', action='append',
                        help='Force sortImports to recognize a module as being part of a third party library.')
    parser.add_argument('-ot', '--order-by-type', dest='order_by_type',
                        action='store_true', help='Order imports by type in addition to alphabetically')
    parser.add_argument('-p', '--project', dest='known_first_party', action='append',
                        help='Force sortImports to recognize a module as being part of the current python project.')
    parser.add_argument('-q', '--quiet', action='store_true', dest="quiet",
                        help='Shows extra quiet output, only errors are outputted.')
    # Bare -r is deprecated (ambiguous with -rc); main() reports and exits.
    parser.add_argument('-r', dest='ambiguous_r_flag', action='store_true')
    parser.add_argument('-rm', '--remove-import', dest='remove_imports', action='append',
                        help='Removes the specified import from all files.')
    parser.add_argument('-rr', '--reverse-relative', dest='reverse_relative', action='store_true',
                        help='Reverse order of relative imports.')
    parser.add_argument('-rc', '--recursive', dest='recursive', action='store_true',
                        help='Recursively look for Python files of which to sort imports')
    parser.add_argument('-s', '--skip', help='Files that sort imports should skip over. If you want to skip multiple '
                                             'files you should specify twice: --skip file1 --skip file2.', dest='skip', action='append')
    parser.add_argument('-sd', '--section-default', dest='default_section',
                        help='Sets the default section for imports (by default FIRSTPARTY) options: ' +
                             str(DEFAULT_SECTIONS))
    parser.add_argument('-sg', '--skip-glob', help='Files that sort imports should skip over.', dest='skip_glob',
                        action='append')
    inline_args_group.add_argument('-sl', '--force-single-line-imports', dest='force_single_line', action='store_true',
                                   help='Forces all from imports to appear on their own line')
    parser.add_argument('-sp', '--settings-path', dest="settings_path",
                        help='Explicitly set the settings path instead of auto determining based on file location.')
    parser.add_argument('-t', '--top', help='Force specific imports to the top of their appropriate section.',
                        dest='force_to_top', action='append')
    parser.add_argument('-tc', '--trailing-comma', dest='include_trailing_comma', action='store_true',
                        help='Includes a trailing comma on multi line imports that include parentheses.')
    parser.add_argument('-up', '--use-parentheses', dest='use_parentheses', action='store_true',
                        help='Use parenthesis for line continuation on length limit instead of slashes.')
    parser.add_argument('-v', '--version', action='store_true', dest='show_version')
    parser.add_argument('-vb', '--verbose', action='store_true', dest="verbose",
                        help='Shows verbose output, such as when files are skipped or when a check is successful.')
    parser.add_argument('--virtual-env', dest='virtual_env',
                        help='Virtual environment to use for determining whether a package is third-party')
    parser.add_argument('--conda-env', dest='conda_env',
                        help='Conda environment to use for determining whether a package is third-party')
    parser.add_argument('-vn', '--version-number', action='version', version=__version__,
                        help='Returns just the current version number without the logo')
    parser.add_argument('-w', '--line-width', help='The max length of an import line (used for wrapping long imports).',
                        dest='line_length', type=int)
    parser.add_argument('-wl', '--wrap-length', dest='wrap_length',
                        help="Specifies how long lines that are wrapped should be, if not set line_length is used.")
    parser.add_argument('-ws', '--ignore-whitespace', action='store_true', dest="ignore_whitespace",
                        help='Tells isort to ignore whitespace differences when --check-only is being used.')
    parser.add_argument('-y', '--apply', dest='apply', action='store_true',
                        help='Tells isort to apply changes recursively without asking')
    parser.add_argument('--unsafe', dest='unsafe', action='store_true',
                        help='Tells isort to look for files in standard library directories, etc. '
                             'where it may not be safe to operate in')
    parser.add_argument('--case-sensitive', dest='case_sensitive', action='store_true',
                        help='Tells isort to include casing when sorting module names')
    parser.add_argument('--filter-files', dest='filter_files', action='store_true',
                        help='Tells isort to filter files even when they are explicitly passed in as part of the command')
    parser.add_argument('files', nargs='*', help='One or more Python source files that need their imports sorted.')

    # Drop falsy values so unset flags do not shadow config-file settings.
    arguments = {key: value for key, value in vars(parser.parse_args(argv)).items() if value}
    if 'dont_order_by_type' in arguments:
        arguments['order_by_type'] = False
    if arguments.pop('unsafe', False):
        arguments['safety_excludes'] = False
    return arguments
def main(argv=None):
    """isort command-line entry point.

    Reads options via parse_args(), then either sorts stdin (when the single
    file argument is `-`), or sorts/checks the given files (recursively when
    requested), exiting with status 1 when --check-only finds mis-sorted
    files.
    """
    arguments = parse_args(argv)
    if arguments.get('show_version'):
        print(INTRO)
        return

    if arguments.get('ambiguous_r_flag'):
        print('ERROR: Deprecated -r flag set. This flag has been replaced with -rm to remove ambiguity between it and '
              '-rc for recursive')
        sys.exit(1)

    arguments['check_skip'] = False
    if 'settings_path' in arguments:
        # Normalize to an absolute directory (a file argument is reduced to
        # its containing directory).
        sp = arguments['settings_path']
        arguments['settings_path'] = os.path.abspath(sp) if os.path.isdir(sp) else os.path.dirname(os.path.abspath(sp))
        if not os.path.isdir(arguments['settings_path']):
            print("WARNING: settings_path dir does not exist: {0}".format(arguments['settings_path']))

    if 'virtual_env' in arguments:
        venv = arguments['virtual_env']
        arguments['virtual_env'] = os.path.abspath(venv)
        if not os.path.isdir(arguments['virtual_env']):
            print("WARNING: virtual_env dir does not exist: {0}".format(arguments['virtual_env']))

    file_names = arguments.pop('files', [])
    if file_names == ['-']:
        # Sort stdin to stdout; prefer the binary buffer when available.
        try:
            # python 3
            file_ = sys.stdin.buffer
        except AttributeError:
            # python 2
            file_ = sys.stdin
        SortImports(file_=file_, write_to_stdout=True, **arguments)
    else:
        if not file_names:
            # No files given: sort the current directory tree interactively.
            file_names = ['.']
            arguments['recursive'] = True
            if not arguments.get('apply', False):
                arguments['ask_to_apply'] = True
        config = from_path(arguments.get('settings_path', '') or os.path.abspath(file_names[0]) or os.getcwd()).copy()
        config.update(arguments)
        wrong_sorted_files = False
        skipped = []

        if config.get('filter_files'):
            # Apply skip settings even to explicitly listed files.
            filtered_files = []
            for file_name in file_names:
                if should_skip(file_name, config):
                    skipped.append(file_name)
                else:
                    filtered_files.append(file_name)
            file_names = filtered_files

        if arguments.get('recursive', False):
            file_names = iter_source_code(file_names, config, skipped)
        num_skipped = 0
        if config['verbose'] or config.get('show_logo', False):
            print(INTRO)

        jobs = arguments.get('jobs')
        if jobs:
            import multiprocessing
            executor = multiprocessing.Pool(jobs)
            attempt_iterator = executor.imap(functools.partial(sort_imports, **arguments), file_names)
        else:
            attempt_iterator = (sort_imports(file_name, **arguments) for file_name in file_names)

        for sort_attempt in attempt_iterator:
            if not sort_attempt:
                continue  # unparsable file: sort_imports returned None
            incorrectly_sorted = sort_attempt.incorrectly_sorted
            if arguments.get('check', False) and incorrectly_sorted:
                wrong_sorted_files = True
            if sort_attempt.skipped:
                num_skipped += 1

        if wrong_sorted_files:
            sys.exit(1)

        num_skipped += len(skipped)
        if num_skipped and not arguments.get('quiet', False):
            if config['verbose']:
                for was_skipped in skipped:
                    print("WARNING: {0} was skipped as it's listed in 'skip' setting"
                          " or matches a glob in 'skip_glob' setting".format(was_skipped))
            print("Skipped {0} files".format(num_skipped))
# Fixes extraction residue that was fused onto the main() call line.
if __name__ == "__main__":
    main()
from __future__ import absolute_import
import functools
from collections import namedtuple
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
@functools.wraps(functools.update_wrapper)
def update_wrapper(
    wrapper,
    wrapped,
    assigned=functools.WRAPPER_ASSIGNMENTS,
    updated=functools.WRAPPER_UPDATES,
):
    """
    Patch two bugs in functools.update_wrapper.
    """
    # workaround for http://bugs.python.org/issue3445:
    # only copy attributes that actually exist on the wrapped callable.
    present = tuple(attr for attr in assigned if hasattr(wrapped, attr))
    patched = functools.update_wrapper(wrapper, wrapped, present, updated)
    # workaround for https://bugs.python.org/issue17482
    patched.__wrapped__ = wrapped
    return patched
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(
args,
kwds,
typed,
kwd_mark=(object(),),
fasttypes=set([int, str, frozenset, type(None)]),
sorted=sorted,
tuple=tuple,
type=type,
len=len,
):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
    """Least-recently-used cache decorator (backport of functools.lru_cache).

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
    f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    # Users should only access the lru_cache through its public API:
    # cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    def decorating_function(user_function):
        cache = dict()
        stats = [0, 0]  # make statistics updateable non-locally
        HITS, MISSES = 0, 1  # names for the stats fields
        make_key = _make_key
        cache_get = cache.get  # bound method to lookup key or return None
        _len = len  # localize the global len() function
        lock = RLock()  # because linkedlist updates aren't threadsafe
        root = []  # root of the circular doubly linked list
        root[:] = [root, root, None, None]  # initialize by pointing to self
        nonlocal_root = [root]  # make updateable non-locally
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3  # names for the link fields

        if maxsize == 0:

            def wrapper(*args, **kwds):
                # no caching, just do a statistics update after a successful call
                result = user_function(*args, **kwds)
                stats[MISSES] += 1
                return result

        elif maxsize is None:

            def wrapper(*args, **kwds):
                # simple caching without ordering or size limit
                key = make_key(args, kwds, typed)
                result = cache_get(
                    key, root
                )  # root used here as a unique not-found sentinel
                if result is not root:
                    stats[HITS] += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                stats[MISSES] += 1
                return result

        else:

            def wrapper(*args, **kwds):
                # size limited caching that tracks accesses by recency
                key = make_key(args, kwds, typed) if kwds or typed else args
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # record recent use of the key by moving it
                        # to the front of the list
                        root, = nonlocal_root
                        link_prev, link_next, key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        stats[HITS] += 1
                        return result
                # The user function runs outside the lock, so the cache may be
                # mutated concurrently before we re-acquire it below.
                result = user_function(*args, **kwds)
                with lock:
                    root, = nonlocal_root
                    if key in cache:
                        # getting here means that this same key was added to the
                        # cache while the lock was released.  since the link
                        # update is already done, we need only return the
                        # computed result and update the count of misses.
                        pass
                    elif _len(cache) >= maxsize:
                        # use the old root to store the new key and result
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        # empty the oldest link and make it the new root
                        root = nonlocal_root[0] = oldroot[NEXT]
                        oldkey = root[KEY]
                        root[KEY] = root[RESULT] = None
                        # now update the cache dictionary for the new links
                        del cache[oldkey]
                        cache[key] = oldroot
                    else:
                        # put result in a new link at the front of the list
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = cache[key] = link
                    stats[MISSES] += 1
                return result

        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))

        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                cache.clear()
                root = nonlocal_root[0]
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]

        wrapper.__wrapped__ = user_function
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function
from _pydevd_bundle.pydevd_breakpoints import LineBreakpoint
from _pydevd_bundle.pydevd_api import PyDevdAPI
import bisect
from _pydev_bundle import pydev_log
class LineBreakpointWithLazyValidation(LineBreakpoint):
    """A line breakpoint whose validity is only determined lazily (when the
    related template is actually rendered and its valid lines are known)."""

    def __init__(self, *args, **kwargs):
        LineBreakpoint.__init__(self, *args, **kwargs)

        # _AddBreakpointResult instance that will be modified in-place and then
        # re-sent through `on_changed_breakpoint_state` when validation runs.
        self.add_breakpoint_result = None

        # Callback notified when the breakpoint state changes. Signature:
        # on_changed_breakpoint_state(breakpoint_id: int, add_breakpoint_result: _AddBreakpointResult)
        self.on_changed_breakpoint_state = None

        # Cache key recorded at the last verification; if it no longer matches
        # (e.g.: the template changed on disk) the breakpoint is re-verified.
        self.verified_cache_key = None
class ValidationInfo(object):
    """
    Validates template breakpoints against the set of lines that are actually
    executable in a template (which is only known when the template renders).
    Subclasses provide `_collect_valid_lines_in_template_uncached`.
    """

    def __init__(self):
        # Maps canonical filename -> (frozenset(valid lines), tuple(sorted valid lines)).
        self._canonical_normalized_filename_to_last_template_lines = {}

    def _collect_valid_lines_in_template(self, template):
        # We cache the lines in the template itself. Note that among requests the
        # template may be a different instance (because the template contents could be
        # changed on disk), but this may still be called multiple times during the
        # same render session, so, caching is interesting.
        lines_cache = getattr(template, '__pydevd_lines_cache__', None)
        if lines_cache is not None:
            lines, sorted_lines = lines_cache
            return lines, sorted_lines

        lines = self._collect_valid_lines_in_template_uncached(template)
        lines = frozenset(lines)
        sorted_lines = tuple(sorted(lines))
        template.__pydevd_lines_cache__ = lines, sorted_lines
        return lines, sorted_lines

    def _collect_valid_lines_in_template_uncached(self, template):
        # Template-engine specific: must be provided by subclasses.
        raise NotImplementedError()

    def verify_breakpoints(self, py_db, canonical_normalized_filename, template_breakpoints_for_file, template):
        '''
        This function should be called whenever a rendering is detected.

        :param str canonical_normalized_filename:
        :param dict[int:LineBreakpointWithLazyValidation] template_breakpoints_for_file:
        '''
        valid_lines_frozenset, sorted_lines = self._collect_valid_lines_in_template(template)
        self._canonical_normalized_filename_to_last_template_lines[canonical_normalized_filename] = valid_lines_frozenset, sorted_lines
        self._verify_breakpoints_with_lines_collected(py_db, canonical_normalized_filename, template_breakpoints_for_file, valid_lines_frozenset, sorted_lines)

    def verify_breakpoints_from_template_cached_lines(self, py_db, canonical_normalized_filename, template_breakpoints_for_file):
        '''
        This is used when the lines are already available (if just the template is available,
        `verify_breakpoints` should be used instead).
        '''
        cached = self._canonical_normalized_filename_to_last_template_lines.get(canonical_normalized_filename)
        if cached is not None:
            valid_lines_frozenset, sorted_lines = cached
            self._verify_breakpoints_with_lines_collected(py_db, canonical_normalized_filename, template_breakpoints_for_file, valid_lines_frozenset, sorted_lines)

    def _verify_breakpoints_with_lines_collected(self, py_db, canonical_normalized_filename, template_breakpoints_for_file, valid_lines_frozenset, sorted_lines):
        for line, template_bp in list(template_breakpoints_for_file.items()):  # Note: iterate in a copy (we may mutate it).
            if template_bp.verified_cache_key != valid_lines_frozenset:
                template_bp.verified_cache_key = valid_lines_frozenset
                valid = line in valid_lines_frozenset

                if not valid:
                    new_line = -1
                    if sorted_lines:
                        # Adjust to the first preceding valid line.
                        idx = bisect.bisect_left(sorted_lines, line)
                        if idx > 0:
                            new_line = sorted_lines[idx - 1]

                    if new_line >= 0 and new_line not in template_breakpoints_for_file:
                        # We just add it if found and if there's no existing breakpoint at that
                        # location.
                        # Fix: notify when *either* the error code or the translated line
                        # changed. The previous `and` skipped the move/notification when the
                        # error code was already ADD_BREAKPOINT_NO_ERROR but the line moved
                        # (or vice-versa), leaving the breakpoint dict stale.
                        if template_bp.add_breakpoint_result.error_code != PyDevdAPI.ADD_BREAKPOINT_NO_ERROR or template_bp.add_breakpoint_result.translated_line != new_line:
                            pydev_log.debug('Template breakpoint in %s in line: %s moved to line: %s', canonical_normalized_filename, line, new_line)
                            template_bp.add_breakpoint_result.error_code = PyDevdAPI.ADD_BREAKPOINT_NO_ERROR
                            template_bp.add_breakpoint_result.translated_line = new_line

                            # Add it to a new line.
                            template_breakpoints_for_file.pop(line, None)
                            template_breakpoints_for_file[new_line] = template_bp
                            template_bp.on_changed_breakpoint_state(template_bp.breakpoint_id, template_bp.add_breakpoint_result)
                    else:
                        if template_bp.add_breakpoint_result.error_code != PyDevdAPI.ADD_BREAKPOINT_INVALID_LINE:
                            pydev_log.debug('Template breakpoint in %s in line: %s invalid (valid lines: %s)', canonical_normalized_filename, line, valid_lines_frozenset)
                            template_bp.add_breakpoint_result.error_code = PyDevdAPI.ADD_BREAKPOINT_INVALID_LINE
                            template_bp.on_changed_breakpoint_state(template_bp.breakpoint_id, template_bp.add_breakpoint_result)
                else:
                    if template_bp.add_breakpoint_result.error_code != PyDevdAPI.ADD_BREAKPOINT_NO_ERROR:
                        template_bp.add_breakpoint_result.error_code = PyDevdAPI.ADD_BREAKPOINT_NO_ERROR
                        template_bp.on_changed_breakpoint_state(template_bp.breakpoint_id, template_bp.add_breakpoint_result)
from typing import List
# Command IDs contributed by the Robot Framework Language Server.
# The trailing comment on each constant is the command's user-visible title.
ROBOT_RUN_TEST = "robot.runTest" # Run Test/Task
ROBOT_DEBUG_TEST = "robot.debugTest" # Debug Test/Task
ROBOT_RUN_SUITE = "robot.runSuite" # Run Tests/Tasks Suite
ROBOT_DEBUG_SUITE = "robot.debugSuite" # Debug Tests/Tasks Suite
ROBOT_INTERACTIVE_SHELL = "robot.interactiveShell" # Start Interactive Console
ROBOT_INTERNAL_RFINTERACTIVE_START = "robot.internal.rfinteractive.start" # Create Interactive Console
ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE = "robot.internal.rfinteractive.evaluate" # Evaluate in Interactive Console
ROBOT_INTERNAL_RFINTERACTIVE_STOP = "robot.internal.rfinteractive.stop" # Stop Interactive Console
ROBOT_INTERNAL_RFINTERACTIVE_SEMANTIC_TOKENS = "robot.internal.rfinteractive.semanticTokens" # Get the semantic tokens based on the code entered.
ROBOT_INTERNAL_RFINTERACTIVE_RESOLVE_COMPLETION = "robot.internal.rfinteractive.resolveCompletion" # Resolves the passed completion.
ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS = "robot.internal.rfinteractive.completions" # Get the completions based on the code entered.
ROBOT_GET_RFLS_HOME_DIR = "robot.getRFLSHomeDir" # Provides the directory used to store information (usually ~/.robotframework-ls -- may be configured with `ROBOTFRAMEWORK_LS_USER_HOME` environment variable).
ROBOT_CLEAR_CACHES_AND_RESTART_PROCESSES = "robot.clearCachesAndRestartProcesses" # Clear caches and restart Robot Framework Language Server
ROBOT_CLEAR_CACHES_AND_RESTART_PROCESSES_START_INTERNAL = "robot.clearCachesAndRestartProcesses.start.internal" # Stops the RFLS and waits for robot.clearCachesAndRestartProcesses.finish.internal to restart
ROBOT_CLEAR_CACHES_AND_RESTART_PROCESSES_FINISH_INTERNAL = "robot.clearCachesAndRestartProcesses.finish.internal" # To be used to restart the processes
ROBOT_START_INDEXING_INTERNAL = "robot.startIndexing.internal" # Starts the indexing service
ROBOT_WAIT_FULL_TEST_COLLECTION_INTERNAL = "robot.waitFullTestCollection.internal" # Schedules and Waits for a full test collection
ROBOT_RF_INFO_INTERNAL = "robot.rfInfo.internal" # Collects information on the Robot Framework version being used.
ROBOT_LINT_WORKSPACE = "robot.lint.workspace" # Lint all files in the workspace.
ROBOT_LINT_EXPLORER = "robot.lint.explorer" # Lint
ROBOT_OPEN_FLOW_EXPLORER = "robot.openFlowExplorer" # Open Robot Flow Explorer
ROBOT_OPEN_FLOW_EXPLORER_INTERNAL = "robot.openFlowExplorer.internal" # Open Robot Flow Explorer Internals
ROBOT_GENERATE_FLOW_EXPLORER_MODEL = "robot.generateFlowExplorerModel" # Generate Robot Flow Explorer Model
ROBOT_COLLECT_ROBOT_DOCUMENTATION = "robot.collectRobotDocumentation" # Collect Robot Documentation
ROBOT_VIEW_DOCUMENTATION_PIN = "robot.view.documentation.pin" # Pin Robot Documentation
ROBOT_VIEW_DOCUMENTATION_UNPIN = "robot.view.documentation.unpin" # Unpin Robot Documentation
ROBOT_CONVERT_OUTPUT_XML_TO_ROBOSTREAM = "robot.convertOutputXMLToRobostream" # Convert output.xml to robocorp stream
ROBOT_APPLY_CODE_ACTION = "robot.applyCodeAction" # Applies some code action
# Subset of the commands above that the language server process itself handles
# (the remaining ones are presumably implemented client-side -- TODO confirm).
ALL_SERVER_COMMANDS: List[str] = [
    ROBOT_INTERNAL_RFINTERACTIVE_START,
    ROBOT_INTERNAL_RFINTERACTIVE_EVALUATE,
    ROBOT_INTERNAL_RFINTERACTIVE_STOP,
    ROBOT_INTERNAL_RFINTERACTIVE_SEMANTIC_TOKENS,
    ROBOT_INTERNAL_RFINTERACTIVE_RESOLVE_COMPLETION,
    ROBOT_INTERNAL_RFINTERACTIVE_COMPLETIONS,
    ROBOT_GET_RFLS_HOME_DIR,
    ROBOT_START_INDEXING_INTERNAL,
    ROBOT_WAIT_FULL_TEST_COLLECTION_INTERNAL,
    ROBOT_RF_INFO_INTERNAL,
    ROBOT_LINT_WORKSPACE,
    ROBOT_LINT_EXPLORER,
    ROBOT_OPEN_FLOW_EXPLORER_INTERNAL,
    ROBOT_GENERATE_FLOW_EXPLORER_MODEL,
    ROBOT_COLLECT_ROBOT_DOCUMENTATION,
    ROBOT_CONVERT_OUTPUT_XML_TO_ROBOSTREAM,
    ROBOT_APPLY_CODE_ACTION,
]
# fmt: on
from typing import Set, Iterable, Any, List
from robotframework_ls.impl.protocols import (
ICompletionContext,
IRobotDocument,
IRobotToken,
)
from robocorp_ls_core.lsp import (
Range,
TextEditTypedDict,
CodeActionTypedDict,
)
from robotframework_ls.impl._code_action_utils import wrap_edits_in_snippet
def _create_local_variable_refactoring(
    completion_context: ICompletionContext,
    select_range: Range,
) -> Iterable[CodeActionTypedDict]:
    """
    Offers an "Extract local variable" code action: the selected expression is
    assigned to a new variable (via `Set Variable`) inserted above the current
    statement, and the selection is replaced by that variable.

    Only single-line, non-empty selections are supported; otherwise nothing is
    yielded.
    """
    doc: IRobotDocument = completion_context.doc
    line = select_range.start.line
    col = select_range.start.character
    endline = select_range.end.line
    endcol = select_range.end.character
    if line == endline and col != endcol:
        contents = doc.get_range(line, col, endline, endcol)
        token_info = completion_context.get_current_token()
        if token_info:
            curr_node_line_0_based = token_info.node.lineno - 1
            from robotframework_ls.robot_config import get_arguments_separator
            from robotframework_ls.robot_config import (
                create_convert_keyword_format_func,
            )
            import re

            # Honor the user-configured keyword casing for `Set Variable`.
            format_name = create_convert_keyword_format_func(completion_context.config)
            set_var_name = format_name("Set Variable")

            # Reuse the current statement's leading indentation for the new line.
            indent = " "
            line_contents = completion_context.doc.get_line(curr_node_line_0_based)
            # Raw string: the previous '"[\s]+"' relied on an invalid escape
            # sequence in a plain string (SyntaxWarning on newer Pythons).
            found = re.match(r"\s+", line_contents)
            if found:
                indent = found.group()

            sep = get_arguments_separator(completion_context)
            tok: IRobotToken = token_info.token
            changes: List[TextEditTypedDict] = [
                # 1) Insert `${variable}=  Set Variable  <selection>` above.
                {
                    "range": {
                        "start": {"line": curr_node_line_0_based, "character": 0},
                        "end": {"line": curr_node_line_0_based, "character": 0},
                    },
                    "newText": "%s${${1:variable}}=%s%s%s%s\n"
                    % (indent, sep, set_var_name, sep, contents),
                },
                # 2) Replace the selection with the new variable.
                {
                    "range": {
                        "start": {"line": tok.lineno - 1, "character": col},
                        "end": {
                            "line": tok.lineno - 1,
                            "character": endcol,
                        },
                    },
                    "newText": "${${1:variable}}",
                },
            ]
            yield wrap_edits_in_snippet(
                completion_context,
                "Extract local variable",
                changes,
                "refactor.extract",
            )
def _create_variable_section_refactoring(
    completion_context: ICompletionContext,
    select_range: Range,
) -> Iterable[CodeActionTypedDict]:
    """
    Offers extracting the selected expression into the variables section:
    a new variable declaration is added there and the selection is replaced
    by a reference to it. Yields nothing for multi-line/empty selections.
    """
    start = select_range.start
    end = select_range.end
    if start.line != end.line or start.character == end.character:
        return

    token_info = completion_context.get_current_token()
    if not token_info:
        return

    from robotframework_ls.robot_config import get_arguments_separator
    from robotframework_ls.impl.code_action_common import (
        create_var_in_variables_section_text_edit,
    )

    selected_text = completion_context.doc.get_range(
        start.line, start.character, end.line, end.character
    )
    separator = get_arguments_separator(completion_context)

    # Declaration added to the variables section.
    var_template = "${${0:variable}}%s%s\n" % (separator, selected_text)
    declaration_edit = create_var_in_variables_section_text_edit(
        completion_context, var_template
    )

    # Replacement of the selection by the new variable reference.
    tok: IRobotToken = token_info.token
    replacement_edit: TextEditTypedDict = {
        "range": {
            "start": {"line": tok.lineno - 1, "character": start.character},
            "end": {"line": tok.lineno - 1, "character": end.character},
        },
        "newText": "${${0:variable}}",
    }

    edits: List[TextEditTypedDict] = [declaration_edit, replacement_edit]
    yield wrap_edits_in_snippet(
        completion_context,
        "Extract variable to variable section",
        edits,
        "refactor.extract",
    )
def code_action_refactoring(
    completion_context: ICompletionContext,
    select_range: Range,
    only: Set[str],
) -> Iterable[CodeActionTypedDict]:
    """
    Used to do refactorings (currently "extract variable", locally or to the
    variables section).

    :param only: the code action kinds the client asked for; when empty, all
        kinds are offered.
    """
    from robotframework_ls.impl import ast_utils

    current_section: Any = completion_context.get_ast_current_section()
    if not (
        ast_utils.is_keyword_section(current_section)
        or ast_utils.is_testcase_section(current_section)
    ):
        return

    def accepted(kind: str) -> bool:
        # An empty `only` means the client accepts any kind. Note: the
        # original `not only or (only and ...)` had a redundant `only and`.
        return not only or bool(only & {"refactor", "refactor.extract", kind})

    if accepted("refactor.extract.local"):
        yield from _create_local_variable_refactoring(
            completion_context, select_range
        )

    if accepted("refactor.extract.variableSection"):
        yield from _create_variable_section_refactoring(
            completion_context, select_range
        )
from typing import List
from robotframework_ls.impl.protocols import ICompletionContext
from robocorp_ls_core.lsp import DocumentSymbolTypedDict, SymbolKind
def collect_children(ast) -> List[DocumentSymbolTypedDict]:
    """
    Collect the child symbols (keywords, test cases and variables) found
    directly below the given section `ast` node.
    """
    from robotframework_ls.impl import ast_utils
    from robot.api import Token
    from robotframework_ls.impl.ast_utils import create_range_from_token

    # Maps the AST class name to how its name token is obtained and the
    # SymbolKind to report (removes the 3x copy-pasted branches).
    handlers = {
        "Keyword": (
            lambda node: node.header.get_token(Token.KEYWORD_NAME),
            SymbolKind.Function,
        ),
        "TestCase": (
            lambda node: node.header.get_token(Token.TESTCASE_NAME),
            SymbolKind.Class,
        ),
        "Variable": (
            lambda node: node.get_token(Token.VARIABLE),
            SymbolKind.Variable,
        ),
    }

    ret: List[DocumentSymbolTypedDict] = []
    for node_info in ast_utils.iter_nodes(
        ast, accept_class=("Keyword", "TestCase", "Variable")
    ):
        node = node_info.node
        get_name_token, symbol_kind = handlers[node.__class__.__name__]
        token = get_name_token(node)
        if token is None:
            # Incomplete/invalid source may have no name token: skip instead
            # of crashing in create_range_from_token (previously unguarded).
            continue
        symbol_range = create_range_from_token(token)
        doc_symbol: DocumentSymbolTypedDict = {
            "name": str(token),
            "kind": symbol_kind,
            "range": symbol_range,
            "selectionRange": symbol_range,
        }
        ret.append(doc_symbol)
    return ret
def create_section_doc_symbol(
    ret: List[DocumentSymbolTypedDict], ast, header_token_type, symbol_kind
):
    """
    Append to `ret` a DocumentSymbol for the header of the given section `ast`
    (with the section contents as children). Only the first matching header
    token type is used.

    :param header_token_type: a token type or tuple of token types; entries may
        be None (callers use getattr(Token, "...", None) for token types that
        don't exist in the running Robot Framework version).
    """
    from robotframework_ls.impl.ast_utils import create_range_from_token

    if not isinstance(header_token_type, tuple):
        header_token_type = (header_token_type,)
    for t in header_token_type:
        if t is None:
            # Token type unavailable in this RF version: skip explicitly
            # instead of querying get_token(None).
            continue
        token = ast.header.get_token(t)
        if token is None:
            continue
        symbol_range = create_range_from_token(token)
        doc_symbol: DocumentSymbolTypedDict = {
            # Strip the "*** Section ***" decoration from the displayed name.
            "name": str(token).replace("*", "").strip(),
            "kind": symbol_kind,
            "range": symbol_range,
            "selectionRange": symbol_range,
            "children": collect_children(ast),
        }
        ret.append(doc_symbol)
        break
def document_symbol(
    completion_context: ICompletionContext,
) -> List[DocumentSymbolTypedDict]:
    """
    Compute the top-level document symbols (outline) for the robot document:
    one Namespace symbol per section, with its contents as children.
    """
    from robotframework_ls.impl import ast_utils
    from robot.api import Token

    # (AST section class name, header token type(s) to look for).
    # TASK_HEADER may not exist in older RF versions, hence the getattr.
    sections = (
        ("SettingSection", Token.SETTING_HEADER),
        ("VariableSection", Token.VARIABLE_HEADER),
        (
            "TestCaseSection",
            (Token.TESTCASE_HEADER, getattr(Token, "TASK_HEADER", None)),
        ),
        ("KeywordSection", Token.KEYWORD_HEADER),
    )

    ret: List[DocumentSymbolTypedDict] = []
    ast = completion_context.get_ast()
    # Same iteration order as the previous 4 copy-pasted loops.
    for section_class, header_token_type in sections:
        for node_info in ast_utils.iter_nodes(ast, section_class):
            create_section_doc_symbol(
                ret, node_info.node, header_token_type, SymbolKind.Namespace
            )
    return ret
from robocorp_ls_core.lsp import DocumentHighlightTypedDict
from typing import Optional, List
from robotframework_ls.impl.protocols import ICompletionContext, VarTokenInfo
def _highlight_keyword(
    completion_context: ICompletionContext, curr_token_info
) -> List[DocumentHighlightTypedDict]:
    """
    Compute highlights for the keyword under the cursor: every textual
    reference to it in the current document and, when applicable, its
    definition (when defined in this same document).
    """
    from robotframework_ls.impl.references import (
        iter_keyword_references_in_doc,
        matches_source,
    )
    from robotframework_ls.impl.text_utilities import normalize_robot_name
    from robotframework_ls.impl.protocols import IKeywordFound
    from robocorp_ls_core.lsp import DocumentHighlightKind

    ret: List[DocumentHighlightTypedDict] = []

    # We're in a keyword, so, search for matches.
    normalized_name = normalize_robot_name(curr_token_info.token.value)
    if curr_token_info.token.type == curr_token_info.token.KEYWORD:
        # Drop a "Library." / "Resource." qualifier so references match on the
        # plain keyword name.
        dot_i = normalized_name.find(".")
        if dot_i != -1:
            normalized_name = normalized_name[dot_i + 1 :]

    # Collect all textual usages of the keyword in this document.
    for range_ref in iter_keyword_references_in_doc(
        completion_context,
        completion_context.doc,
        normalized_name,
        keyword_found=None,  # We don't want to check it even if available (we want textual matches too even if not defined).
    ):
        completion_context.check_cancelled()
        ret.append({"range": range_ref, "kind": DocumentHighlightKind.Text})

    if curr_token_info.token.type == curr_token_info.token.KEYWORD_NAME:
        # We're hovering over the keyword name (its definition): highlight the
        # name token itself.
        ret.append(
            {
                "range": {
                    "start": {
                        "line": curr_token_info.token.lineno - 1,
                        "character": curr_token_info.token.col_offset,
                    },
                    "end": {
                        "line": curr_token_info.token.lineno - 1,
                        "character": curr_token_info.token.end_col_offset,
                    },
                },
                "kind": DocumentHighlightKind.Text,
            }
        )
    else:
        # Hovering over a usage: also highlight the definition when it lives
        # in this same document.
        current_keyword_definition_and_usage_info = (
            completion_context.get_current_keyword_definition_and_usage_info()
        )
        if current_keyword_definition_and_usage_info is not None:
            # i.e.: check if the definition also matches.
            (
                keyword_definition,
                _usage_info,
            ) = current_keyword_definition_and_usage_info

            keyword_found: IKeywordFound = keyword_definition.keyword_found
            include_declaration = matches_source(
                completion_context.doc.path, keyword_found.source
            )
            if include_declaration:
                # NOTE(review): keyword_found.lineno appears to be used as
                # 0-based here (no -1 adjustment as above) -- confirm against
                # IKeywordFound's contract.
                ret.append(
                    {
                        "range": {
                            "start": {
                                "line": keyword_found.lineno,
                                "character": keyword_found.col_offset,
                            },
                            "end": {
                                "line": keyword_found.end_lineno,
                                "character": keyword_found.end_col_offset,
                            },
                        },
                        "kind": DocumentHighlightKind.Text,
                    }
                )
    return ret
def _highlight_variables(
    completion_context: ICompletionContext, curr_token_info: VarTokenInfo
) -> List[DocumentHighlightTypedDict]:
    """Highlight every reference to the variable under the cursor."""
    from robocorp_ls_core.lsp import DocumentHighlightKind
    from robotframework_ls.impl.references import iter_variable_references_in_doc
    from robotframework_ls.impl.find_definition import find_variable_definition

    highlights: List[DocumentHighlightTypedDict] = []

    definitions = find_variable_definition(completion_context, curr_token_info)
    if not definitions:
        return highlights

    # Give preference for a global definition (in which case we'll provide
    # both globals as well as locals overriding the global); otherwise fall
    # back to the first definition found.
    definition = next(
        (d for d in definitions if not d.variable_found.is_local_variable),
        next(iter(definitions)),
    )

    for reference_range in iter_variable_references_in_doc(
        completion_context,
        definition.variable_found,
    ):
        completion_context.check_cancelled()
        highlights.append(
            {"range": reference_range, "kind": DocumentHighlightKind.Text}
        )
    return highlights
def doc_highlight(
    completion_context: ICompletionContext,
) -> Optional[List[DocumentHighlightTypedDict]]:
    """
    Compute document highlights for the symbol under the cursor.

    Keywords and variables get reference-based matching; anything else falls
    back to whole-word textual matching. Returns None when there's no token
    under the cursor.
    """
    import re

    from robocorp_ls_core.lsp import DocumentHighlightKind

    curr_token_info = completion_context.get_current_token()
    if curr_token_info is None:
        return None

    ret: List[DocumentHighlightTypedDict] = []

    if curr_token_info.token.type in (
        curr_token_info.token.KEYWORD,
        curr_token_info.token.KEYWORD_NAME,
    ):
        return _highlight_keyword(completion_context, curr_token_info)

    curr_var_token_info = completion_context.get_current_variable()
    if curr_var_token_info is not None:
        return _highlight_variables(completion_context, curr_var_token_info)

    # We found no custom heuristics, just use a text-based approach: highlight
    # whole-word occurrences of the word at the cursor.
    doc = completion_context.doc
    sel = completion_context.sel
    word_to_col = sel.word_at_column
    if not word_to_col:
        return ret

    contents = doc.source
    for m in re.finditer(f"\\b{re.escape(word_to_col)}\\b", contents):
        start_line, start_col = doc.offset_to_line_col(m.start(0))
        end_line, end_col = doc.offset_to_line_col(m.end(0))
        ret.append(
            {
                "range": {
                    "start": {
                        "line": start_line,
                        "character": start_col,
                    },
                    "end": {
                        "line": end_line,
                        "character": end_col,
                    },
                },
                "kind": DocumentHighlightKind.Text,
            }
        )
    return ret
from typing import List
from robocorp_ls_core.lsp import CompletionItemTypedDict
from robotframework_ls.impl.protocols import ICompletionContext
# Snippets available on any supported Robot Framework version (4+).
# "<sp>" in the bodies is replaced by the user-configured argument separator.
_SNIPPETS_RF4 = {
    "FOR IN": {
        "prefix": "FOR IN",
        "body": [
            "FOR<sp>${${1:element}}<sp>IN<sp>@{${2:LIST}}",
            " Log<sp>${${1:element}}",
            " $0",
            "END",
        ],
        "description": "Snippet of a FOR IN loop.\n\nA for loop that iterates over a list of values and assigns one value to a variable per iteration.",
    },
    "FOR IN ENUMERATE": {
        "prefix": "FOR IN ENUMERATE",
        "body": [
            "FOR<sp>${${1:index}}<sp>${${2:element}}<sp>IN ENUMERATE<sp>@{${3:LIST}}",
            " Log<sp>${${1:index}}: ${${2:element}}",
            " $0",
            "END",
        ],
        "description": "Snippet of a FOR IN ENUMERATE loop.\n\nA for loop that iterates over a list of values and assigns the iteration index to the first and the value to the second variable per iteration.",
    },
    "FOR IN RANGE": {
        "prefix": "FOR IN RANGE",
        "body": [
            "FOR<sp>${${1:counter}}<sp>IN RANGE<sp>${2:START}<sp>${3:END}<sp>${4:opt.STEPS}",
            " Log<sp>${${1:counter}}",
            " $0",
            "END",
        ],
        "description": "Snippet of a FOR IN RANGE loop.\n\nA for loop that iterates over a range of values with an optional configurable step width.",
    },
    "FOR IN ZIP": {
        "prefix": "FOR IN ZIP",
        "body": [
            "FOR<sp>${${1:l1-element}}<sp>${${2:l2-element}}<sp>IN ZIP<sp>${${3:LIST-1}}<sp>${${4:LIST-2}}",
            " Log<sp>${${1:l1-element}} - ${${2:l2-element}}",
            " $0",
            "END",
        ],
        "description": "Snippet of a FOR IN ZIP loop\n\nA for loop that iterates over two lists and assigns the values from the first list to the first variable and values from the second list to the second variable per iteration.",
    },
    "IF STATEMENT": {
        "prefix": "IF STATEMENT",
        "body": [
            "IF<sp>${1:\\$var_in_py_expr1 == \\$var_in_py_expr2}",
            " $0",
            "END",
        ],
        "description": "Snippet of an IF..END statement.",
    },
    "IF ELSE STATEMENT": {
        "prefix": "IF ELSE STATEMENT",
        "body": [
            "IF<sp>${1:\\$var_in_py_expr1 == \\$var_in_py_expr2}",
            " ${3:Call Keyword}",
            "ELSE",
            " $0",
            "END",
        ],
        "description": "Snippet of an IF..ELSE..END statement",
    },
    "Run Keyword If": {
        "prefix": "Run Keyword If",
        "body": [
            "Run Keyword If<sp>${1:\\$var_in_py_expr1 == \\$var_in_py_expr2}",
            "... ${3:Keyword}<sp>${4:@args}",
            "... ELSE IF<sp>${2:condition_in_py_expr}",
            "... ${5:Keyword}<sp>${6:@args}",
            "... ELSE",
            "... ${7:Keyword}<sp>${8:@args}",
        ],
        "description": "Runs the given keyword with the given arguments, if condition is true.",
    },
    "Run Keywords": {
        "prefix": "Run Keywords",
        "body": [
            "Run Keywords",
            "... ${1:Keyword}<sp>${2:@args}",
            "... AND",
            "... ${3:Keyword}<sp>${4:@args}",
        ],
        "description": "Executes all the given keywords in a sequence.",
    },
}
# Snippets only offered when running with Robot Framework 5 or later
# (TRY/WHILE/BREAK/CONTINUE/RETURN syntax was introduced in RF 5).
_SNIPPETS_RF5 = {
    "TRY EXCEPT STATEMENT": {
        "prefix": "TRY EXCEPT",
        "body": ["TRY", " $0", "EXCEPT<sp>message", " ", "END"],
        "description": "Snippet of a TRY..EXCEPT statement",
    },
    "TRY EXCEPT FINALLY STATEMENT": {
        "prefix": "TRY EXCEPT FINALLY",
        "body": [
            "TRY",
            " $0",
            "EXCEPT<sp>message",
            " ",
            "FINALLY",
            " ",
            "END",
        ],
        "description": "Snippet of a TRY..EXCEPT..FINALLY statement",
    },
    "TRY FINALLY STATEMENT": {
        "prefix": "TRY FINALLY",
        "body": ["TRY", " $0", "FINALLY", " ", "END"],
        "description": "Snippet of a TRY..EXCEPT..FINALLY statement",
    },
    "WHILE STATEMENT": {
        "prefix": "WHILE",
        "body": [
            "WHILE<sp>${1:\\$var_in_py_expr1 == \\$var_in_py_expr2}",
            " $0",
            "END",
        ],
        "description": "Snippet of a WHILE statement",
    },
    "WHILE STATEMENT UNLIMITED": {
        "prefix": "WHILE",
        "body": [
            "WHILE<sp>${1:\\$var_in_py_expr1 == \\$var_in_py_expr2}<sp>limit=NONE",
            " $0",
            "END",
        ],
        "description": "Snippet of a WHILE statement with limit=NONE",
    },
    "CONTINUE": {
        "prefix": "CONTINUE",
        "body": ["CONTINUE"],
        "description": "Snippet for CONTINUE",
    },
    "BREAK": {
        "prefix": "BREAK",
        "body": ["BREAK"],
        "description": "Snippet for BREAK",
    },
    "RETURN": {
        "prefix": "RETURN",
        "body": ["RETURN"],
        "description": "Snippet for RETURN",
    },
    "ELSE": {
        "prefix": "ELSE",
        "body": ["ELSE"],
        "description": "Snippet for ELSE",
    },
}
# Lazily-computed cache for _get_global_snippets().
_SNIPPETS_SORTED = None


def _get_global_snippets():
    """Return the sorted (label, snippet) pairs applicable to the running
    Robot Framework version (computed once and cached)."""
    from robotframework_ls.impl.robot_version import get_robot_major_version

    global _SNIPPETS_SORTED
    if _SNIPPETS_SORTED is None:
        applicable = dict(_SNIPPETS_RF4)
        if get_robot_major_version() >= 5:
            applicable.update(_SNIPPETS_RF5)
        _SNIPPETS_SORTED = sorted(applicable.items())
    return _SNIPPETS_SORTED
def _create_completion_item_from_snippet(
    label, snippet, selection, line_to_col, separator_spaces
):
    """
    Build a (dict-form) snippet CompletionItem that replaces the typed prefix.

    :param selection: DocumentSelection
    """
    from robocorp_ls_core.lsp import (
        CompletionItem,
        CompletionItemKind,
        InsertTextFormat,
        Position,
        Range,
        TextEdit,
    )

    # "<sp>" in the snippet body stands for the configured argument separator.
    body_lines = [
        line.replace("<sp>", separator_spaces) for line in snippet["body"]
    ]
    new_text = "\n".join(body_lines)

    # Replace from the start of the typed prefix up to the cursor.
    end_col = selection.col
    start_col = end_col - len(line_to_col)
    edit_range = Range(
        start=Position(selection.line, start_col),
        end=Position(selection.line, end_col),
    )
    text_edit = TextEdit(edit_range, new_text)

    completion = CompletionItem(
        label,
        kind=CompletionItemKind.Snippet,
        text_edit=text_edit,
        insertText=text_edit.newText,
        documentation=snippet["description"] + "\n\n" + new_text,
        insertTextFormat=InsertTextFormat.Snippet,
    )
    return completion.to_dict()
def complete(completion_context: ICompletionContext) -> List[CompletionItemTypedDict]:
    """
    Collects the snippet completions matching the text typed on the current
    line (empty line -> no completions).

    :param CompletionContext completion_context:
    """
    from robotframework_ls.robot_config import get_arguments_separator

    sel = completion_context.sel  # :type sel: DocumentSelection
    line_to_column = sel.line_to_column.lstrip().lower()
    if not line_to_column:
        return []

    separator_spaces = get_arguments_separator(completion_context)
    # A snippet matches when the typed text is a substring of its prefix.
    return [
        _create_completion_item_from_snippet(
            label, data, sel, line_to_column, separator_spaces
        )
        for label, data in _get_global_snippets()
        if line_to_column in data["prefix"].lower()
    ]
from typing import Dict, Tuple, Sequence, Iterator, List
from robocorp_ls_core.robotframework_log import get_logger
from robotframework_ls.impl.protocols import ICompletionContext
from robocorp_ls_core.lsp import (
CompletionItem,
CompletionItemKind,
InsertTextFormat,
Position,
Range,
TextEdit,
CompletionItemTypedDict,
)
log = get_logger(__name__)
def _iter_normalized_variables_and_values(
    completion_context: ICompletionContext,
) -> Iterator[Tuple[str, Tuple[str, ...]]]:
    """Yield (normalized variable base name, assigned values) for each
    variable defined in the document of `completion_context`."""
    from robot.api import Token
    from robotframework_ls.impl.variable_resolve import robot_search_variable
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    for node_info in completion_context.get_all_variables():
        node = node_info.node
        token = node.get_token(Token.VARIABLE)
        if token is None:
            continue

        robot_match = robot_search_variable(token.value)
        if not (robot_match and robot_match.base):
            continue

        # i.e.: Variable.value provides the values of the assign.
        values: Tuple[str, ...] = node.value
        yield normalize_robot_name(robot_match.base), values
def _as_dictionary(
    dict_tokens: Sequence[str], normalize=False, filter_token: str = ""
) -> Dict[str, str]:
    """
    Parse ["key1=val1", "key2=val2", ...] as a dictionary.

    :param dict_tokens: the "key=value" entries to parse.
    :param normalize: when True, keys are normalized with robot name rules.
    :param filter_token: when given, only entries whose normalized key contains
        it are kept.
    """
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    dictionary: Dict[str, str] = {}
    for token in dict_tokens:
        # Split only on the first "=" so values containing "=" (e.g.
        # "url=http://host?a=b") don't raise ValueError on unpacking.
        key, val = token.split("=", 1)
        if normalize:
            key = normalize_robot_name(key)
        if filter_token and filter_token not in normalize_robot_name(key):
            continue
        dictionary[key] = val
    return dictionary
def _completion_items(
    dictionary: Dict[str, str], editor_range: Range
) -> List[CompletionItemTypedDict]:
    """Build one Variable completion item (dict form) per (key, value) pair,
    inserting the key at `editor_range` and documenting it with the value."""
    items: List[CompletionItemTypedDict] = []
    for key, value in dictionary.items():
        completion = CompletionItem(
            key,
            kind=CompletionItemKind.Variable,
            text_edit=TextEdit(editor_range, key),
            insertText=key,
            documentation=value,
            insertTextFormat=InsertTextFormat.Snippet,
        )
        items.append(completion.to_dict())
    return items
def _iter_all_normalized_variables_and_values(
    completion_context: ICompletionContext,
) -> Iterator[Tuple[str, Tuple[str, ...]]]:
    """Yield variables from the current document and then from every resource
    and init document it (transitively) depends on."""
    yield from _iter_normalized_variables_and_values(completion_context)

    dependencies = completion_context.collect_dependency_graph()
    for resource_doc in completion_context.iter_dependency_and_init_resource_docs(
        dependencies
    ):
        yield from _iter_normalized_variables_and_values(
            completion_context.create_copy(resource_doc)
        )
def complete(completion_context: ICompletionContext) -> List[CompletionItemTypedDict]:
    """
    Provide completions for dictionary item access such as ``${var}[key][subkey]``.

    Returns an empty list whenever the cursor is not inside the ``[...]``
    item part of a variable access, or the chain cannot be resolved to a
    dictionary variable.
    """
    from robotframework_ls.impl.variable_resolve import iter_robot_variable_matches
    from robotframework_ls.impl.ast_utils import iter_robot_match_as_tokens
    from robotframework_ls.impl.text_utilities import normalize_robot_name
    from robotframework_ls.impl.variable_resolve import robot_search_variable
    token_info = completion_context.get_current_token()
    if token_info is None:
        return []
    token = token_info.token
    value = token.value
    col = completion_context.sel.col
    last_opening_bracket_column = -1
    items_seen = []  # "item" rtokens (i.e.: [x] accesses) seen before the cursor.
    prev_rtoken = None  # The rtoken the cursor is currently on/completing.
    # Locate the variable match containing the cursor, then walk its
    # item tokens to find where inside the `[...]` chain the cursor is.
    for robot_match, relative_index in iter_robot_variable_matches(value):
        robot_match_start = token.col_offset + relative_index + robot_match.start
        robot_match_end = token.col_offset + relative_index + robot_match.end
        if robot_match.base and robot_match_start < col < robot_match_end:
            # Now, let's see in which item/offset we're in.
            for rtoken in iter_robot_match_as_tokens(
                robot_match, relative_index=robot_match_start, lineno=token.lineno
            ):
                if rtoken.type == "[":
                    last_opening_bracket_column = rtoken.col_offset
                if rtoken.col_offset >= col:
                    if (
                        rtoken.type == "item"
                        and rtoken.col_offset == rtoken.end_col_offset
                    ):
                        # dealing with empty item at cursor
                        prev_rtoken = rtoken
                        items_seen.append(rtoken)
                    # The prev_rtoken is the one we're interested in
                    break
                if rtoken.type == "item":
                    items_seen.append(rtoken)
                    prev_rtoken = rtoken
            break
        # NOTE(review): this `else` bails out as soon as the first variable
        # match does not contain the cursor — later matches on the same token
        # are never considered. Verify whether that's intentional.
        else:
            return []
    if prev_rtoken is None:
        return []
    if prev_rtoken.type not in ("[", "item"):
        return []
    if last_opening_bracket_column == -1:
        return []
    # Chain of names to resolve: the variable base plus every completed item.
    search_items_normalized = [normalize_robot_name(robot_match.base)]
    if len(items_seen) > 1:
        for item in items_seen[:-1]:
            # The last one is the one we're currently completing
            search_items_normalized.append(normalize_robot_name(item.value))
    selection = completion_context.sel
    if prev_rtoken.type == "[":
        # Cursor right after '[': insert at that column, no filtering.
        start_offset = end_offset = prev_rtoken.col_offset
        filter_token = ""
    else:
        # Cursor inside a (partial) item: replace it and filter by its text.
        start_offset = prev_rtoken.col_offset
        end_offset = prev_rtoken.end_col_offset
        filter_token = normalize_robot_name(prev_rtoken.value)
    normalized_variables_and_values = dict(
        _iter_all_normalized_variables_and_values(completion_context)
    )
    last_dict = None
    count = 0
    # Resolve the access chain, following nested &{...} references; the
    # counter guards against cyclic variable definitions.
    while search_items_normalized:
        count += 1
        if count > 10:
            log.info(
                "Breaking up possible recursion on dictionary completions. Stack: %s",
                search_items_normalized,
            )
            return []
        search_name_normalized = search_items_normalized.pop(0)
        variable_values = normalized_variables_and_values.get(search_name_normalized)
        if not variable_values:
            return []
        if not search_items_normalized:
            # Last link of the chain: offer this dict's keys as completions.
            dictionary = _as_dictionary(variable_values, filter_token=filter_token)
            editor_range = Range(
                start=Position(selection.line, start_offset),
                end=Position(selection.line, end_offset),
            )
            return _completion_items(dictionary, editor_range)
        else:
            # Intermediate link: the next item's value must itself be a
            # &{dict} reference to keep resolving.
            last_dict = _as_dictionary(variable_values, normalize=True)
            next_search = last_dict.get(search_items_normalized.pop(0))
            if not next_search:
                return []
            if not next_search.startswith("&{"):
                return []
            new_match = robot_search_variable(next_search)
            if not new_match or not new_match.base:
                return []
            # Push the referenced dict (and its own items) onto the front
            # of the resolution queue.
            for it in reversed(new_match.items):
                search_items_normalized.insert(0, normalize_robot_name(it))
            search_items_normalized.insert(0, normalize_robot_name(new_match.base))
return [] | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/dictionary_completions.py | 0.759404 | 0.260072 | dictionary_completions.py | pypi |
from typing import Optional, Iterator, Tuple
from robotframework_ls.impl.protocols import KeywordUsageInfo, IRobotToken
from robotframework_ls.impl.keywords_in_args import KEYWORD_NAME_TO_KEYWORD_INDEX
from robotframework_ls.impl.keywords_in_args import KEYWORD_NAME_TO_CONDITION_INDEX
# Classification of a keyword-usage ARGUMENT token (see _ConsiderArgsAsKeywordNames):
TOK_TYPE_NONE = 0  # regular argument, no special meaning
TOK_TYPE_KEYWORD = 1  # argument is a keyword name (starts a nested keyword usage)
TOK_TYPE_EXPRESSION = 2  # argument is a condition/expression
TOK_TYPE_CONTROL = 3  # control marker such as ELSE / ELSE IF / AND
TOK_TYPE_IGNORE = 4  # argument should be skipped (e.g. items_limit=/return_results=)
def _tok_type_as_str(tok_type) -> str:
    """Human-readable name for a TOK_TYPE_* constant (debugging aid)."""
    as_str = {
        TOK_TYPE_NONE: "<none>",
        TOK_TYPE_EXPRESSION: "<expression>",
        TOK_TYPE_KEYWORD: "<keyword>",
        TOK_TYPE_CONTROL: "<control>",
        TOK_TYPE_IGNORE: "<ignore>",
    }
    if tok_type in as_str:
        return as_str[tok_type]
    raise AssertionError(f"Unexpected: {tok_type}")
class _ConsiderArgsAsKeywordNames:
    """
    Classifies each ARGUMENT token of a keyword usage (e.g. `Run Keyword If`,
    `Run Keywords`, `For Each Input Work Item`) as keyword name, expression,
    control marker, ignored or plain argument.

    `next_tok_type` must be called once per ARGUMENT token, in order — the
    instance keeps the current argument index and per-branch stacks as state.
    In `__init__` the `next_tok_type` attribute may be rebound to a
    specialized method depending on the keyword being handled.
    """

    # Aliases of the module-level TOK_TYPE_* constants for convenient access.
    NONE = TOK_TYPE_NONE
    KEYWORD = TOK_TYPE_KEYWORD
    EXPRESSION = TOK_TYPE_EXPRESSION
    CONTROL = TOK_TYPE_CONTROL
    IGNORE = TOK_TYPE_IGNORE

    def __init__(
        self,
        node,
        normalized_keyword_name,
        consider_keyword_at_index,
        consider_condition_at_index,
    ):
        self._normalized_keyword_name = normalized_keyword_name
        # 1-based indexes (or None) telling which argument is the keyword
        # name and which is the condition for this keyword.
        self._consider_keyword_at_index = consider_keyword_at_index
        self._consider_condition_at_index = consider_condition_at_index
        self._current_arg = 0
        # Run Keyword If is special because it has 'ELSE IF' / 'ELSE'
        # which will then be (cond, keyword) or just (keyword), so
        # we need to provide keyword usages as needed.
        if self._normalized_keyword_name == "runkeywordif":
            self.next_tok_type = self._next_tok_type_run_keyword_if
        elif self._normalized_keyword_name == "foreachinputworkitem":
            self.next_tok_type = self._next_tok_type_for_each_input_work_item
        elif self._normalized_keyword_name == "runkeywords":
            # With an 'AND' separator each group is (keyword, *args);
            # without it, every argument is itself a keyword name.
            found = False
            for token in node.tokens:
                if "AND" == token.value:
                    found = True
                    break
            if found:
                self.next_tok_type = self._next_tok_type_run_keywords
            else:
                self.next_tok_type = self._consider_each_arg_as_keyword
        # Tokens collected since the last control marker ('ELSE IF'/'ELSE'/'AND').
        self._stack_kind = None
        self._stack = None
        self._started_match = False

    def next_tok_type(self, token) -> int:  # pylint: disable=method-hidden
        """Default classifier: only the configured indexes are special."""
        assert token.type == token.ARGUMENT
        self._current_arg += 1
        if self._current_arg == self._consider_condition_at_index:
            return self.EXPRESSION
        if self._current_arg == self._consider_keyword_at_index:
            return self.KEYWORD
        return self.NONE

    def _next_tok_type_for_each_input_work_item(self, token):
        """Classifier for 'For Each Input Work Item': skips named options."""
        from robotframework_ls.impl.variable_resolve import find_split_index
        from robotframework_ls.impl.text_utilities import normalize_robot_name
        assert token.type == token.ARGUMENT
        self._current_arg += 1
        if self._current_arg == self._consider_keyword_at_index:
            return self.KEYWORD
        # Named arguments items_limit=/return_results= are options of this
        # keyword itself, not arguments passed to the inner keyword.
        i = find_split_index(token.value)
        if i > 0:
            v = normalize_robot_name(token.value[:i])
            if v in ("itemslimit", "returnresults"):
                return self.IGNORE
        return self.NONE

    def _next_tok_type_run_keyword_if(self, token):
        """Classifier for 'Run Keyword If' with its ELSE IF / ELSE branches."""
        assert token.type == token.ARGUMENT
        self._current_arg += 1
        if token.value == "ELSE IF":
            # Start a new (condition, keyword, ...) group.
            self._started_match = True
            self._stack = []
            self._stack_kind = token.value
            return self.CONTROL
        elif token.value == "ELSE":
            # Start a new (keyword, ...) group.
            self._started_match = True
            self._stack = []
            self._stack_kind = token.value
            return self.CONTROL
        else:
            self._started_match = False
            if self._stack is not None:
                self._stack.append(token)
        if self._stack is not None:
            # Inside an ELSE IF group: 1st arg is the condition, 2nd the keyword.
            if self._stack_kind == "ELSE IF":
                if len(self._stack) == 1:
                    return self.EXPRESSION
                return self.KEYWORD if len(self._stack) == 2 else self.NONE
            # Inside an ELSE group: the 1st arg is the keyword.
            if self._stack_kind == "ELSE":
                return self.KEYWORD if len(self._stack) == 1 else self.NONE
        # Before any ELSE/ELSE IF: use the configured indexes.
        if self._current_arg == self._consider_condition_at_index:
            return self.EXPRESSION
        if self._current_arg == self._consider_keyword_at_index:
            return self.KEYWORD
        return self.NONE

    def _consider_each_arg_as_keyword(self, token):
        """Classifier for 'Run Keywords' without AND: every arg is a keyword."""
        assert token.type == token.ARGUMENT
        return self.KEYWORD

    def _next_tok_type_run_keywords(self, token):
        """Classifier for 'Run Keywords' with AND separators."""
        assert token.type == token.ARGUMENT
        self._current_arg += 1
        if token.value == "AND":
            # Start a new (keyword, *args) group.
            self._started_match = True
            self._stack = []
            self._stack_kind = token.value
            return self.CONTROL
        else:
            self._started_match = False
            if self._stack is not None:
                self._stack.append(token)
        if self._stack is not None:
            # First token after AND is the keyword name of the new group.
            if self._stack_kind == "AND":
                return self.KEYWORD if len(self._stack) == 1 else self.NONE
        if self._current_arg == self._consider_keyword_at_index:
            return self.KEYWORD
        return self.NONE
def _create_root_keyword_usage_info(stack, node) -> Optional[KeywordUsageInfo]:
    """
    If this is a keyword usage node, return information on it, otherwise,
    returns None.
    :note: this goes hand-in-hand with get_keyword_name_token.
    """
    from robot.api import Token
    from robotframework_ls.impl.ast_utils import (
        CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET,
    )
    from robotframework_ls.impl.ast_utils import _strip_node_and_token_bdd_prefix
    # The keyword name lives in a different token type depending on the node.
    if node.__class__.__name__ == "KeywordCall":
        token_type = Token.KEYWORD
    elif node.__class__.__name__ in CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET:
        token_type = Token.NAME
    else:
        return None
    # Strip a BDD prefix (Given/When/Then/...) if present; may rewrite node/token.
    prefix, node, token = _strip_node_and_token_bdd_prefix(stack, node, token_type)
    if token is None:
        return None
    keyword_name = token.value
    # "NONE" is the placeholder for "no keyword" (e.g. in settings) — not a usage.
    if keyword_name.lower() == "none":
        return None
    return KeywordUsageInfo(tuple(stack), node, token, keyword_name, prefix=prefix)
def _build_keyword_usage(stack, node, current_tokens) -> Optional[KeywordUsageInfo]:
    """Create a synthetic KeywordUsageInfo whose first token (retyped as
    KEYWORD) is the keyword name and the remaining tokens its arguments."""
    from robotframework_ls.impl.ast_utils import copy_token_replacing
    # Note: just check for line/col because the token could be changed
    # (for instance, an EOL ' ' could be added to the token).
    if not current_tokens:
        return None
    keyword_at_index = 0
    keyword_token = current_tokens[keyword_at_index]
    # The argument token becomes the keyword-name token of the new usage.
    keyword_token = copy_token_replacing(keyword_token, type=keyword_token.KEYWORD)
    new_tokens = [keyword_token]
    new_tokens.extend(current_tokens[keyword_at_index + 1 :])
    # Wrap in a node of the same class so downstream code treats it uniformly.
    new_node = node.__class__(new_tokens)
    return KeywordUsageInfo(
        stack,
        new_node,
        keyword_token,
        keyword_token.value,
        # NOTE(review): positional True — presumably flags this usage as
        # synthesized from arguments; confirm the parameter name in
        # KeywordUsageInfo before relying on it.
        True,
    )
def _iter_keyword_usage_info_uncached_from_args(
    stack, node, args_as_keywords_handler, token_line_col_to_type
) -> Iterator[KeywordUsageInfo]:
    """
    Yield the keyword usages embedded in *node*'s ARGUMENT tokens (e.g. the
    keywords passed to `Run Keywords`), as classified by
    *args_as_keywords_handler*.

    Also fills *token_line_col_to_type* mapping (lineno, col) -> TOK_TYPE_*
    for every ARGUMENT token seen.
    """
    # We may have multiple matches, so, we need to setup the appropriate book-keeping
    current_tokens = []
    iter_in = iter(node.tokens)
    # First loop: skip ahead until the first KEYWORD-classified argument.
    for token in iter_in:
        if token.type == token.ARGUMENT:
            next_tok_type = args_as_keywords_handler.next_tok_type(token)
            token_line_col_to_type[(token.lineno, token.col_offset)] = next_tok_type
            if next_tok_type == args_as_keywords_handler.KEYWORD:
                current_tokens.append(token)
                break
    # Second loop: group each keyword with its following argument tokens;
    # a new KEYWORD token closes the previous group.
    for token in iter_in:
        if token.type == token.ARGUMENT:
            next_tok_type = args_as_keywords_handler.next_tok_type(token)
            token_line_col_to_type[(token.lineno, token.col_offset)] = next_tok_type
            if next_tok_type in (
                args_as_keywords_handler.CONTROL,
                args_as_keywords_handler.EXPRESSION,
                args_as_keywords_handler.IGNORE,
            ):
                # Don't add IF/ELSE IF/AND nor the condition.
                continue
            if next_tok_type != args_as_keywords_handler.KEYWORD:
                # Argument was now added to current_tokens.
                current_tokens.append(token)
                continue
            if current_tokens:
                # Starting a new one (build for the previous).
                usage_info = _build_keyword_usage(
                    stack,
                    node,
                    current_tokens,
                )
                if usage_info is not None:
                    yield usage_info
            current_tokens = [token]
    # This `else` belongs to the `for` above; since the loop has no `break`
    # it always runs, flushing the last pending group.
    else:
        # Do one last iteration at the end to deal with the last one.
        if current_tokens:
            usage_info = _build_keyword_usage(
                stack,
                node,
                current_tokens,
            )
            if usage_info is not None:
                yield usage_info
class _KeywordUsageHandler:
    """
    We have the following main use-cases when dealing with keyword usages (also
    known as keyword references):
    1. Obtain the usages (keyword call/arguments) for code-analysis.
    2. For each token in a keyword usage, know what it maps to (
    keyword name, expression, control, regular argument, ...)
    Also, it needs to be considered that a given keyword usage may have
    other usages within it, so, the _KeywordUsageHandler is an API to help
    make things more streamlined for each use-case.
    """

    # Aliases of the module-level TOK_TYPE_* constants.
    NONE = TOK_TYPE_NONE
    KEYWORD = TOK_TYPE_KEYWORD
    EXPRESSION = TOK_TYPE_EXPRESSION
    CONTROL = TOK_TYPE_CONTROL
    IGNORE = TOK_TYPE_IGNORE

    def __init__(self, stack, node, recursive):
        self.node = node
        self.stack = stack
        # When True, usages nested in arguments (Run Keywords, ...) are collected too.
        self._recursive = recursive
        # We store as line/col the type info and not the actual token because we
        # may create dummy tokens along the way and in this case we're
        # interested in the positions.
        self._token_line_col_to_type = {}
        # Lazily-computed tuple of KeywordUsageInfo (see _ensure_cached).
        self._keyword_usages_from_node_cache = None

    def _ensure_cached(self):
        # Computing the usages also fills _token_line_col_to_type as a side effect.
        if self._keyword_usages_from_node_cache is None:
            self._keyword_usages_from_node_cache = tuple(
                self._iter_keyword_usages_from_node()
            )

    def iter_keyword_usages_from_node(self) -> Iterator[KeywordUsageInfo]:
        """Iterate all usages (cached after the first call)."""
        self._ensure_cached()
        yield from iter(self._keyword_usages_from_node_cache)

    def _iter_keyword_usages_from_node(self) -> Iterator[KeywordUsageInfo]:
        """
        Note: the iteration order is guaranteed and it's from the inside to
        the outside (because when matching tokens we want to match more
        specific ones before outer ones).
        """
        root_keyword_usage_info = _create_root_keyword_usage_info(self.stack, self.node)
        if root_keyword_usage_info is None:
            return
        # Ok, we have the root one, now, we need to recursively detect others.
        if self._recursive:
            yield from self._iter_keyword_usages_inside_keyword_usage(
                root_keyword_usage_info
            )
        yield root_keyword_usage_info

    def _iter_keyword_usages_inside_keyword_usage(
        self, root_keyword_usage_info: KeywordUsageInfo
    ) -> Iterator[KeywordUsageInfo]:
        """Recursively yield usages found inside the arguments of a usage."""
        from robotframework_ls.impl.text_utilities import normalize_robot_name
        # Now, we have the root, determine if it can have other usages inside itself...
        normalized_keyword_name = normalize_robot_name(root_keyword_usage_info.name)
        consider_keyword_at_index = KEYWORD_NAME_TO_KEYWORD_INDEX.get(
            normalized_keyword_name
        )
        consider_condition_at_index = KEYWORD_NAME_TO_CONDITION_INDEX.get(
            normalized_keyword_name
        )
        if (
            consider_keyword_at_index is not None
            or consider_condition_at_index is not None
        ):
            args_as_keywords_handler = _ConsiderArgsAsKeywordNames(
                root_keyword_usage_info.node,
                normalized_keyword_name,
                consider_keyword_at_index,
                consider_condition_at_index,
            )
            for kw_usage in _iter_keyword_usage_info_uncached_from_args(
                self.stack,
                root_keyword_usage_info.node,
                args_as_keywords_handler,
                self._token_line_col_to_type,
            ):
                # Inner usages first (inside-out order), then the usage itself.
                yield from self._iter_keyword_usages_inside_keyword_usage(kw_usage)
                yield kw_usage

    def get_token_type(self, tok: IRobotToken) -> int:
        """
        :return:
            TOK_TYPE_NONE = 0
            TOK_TYPE_KEYWORD = 1
            TOK_TYPE_EXPRESSION = 2
            TOK_TYPE_CONTROL = 3
            TOK_TYPE_IGNORE = 4
        """
        self._ensure_cached()
        return self._token_line_col_to_type.get(
            (tok.lineno, tok.col_offset), TOK_TYPE_NONE
        )

    def get_token_type_as_str(self, token: IRobotToken) -> str:
        """Human-readable variant of get_token_type (debugging aid)."""
        return _tok_type_as_str(self.get_token_type(token))

    def iter_tokens_with_type(self) -> Iterator[Tuple[IRobotToken, int]]:
        """Yield (token, TOK_TYPE_*) for every token of the node."""
        self._ensure_cached()
        for tok in self.node.tokens:
            yield tok, self._token_line_col_to_type.get(
                (tok.lineno, tok.col_offset), TOK_TYPE_NONE
            )

    def get_keyword_usage_for_token_line_col(
        self, line, col
    ) -> Optional[KeywordUsageInfo]:
        """Return the innermost usage containing a token at (line, col), if any."""
        self._ensure_cached()
        # Inside-out iteration order means the first match is the innermost one.
        for kw_usage in self.iter_keyword_usages_from_node():
            for token in kw_usage.node.tokens:
                if token.lineno == line and token.col_offset == col:
                    return kw_usage
        return None
def obtain_keyword_usage_handler(
    stack, node, recursive=True
) -> Optional[_KeywordUsageHandler]:
    """Create a _KeywordUsageHandler for *node* if it's a node type which can
    contain keyword usages, otherwise return None."""
    from robotframework_ls.impl.ast_utils import (
        CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET,
    )

    cls_name = node.__class__.__name__
    is_usage_node = (
        cls_name == "KeywordCall"
        or cls_name in CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET
    )
    if not is_usage_node:
        return None
    return _KeywordUsageHandler(stack, node, recursive=recursive)
def obtain_keyword_usage_for_token(stack, node, token) -> Optional[KeywordUsageInfo]:
    """Return the innermost keyword usage in *node* containing *token*
    (matched by line/col), or None if *node* has no keyword usages."""
    keyword_usage_handler = obtain_keyword_usage_handler(stack, node)
    if keyword_usage_handler is not None:
        keyword_usage = keyword_usage_handler.get_keyword_usage_for_token_line_col(
            token.lineno, token.col_offset
        )
        return keyword_usage
return None | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/ast_utils_keyword_usage.py | 0.838878 | 0.203134 | ast_utils_keyword_usage.py | pypi |
from robotframework_ls.impl.protocols import (
ICompletionContext,
NodeInfo,
ILibraryDocConversions,
)
from typing import Optional, Any
import typing
from robocorp_ls_core.lsp import HoverTypedDict
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.protocols import ActionResultDict
log = get_logger(__name__)
def _collect_html_for_non_library_element(
    ctx: ICompletionContext,
) -> Optional[HoverTypedDict]:
    """Fall back to the regular hover information for non-library elements."""
    from robotframework_ls.impl import hover

    hover_info = hover.hover(ctx)
    return hover_info
def collect_robot_documentation(
    library_name: Optional[str], ctx: ICompletionContext
) -> ActionResultDict:
    """Never-raising wrapper around _collect_robot_documentation: exceptions
    are logged and converted into a failure ActionResultDict."""
    try:
        return _collect_robot_documentation(library_name, ctx)
    except Exception as e:
        msg = f"Error collecting robot documentation: {str(e)}"
        log.exception(msg)
        failure: ActionResultDict = {
            "success": False,
            "message": msg,
            "result": None,
        }
        return failure
def _collect_robot_documentation(
    library_name: Optional[str], ctx: ICompletionContext
) -> ActionResultDict:
    """
    Collect documentation either for *library_name* (when given) or for
    whatever is under the current selection in *ctx*.

    Returns an ActionResultDict; on success the result is either hover info
    (non-library elements) or {"libdoc_json": <library doc dictionary>}.
    """
    from robotframework_ls.impl import ast_utils
    ws = ctx.workspace
    libspec_manager = ws.libspec_manager
    # We need to create a copy (which we'll use for dealing with HTML).
    # Note that we also want to collect the original copy, not the one
    # which was converted to markdown.
    libspec_manager = libspec_manager.create_copy()
    if library_name:
        # Explicit library name given: resolve it directly.
        library_doc_or_error = libspec_manager.get_library_doc_or_error(
            library_name,
            create=True,
            completion_context=ctx,
        )
    else:
        # No library name: inspect the AST node at the current selection.
        ast = ctx.get_ast()
        from robot.api import Token
        line = ctx.sel.line
        col = ctx.sel.col
        section = ast_utils.find_section(ast, line)
        if not section:
            return {
                "success": False,
                "message": f"No documentation for selection at line: {line}",
                "result": None,
            }
        token_info = ast_utils.find_token(section, line, col)
        if not token_info:
            return {
                "success": False,
                "message": f"No documentation for selection in line: {line}, col: {col}",
                "result": None,
            }
        node_info: NodeInfo[Any] = NodeInfo(token_info.stack, token_info.node)
        if not ast_utils.is_library_node_info(node_info):
            # Ok, no docs for a library, let's get the hover info and provide it.
            ret = _collect_html_for_non_library_element(ctx)
            if not ret:
                return {
                    "success": False,
                    "message": f"No custom documentation available for node: {node_info.node.__class__.__name__} at line: {line}, col: {col} ",
                    "result": None,
                }
            else:
                return {
                    "success": True,
                    "message": None,
                    "result": ret,
                }
        # It's a library import node: get its name token and load its docs.
        library_name_token = node_info.node.get_token(Token.NAME)
        if library_name_token is None:
            return {
                "success": False,
                "message": f"Unable to get library name for library import in line: {line}, col: {col}.",
                "result": None,
            }
        library_doc_or_error = libspec_manager.get_library_doc_or_error(
            ctx.token_value_resolving_variables(library_name_token),
            create=True,
            completion_context=ctx,
            args=ast_utils.get_library_arguments_serialized(node_info.node),
        )
    library_doc = library_doc_or_error.library_doc
    if not library_doc:
        return {
            "success": False,
            "message": library_doc_or_error.error,
            "result": None,
        }
    # Convert the (copied) docs to HTML for client-side display.
    try:
        typing.cast(ILibraryDocConversions, library_doc).convert_docs_to_html()
    except Exception as e:
        msg = f"Error converting docs to html: {str(e)}"
        log.exception(msg)
        return {
            "success": False,
            "message": msg,
            "result": None,
        }
    return {
        "success": True,
        "message": None,
        "result": {"libdoc_json": library_doc.to_dictionary()},
} | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/collect_robot_documentation.py | 0.805135 | 0.211132 | collect_robot_documentation.py | pypi |
from typing import Optional, Set, List, Dict, Iterator, Sequence
import weakref
from robocorp_ls_core.protocols import ITestInfoFromSymbolsCacheTypedDict
from robotframework_ls.impl.protocols import (
ILibraryDoc,
IRobotDocument,
ISymbolsJsonListEntry,
ICompletionContext,
ISymbolKeywordInfo,
)
import typing
import threading
from robocorp_ls_core.robotframework_log import get_logger
log = get_logger(__name__)
class BaseSymbolsCache:
    """
    Caches the symbols (and related indexes: keywords used, global variables
    defined, variable references) computed for one document or library.

    Holds its source document/library only through weak references, so the
    cache does not keep them alive.
    """

    _library_info: "Optional[weakref.ReferenceType[ILibraryDoc]]"
    _doc: "Optional[weakref.ReferenceType[IRobotDocument]]"

    def __init__(
        self,
        json_list: List[ISymbolsJsonListEntry],
        library_info: Optional[ILibraryDoc],
        doc: Optional[IRobotDocument],
        keywords_used: Set[str],
        uri: Optional[str],  # Always available if generated from doc.
        test_info: Optional[List[ITestInfoFromSymbolsCacheTypedDict]],
        global_variables_defined: Optional[Set[str]] = None,
        variable_references: Optional[Set[str]] = None,
    ):
        from robocorp_ls_core.cache import LRUCache
        self._uri = uri
        # Weak references: don't keep the library/doc alive just for the cache.
        if library_info is not None:
            self._library_info = weakref.ref(library_info)
        else:
            self._library_info = None
        if doc is not None:
            self._doc = weakref.ref(doc)
        else:
            self._doc = None
        self._json_list = json_list
        self._keywords_used = keywords_used
        # Memoizes has_keyword_usage results for names containing variables.
        self._check_name_with_vars_cache_usage: LRUCache[str, bool] = LRUCache()
        self._check_name_with_vars_cache_usage_lock = threading.Lock()
        if global_variables_defined is None:
            global_variables_defined = set()
        self._global_variables_defined: Set[str] = global_variables_defined
        if variable_references is None:
            variable_references = set()
        self._variable_references: Set[str] = variable_references
        self._test_info = test_info

    def get_test_info(self) -> Optional[List[ITestInfoFromSymbolsCacheTypedDict]]:
        return self._test_info

    def get_uri(self) -> Optional[str]:
        return self._uri

    def has_keyword_usage(self, normalized_keyword_name: str) -> bool:
        """True if this cache recorded a usage of the given (normalized)
        keyword name; names embedding variables are matched with
        matches_name_with_variables (result memoized)."""
        ret = normalized_keyword_name in self._keywords_used
        # Fast path: exact hit, or no '{' means no variable to expand.
        if ret or "{" not in normalized_keyword_name:
            return ret
        with self._check_name_with_vars_cache_usage_lock:
            # The LRU is not thread safe, so, we need a lock (not ideal though as
            # it's slow)... using lru_cache would be thread safe, but we don't want to put
            # 'self' in it (so, we'd need some tricks to use it).
            # For now just use a lock for simplicity.
            found_in_cache = self._check_name_with_vars_cache_usage.get(
                normalized_keyword_name, None
            )
            if found_in_cache is not None:
                return found_in_cache
            from robotframework_ls.impl.text_utilities import (
                matches_name_with_variables,
            )
            for keyword_name_used in self._keywords_used:
                if matches_name_with_variables(
                    keyword_name_used, normalized_keyword_name
                ):
                    ret = True
                    break
            self._check_name_with_vars_cache_usage[normalized_keyword_name] = ret
            return ret

    def has_global_variable_definition(self, normalized_variable_name: str) -> bool:
        return normalized_variable_name in self._global_variables_defined

    def has_variable_reference(self, normalized_variable_name: str) -> bool:
        return normalized_variable_name in self._variable_references

    def get_json_list(self) -> List[ISymbolsJsonListEntry]:
        return self._json_list

    def get_library_info(self) -> Optional[ILibraryDoc]:
        # May return None if the weakly-referenced library was collected.
        w = self._library_info
        if w is None:
            return None
        return w()

    def get_doc(self) -> Optional[IRobotDocument]:
        # May return None if the weakly-referenced document was collected.
        w = self._doc
        if w is None:
            return None
        return w()

    def iter_keyword_info(self) -> Iterator[ISymbolKeywordInfo]:
        # Abstract: subclasses must implement.
        raise NotImplementedError("iter_keyword_info abstract in: %s", self.__class__)
class SymbolsCacheReverseIndex:
    """
    Workspace-wide reverse index mapping normalized global variable names to
    the set of uris where they're defined.

    Change notifications are accumulated under a lock; `synchronize` rebuilds
    the whole index when more than the current document changed.
    """

    def __init__(self):
        self._global_var_to_uris: Dict[str, Set[str]] = {}
        self._lock = threading.Lock()
        self._reindex_count = 0  # How many times a full reindex ran (diagnostics).
        self._uris_changed = set()
        self._force_reindex = True  # Start dirty: first synchronize reindexes.

    def request_full_reindex(self):
        with self._lock:
            self._force_reindex = True
            self._uris_changed.clear()

    def notify_uri_changed(self, uri: str) -> None:
        with self._lock:
            if not self._force_reindex:
                self._uris_changed.add(uri)
                # More than one uri changed: cheaper to just do a full reindex.
                if len(self._uris_changed) > 1:
                    self._force_reindex = True
                    self._uris_changed.clear()

    def has_global_variable(self, normalized_var_name: str) -> bool:
        return normalized_var_name in self._global_var_to_uris

    def get_global_variable_uri_definitions(
        self, normalized_var_name: str
    ) -> Optional[Set[str]]:
        """Uris defining the given normalized variable name, or None."""
        return self._global_var_to_uris.get(normalized_var_name)

    def synchronize(self, context: ICompletionContext):
        """Rebuild the index if needed (no-op when only the current doc changed)."""
        with self._lock:
            if not self._force_reindex:
                if not self._uris_changed or self._uris_changed == {context.doc.uri}:
                    # If the only thing changed is the current uri (or if there
                    # were no changes), don't do a workspace-wide update.
                    return
            # Reset synchronize-related flags.
            self._force_reindex = False
            self._uris_changed.clear()
            self._reindex_count += 1
            self._compute_new_symbols_cache_reverse_index_state(context)

    def dispose(self):
        self._global_var_to_uris = {}

    def _compute_new_symbols_cache_reverse_index_state(
        self, context: ICompletionContext
    ) -> None:
        """Recompute the full name -> uris mapping from all symbols caches."""
        from robotframework_ls.impl.workspace_symbols import iter_symbols_caches
        new_global_var_to_uris: Dict[str, Set[str]] = {}
        symbols_cache: BaseSymbolsCache
        # Note: always update as a whole.
        it = typing.cast(Iterator[BaseSymbolsCache], iter_symbols_caches("", context))
        try:
            for symbols_cache in it:
                uri = symbols_cache.get_uri()
                if not uri:
                    continue
                for global_var_name in symbols_cache._global_variables_defined:
                    s = new_global_var_to_uris.get(global_var_name)
                    if s is None:
                        s = new_global_var_to_uris[global_var_name] = set()
                    s.add(uri)
        except:
            log.exception("Exception computing symbols cache reverse index.")
            raise  # Maybe it was cancelled (or we had another error).
        else:
            # ok, it worked, let's actually update our internal state.
self._global_var_to_uris = new_global_var_to_uris | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/_symbols_cache.py | 0.884177 | 0.152694 | _symbols_cache.py | pypi |
from robotframework_ls.impl.protocols import ICompletionContext, ILocalizationInfo
from typing import Optional
from robotframework_ls.impl.text_utilities import normalize_robot_name
class _Requisites(object):
def __init__(self, section, matcher, replace_from_col, replace_to_col, selection):
self.section = section
self.matcher = matcher
self.replace_from_col = replace_from_col
self.replace_to_col = replace_to_col
self.selection = selection
def get_requisites(completion_context: ICompletionContext) -> Optional[_Requisites]:
    """
    Gather the data needed to complete section names (e.g. `[Documentation]`)
    at the current selection, or return None when completion doesn't apply
    (no section, trailing space, or missing '[' where one is required).
    """
    section_node = completion_context.get_ast_current_section()
    if section_node is None:
        return None
    from robotframework_ls.impl.string_matcher import RobotStringMatcher
    from robotframework_ls.impl.section_completions import get_section_constant
    section = get_section_constant(completion_context, section_node)
    if section is None:
        return None
    selection = completion_context.sel  #: :type selection: DocumentSelection
    line_to_col = selection.line_to_column
    if line_to_col.endswith(" "):
        return None
    replace_to_col = selection.col
    if section.names_in_brackets:
        # Names appear as `[Name]`: accept leading whitespace, then require
        # a '[' and match only the text typed after it.
        for i, c in enumerate(line_to_col):
            if c.isspace():
                continue
            elif c == "[":
                line_to_col = line_to_col[i + 1 :]
                replace_from_col = i
                break
            else:
                return None
        # for/else: the line up to the cursor had only whitespace (no '[').
        else:
            return None
        matcher = RobotStringMatcher(line_to_col)
    else:
        # i.e.: Needs to be the first char
        matcher = RobotStringMatcher(line_to_col)
        replace_from_col = 0
    return _Requisites(section, matcher, replace_from_col, replace_to_col, selection)
def complete(completion_context: ICompletionContext):
    """
    Provide completions for names valid in the current section (e.g.
    `[Documentation]` inside a keyword), honoring localized (translated)
    names when the Robot Framework version supports languages.
    """
    from robocorp_ls_core.lsp import (
        TextEdit,
        Range,
        Position,
        CompletionItem,
        CompletionItemKind,
    )
    from robotframework_ls.impl.robot_version import robot_version_supports_language
    requisites = get_requisites(completion_context)
    if requisites is None:
        return []
    section = requisites.section
    matcher = requisites.matcher
    replace_from_col = requisites.replace_from_col
    selection = requisites.selection
    replace_to_col = requisites.replace_to_col
    line = selection.current_line
    # If a ']' already follows the cursor, the replacement must consume it.
    sel_ends_with_close = line[selection.col :].startswith("]")
    ret = []
    if robot_version_supports_language():
        from robot.api import Language
        locinfo: ILocalizationInfo = completion_context.get_ast_localization_info()
        current_section_name = normalize_robot_name(
            completion_context.get_current_section_name()
        )
        def _translated_words():
            # Prefer the names of the language whose section marker matches
            # the current section.
            lang: Language
            for lang in locinfo.iter_languages_on_write():
                markers = section.markers_for_lang(lang)
                for marker in markers:
                    if normalize_robot_name(marker) == current_section_name:
                        yield from iter(section.names_for_lang(lang))
                        return
            # If it didn't return (is this possible?), provide all.
            for lang in locinfo.iter_languages_on_write():
                yield from iter(section.names_for_lang(lang))
        words = tuple(_translated_words())
    else:
        words = section.get_names_in_section_pre_rf_5_1()
    for word in sorted(words):
        if matcher.accepts(word):
            col_delta = 0
            if section.names_in_brackets:
                label = f"[{word}]"
                replacement = label
                if sel_ends_with_close:
                    col_delta = 1
            else:
                label = word
                replacement = word
            text_edit = TextEdit(
                Range(
                    start=Position(selection.line, replace_from_col),
                    end=Position(selection.line, replace_to_col + col_delta),
                ),
                replacement,
            )
            # text_edit = None
            ret.append(
                CompletionItem(
                    label, kind=CompletionItemKind.Keyword, text_edit=text_edit
                ).to_dict()
            )
return ret | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/section_name_completions.py | 0.807954 | 0.286306 | section_name_completions.py | pypi |
from robotframework_ls.impl.protocols import (
ICompletionContext,
TokenInfo,
VarTokenInfo,
IRobotToken,
INode,
)
from robocorp_ls_core.lsp import (
PositionTypedDict,
SelectionRangeTypedDict,
)
from typing import List, Sequence
from robocorp_ls_core.robotframework_log import get_logger
import itertools
log = get_logger(__name__)
def _empty_range(position: PositionTypedDict) -> SelectionRangeTypedDict:
    """Build a zero-width SelectionRange collapsed at *position*."""
    line = position["line"]
    character = position["character"]
    # start == end: a collapsed range at the requested position (two
    # distinct dicts so callers mutating one don't affect the other).
    collapsed: SelectionRangeTypedDict = {
        "range": {
            "start": {"line": line, "character": character},
            "end": {"line": line, "character": character},
        }
    }
    return collapsed
def _build_variable_range_hierarchy(
    current_token: TokenInfo,
    current_variable: VarTokenInfo,
) -> SelectionRangeTypedDict:
    """Selection range for the variable token, parented (when meaningful) by
    the hierarchy of its containing token."""
    from robotframework_ls.impl.ast_utils import create_range_from_token

    variable_sel_range: SelectionRangeTypedDict = {
        "range": create_range_from_token(current_variable.token)
    }
    # The containing token provides the parent levels of the hierarchy.
    container = _build_token_range_hierarchy(current_token)
    container_range = container.get("range")
    # Only attach a parent when the container has a range which actually
    # differs from the variable's own range.
    if container_range and container_range != variable_sel_range["range"]:
        variable_sel_range["parent"] = container
    return variable_sel_range
def _build_token_range_from_stack(
    ret: SelectionRangeTypedDict,
    current_node: INode,
    stack: Sequence[INode],
    token: IRobotToken,
):
    """
    Mutate *ret* in place, chaining "parent" entries for the current node and
    each enclosing AST node (innermost first), skipping nodes without a range
    and collapsing duplicates.
    """
    from robotframework_ls.impl.ast_utils import create_range_from_node
    last: SelectionRangeTypedDict = ret
    # stack is outermost-first, so reverse it to go from the inside out.
    for stack_node in itertools.chain((current_node,), reversed(stack)):
        r = create_range_from_node(
            stack_node,
            # Whitespace-ish tokens may map to empty node ranges — accept those.
            accept_empty=token.type in (token.EOL, token.EOS, token.SEPARATOR),
        )
        if r is None:
            continue
        if last["range"] == r:  # If it's the same, don't add it.
            continue
        new_range: SelectionRangeTypedDict = {"range": r}
        last["parent"] = new_range
        last = new_range
def _build_token_range_hierarchy(current_token: TokenInfo) -> SelectionRangeTypedDict:
    """Selection range for a token with parents derived from its AST stack."""
    from robotframework_ls.impl.ast_utils import create_range_from_token

    tok = current_token.token
    hierarchy: SelectionRangeTypedDict = {"range": create_range_from_token(tok)}
    _build_token_range_from_stack(hierarchy, current_token.node, current_token.stack, tok)
    return hierarchy
def selection_range(
    context: ICompletionContext, positions: List[PositionTypedDict]
) -> List[SelectionRangeTypedDict]:
    """
    Compute a SelectionRange (innermost range plus "parent" chain) for each
    requested position; positions not resolvable to a token get a collapsed
    (empty) range so the result aligns 1:1 with the input.
    """
    if not positions:
        return []
    ret: List[SelectionRangeTypedDict] = []
    from robotframework_ls.impl import ast_utils
    ast = context.get_ast()
    for position in positions:
        line = position["line"]
        section = ast_utils.find_section(ast, line)
        if section is None:
            ret.append(_empty_range(position))
            continue
        col = position["character"]
        current_token = ast_utils.find_token(section, line, col)
        if current_token is None:
            ret.append(_empty_range(position))
            continue
        # Inside a variable: start the hierarchy at the variable itself.
        current_variable = ast_utils.find_variable(section, line, col)
        if current_variable is not None:
            ret.append(_build_variable_range_hierarchy(current_token, current_variable))
            continue
        ret.append(_build_token_range_hierarchy(current_token))
    # NOTE(review): this logs the full result at INFO level — looks like
    # leftover debugging; consider demoting to debug.
    log.info("returning: %s", ret)
return ret | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/selection_range.py | 0.7586 | 0.315841 | selection_range.py | pypi |
from typing import List, Optional
from robocorp_ls_core.lsp import (
TextEditTypedDict,
CodeActionTypedDict,
WorkspaceEditTypedDict,
WorkspaceEditParamsTypedDict,
CommandTypedDict,
RangeTypedDict,
ShowDocumentParamsTypedDict,
)
from robotframework_ls.impl.protocols import ICompletionContext
def wrap_edits_in_snippet(
    completion_context: ICompletionContext,
    title,
    text_edits: List[TextEditTypedDict],
    kind: str,
) -> CodeActionTypedDict:
    """Build a CodeAction whose command applies *text_edits* (to the current
    document) as a snippet via `robot.applyCodeAction`."""
    uri = completion_context.doc.uri
    workspace_edit: WorkspaceEditTypedDict = {"changes": {uri: text_edits}}
    edit_params: WorkspaceEditParamsTypedDict = {
        "edit": workspace_edit,
        "label": title,
    }
    command: CommandTypedDict = {
        "title": title,
        "command": "robot.applyCodeAction",
        "arguments": [{"apply_snippet": edit_params}],
    }
    code_action: CodeActionTypedDict = {
        "title": title,
        "kind": kind,
        "command": command,
    }
    return code_action
def wrap_edit_in_command(
    completion_context: ICompletionContext, title, text_edit: TextEditTypedDict
) -> CodeActionTypedDict:
    """Build a 'quickfix' code action applying a single *text_edit*.

    Besides the `robot.applyCodeAction` command carrying an `apply_edit`
    payload, a show-document request is attached so the editor reveals
    the edited location afterwards.
    """
    workspace_edit: WorkspaceEditTypedDict = {
        "changes": {completion_context.doc.uri: [text_edit]}
    }
    labeled_edit: WorkspaceEditParamsTypedDict = {
        "edit": workspace_edit,
        "label": title,
    }
    command: CommandTypedDict = {
        "title": title,
        "command": "robot.applyCodeAction",
        "arguments": [{"apply_edit": labeled_edit}],
    }
    add_show_document_at_command(command, completion_context.doc.uri, text_edit)
    return {"title": title, "kind": "quickfix", "command": command}
def add_show_document_at_command(
    command: "CommandTypedDict",
    doc_uri: str,
    text_edit: "Optional[TextEditTypedDict]" = None,
):
    """Attach a `show_document` request to *command*'s first argument.

    The selection placed in the request is:
    - the position of the `$__LSP_CURSOR_LOCATION__$` marker inside
      *text_edit*'s new text (the marker is stripped from the edit), or
    - the end of *text_edit*'s range when there is no marker, or
    - line 0, character 0 when no *text_edit* is given.
    """
    marker = "$__LSP_CURSOR_LOCATION__$"
    if text_edit:
        new_text = text_edit["newText"]
        if marker not in new_text:
            endline = text_edit["range"]["end"]["line"]
            endchar = text_edit["range"]["end"]["character"]
        else:
            endline = text_edit["range"]["start"]["line"]
            endchar = text_edit["range"]["start"]["character"]
            # Find the actual marker location (and remove it from the text).
            text_edit["newText"] = new_text.replace(marker, "", 1)
            for line_i, text in enumerate(new_text.splitlines()):
                if marker in text:
                    endline += line_i
                    if line_i == 0:
                        # First inserted line continues at the edit's start
                        # character, so the marker column is relative to it.
                        endchar += text.find(marker)
                    else:
                        # Bug fix: subsequent inserted lines start at column 0,
                        # so the marker column is absolute (previously the
                        # start character was incorrectly added here as well).
                        endchar = text.find(marker)
                    break
    else:
        endline = 0
        endchar = 0
    selection: "RangeTypedDict" = {
        "start": {"line": endline, "character": endchar},
        "end": {"line": endline, "character": endchar},
    }
    show_document: "ShowDocumentParamsTypedDict" = {
        "uri": doc_uri,
        "selection": selection,
        "takeFocus": True,
    }
    arguments = command["arguments"]
    if arguments:
        arguments[0]["show_document"] = show_document
from typing import Set, Iterable, List
from robocorp_ls_core.lsp import Range, CodeActionTypedDict, TextEditTypedDict
from robotframework_ls.impl.protocols import ICompletionContext, IRobotToken
from robotframework_ls.impl._code_action_utils import wrap_edits_in_snippet
from robocorp_ls_core.protocols import IDocument
def code_action_others(
    completion_context: ICompletionContext,
    select_range: Range,
    only: Set[str],
) -> Iterable[CodeActionTypedDict]:
    """Yield the 'Assign to variable' code action for a keyword call.

    Offered only for an empty selection (a caret) placed on a KeywordCall
    that does not already assign its result.

    :param only: when non-empty, restricts which code-action kinds may be
        produced ("assign.toVar" is the only kind handled here).
    """
    from robocorp_ls_core.basic import isinstance_name

    if only and "assign.toVar" not in only:
        return
    if select_range.start != select_range.end:
        # This one is only done when no range is given.
        return
    token_info = completion_context.get_current_token()
    if not token_info:
        return
    if not isinstance_name(token_info.node, "KeywordCall"):
        return
    # Bail out if the call already assigns; otherwise stop at the KEYWORD
    # token (its position is where the assignment text will be inserted).
    for keyword_token in token_info.node.tokens:
        if keyword_token.type == keyword_token.ASSIGN:
            return
        if keyword_token.type == keyword_token.KEYWORD:
            # Leave keyword_token in the namespace.
            break
    else:
        # No KEYWORD token in the call: nothing to offer.
        return
    from robotframework_ls.robot_config import get_arguments_separator

    sep = get_arguments_separator(completion_context)
    tok: IRobotToken = keyword_token
    # Insert "${variable}=<sep>" right before the keyword name (snippet
    # placeholder 0 selects the variable name for the user to type over).
    text_edits: List[TextEditTypedDict] = [
        {
            "range": {
                "start": {"line": tok.lineno - 1, "character": tok.col_offset},
                "end": {"line": tok.lineno - 1, "character": tok.col_offset},
            },
            "newText": "${${0:variable}}=%s" % (sep,),
        }
    ]
    yield wrap_edits_in_snippet(
        completion_context, "Assign to variable", text_edits, "assign.toVar"
    )
def code_action_surround_with(
    completion_context: ICompletionContext,
    select_range: Range,
    only: Set[str],
) -> Iterable[CodeActionTypedDict]:
    """Yield 'Surround with try..except[..finally]' code actions (RF >= 5).

    Requires a non-empty selection of whole lines; the selected lines are
    re-indented one level and wrapped in a TRY/EXCEPT (and optionally
    FINALLY) block emitted as a snippet.
    """
    from robotframework_ls.impl.robot_version import get_robot_major_version

    if (
        only
        and "surroundWith.tryExcept" not in only
        and "surroundWith.tryExceptFinally" not in only
    ):
        return
    if select_range.start == select_range.end:
        # This one requires a selection.
        return
    if get_robot_major_version() < 5:
        return  # Requires RF 5.
    line = select_range.start.line
    col = select_range.start.character
    endline = select_range.end.line
    endcol = select_range.end.character
    doc: IDocument = completion_context.doc
    contents = doc.get_range(line, col, endline, endcol)
    first_line_contents = doc.get_line(line)
    last_line_contents = doc.get_line(endline)
    if line == endline:
        # If the selection is only at one line, verify whether all contents are
        # selected.
        if first_line_contents.strip() != contents.strip():
            return
    from robotframework_ls.impl.text_utilities import TextUtilities
    from robotframework_ls.robot_config import get_arguments_separator

    sep = get_arguments_separator(completion_context)
    # Find the smallest indentation among the (non-comment) selected lines;
    # it becomes the indent of the new TRY/END block.
    indent = None
    lines = []
    for line_i in range(line, endline + 1):
        line_content = doc.get_line(line_i)
        if not line_content.strip().startswith("#"):
            new_indent = TextUtilities(line_content).get_indent()
            if indent is None:
                indent = new_indent
            else:
                if len(new_indent) < len(indent):
                    indent = new_indent
        lines.append(line_content)
    if not indent:
        # No indented, non-comment line in the selection: not inside a body.
        return
    if not only or "surroundWith.tryExcept" in only:
        # Try..except
        full_lines = []
        full_lines.append(f"{indent}TRY")
        for line_content in lines:
            # Original lines keep their own indent and gain one extra level.
            full_lines.append(f"{indent}{line_content}")
        full_lines.append(f"{indent}EXCEPT{sep}${{0:message}}")
        full_lines.append(f"{indent}{sep}No operation")
        full_lines.append(f"{indent}END")
        text_edits: List[TextEditTypedDict] = [
            {
                "range": {
                    "start": {"line": line, "character": 0},
                    "end": {"line": endline, "character": len(last_line_contents)},
                },
                "newText": "\n".join(full_lines),
            }
        ]
        yield wrap_edits_in_snippet(
            completion_context,
            "Surround with try..except",
            text_edits,
            "surroundWith.tryExcept",
        )
    if not only or "surroundWith.tryExceptFinally" in only:
        # Try..except..finally
        full_lines = []
        full_lines.append(f"{indent}TRY")
        for line_content in lines:
            full_lines.append(f"{indent}{line_content}")
        full_lines.append(f"{indent}EXCEPT{sep}${{0:message}}")
        full_lines.append(f"{indent}{sep}No operation")
        full_lines.append(f"{indent}FINALLY")
        full_lines.append(f"{indent}{sep}No operation")
        full_lines.append(f"{indent}END")
        text_edits = [
            {
                "range": {
                    "start": {"line": line, "character": 0},
                    "end": {"line": endline, "character": len(last_line_contents)},
                },
                "newText": "\n".join(full_lines),
            }
        ]
        yield wrap_edits_in_snippet(
            completion_context,
            "Surround with try..except..finally",
            text_edits,
            "surroundWith.tryExceptFinally",
        )
from typing import List, Optional, Dict, Iterator, Tuple
from robocorp_ls_core.lsp import LocationTypedDict, RangeTypedDict, PositionTypedDict
from robocorp_ls_core.robotframework_log import get_logger
from robotframework_ls.impl.protocols import (
ICompletionContext,
IRobotDocument,
IKeywordFound,
IVariablesCollector,
IVariableFound,
cast_to_keyword_definition,
AbstractVariablesCollector,
cast_to_variable_definition,
VarTokenInfo,
VariableKind,
KeywordUsageInfo,
)
import typing
from robocorp_ls_core.protocols import check_implements
from robocorp_ls_core.basic import isinstance_name, normalize_filename
log = get_logger(__name__)
def matches_source(s1: str, s2: str) -> bool:
    """Return True when the two source paths refer to the same file.

    Fast path: exact string equality; otherwise compare normalized forms.
    """
    return s1 == s2 or normalize_filename(s1) == normalize_filename(s2)
class _VariableDefinitionsCollector(AbstractVariablesCollector):
    """Collects variable definitions whose name matches the given matcher."""

    def __init__(self, robot_string_matcher):
        from robotframework_ls.impl.string_matcher import RobotStringMatcher

        # Matcher used to filter variable names.
        self.robot_string_matcher: RobotStringMatcher = robot_string_matcher
        # All variables accepted so far (read by callers).
        self.matches: List[IVariableFound] = []

    def accepts(self, variable_name):
        """True if *variable_name* matches the configured matcher."""
        matcher = self.robot_string_matcher
        return matcher.is_variable_name_match(variable_name)

    def on_variable(self, variable_found: IVariableFound):
        """Record an accepted variable definition."""
        self.matches.append(variable_found)

    def __typecheckself__(self) -> None:
        _: IVariablesCollector = check_implements(self)
def iter_variable_references_in_doc(
    completion_context: ICompletionContext,
    variable_found: IVariableFound,
    argument_var_references_computer: Optional[
        "_NamedArgumentVarReferencesComputer"
    ] = None,
) -> Iterator[RangeTypedDict]:
    """Yield the ranges of all references to *variable_found* in the doc.

    References include: usages of the variable in the AST, matching
    definitions (local or global depending on the variable's scope) and —
    when the variable is a keyword argument — named-argument usages such
    as `var_name=value` at the keyword's call sites.

    :param argument_var_references_computer: reused across documents by
        callers; created on demand when not given.
    """
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl.ast_utils import create_range_from_token
    from robotframework_ls.impl.string_matcher import RobotStringMatcher
    from robotframework_ls.impl.variable_completions import collect_variables
    from robotframework_ls.impl.text_utilities import normalize_robot_name
    from robotframework_ls.impl.variable_completions import collect_local_variables
    from robotframework_ls.impl.ast_utils import get_local_variable_stack_and_node

    normalized_name = normalize_robot_name(variable_found.variable_name)
    robot_string_matcher = RobotStringMatcher(normalized_name)
    # Collector for any variable with the same name.
    collector = _VariableDefinitionsCollector(robot_string_matcher)
    if argument_var_references_computer is None:
        argument_var_references_computer = _NamedArgumentVarReferencesComputer(
            completion_context, variable_found
        )
    if argument_var_references_computer.check_keyword_usage_normalized_name:
        # The variable is a keyword argument: also yield `name=value`
        # named-argument references at the keyword's call sites.
        lst = _PreventDuplicatesInList()
        argument_var_references_computer.add_references_to_named_keyword_arguments_from_doc(
            completion_context, lst
        )
        for entry in lst.lst:
            yield entry["range"]
    ast = completion_context.get_ast()
    if ast is not None:
        # Get references.
        var_token_info: VarTokenInfo
        if variable_found.is_local_variable:
            # For local variables we must have the stack
            stack = variable_found.stack
            assert stack
            # Just search the current stack.
            stack, stack_node = get_local_variable_stack_and_node(stack)
            for var_token_info in ast_utils.iter_variable_references(stack_node):
                completion_context.check_cancelled()
                if not robot_string_matcher.is_variable_name_match(
                    var_token_info.token.value
                ):
                    continue
                yield create_range_from_token(var_token_info.token)
            # Get definitions (only local).
            cp = completion_context.create_copy_with_selection(
                line=variable_found.lineno, col=variable_found.col_offset
            )
            token_info = cp.get_current_token()
            assert token_info
            collect_local_variables(cp, collector, token_info)
        else:
            # i.e.: For globals collect all globals as well as locals overriding
            # the global value.
            for var_token_info in ast_utils.iter_variable_references(ast):
                completion_context.check_cancelled()
                if not robot_string_matcher.is_variable_name_match(
                    var_token_info.token.value
                ):
                    continue
                yield create_range_from_token(var_token_info.token)
            # Get definitions (all).
            collect_variables(completion_context, collector, only_current_doc=True)
        # Yield the ranges of the collected definitions themselves.
        variable: IVariableFound
        for variable in collector.matches:
            start: PositionTypedDict = {
                "line": variable.lineno,
                "character": variable.col_offset,
            }
            end: PositionTypedDict = {
                "line": variable.lineno,
                "character": variable.end_col_offset,
            }
            variable_range: RangeTypedDict = {"start": start, "end": end}
            yield variable_range
def iter_keyword_usage_references_in_doc(
    completion_context: ICompletionContext,
    doc: IRobotDocument,
    normalized_name: str,
    keyword_found: Optional[IKeywordFound],
) -> Iterator[Tuple[KeywordUsageInfo, bool, str, str]]:
    """Yield each usage of the keyword *normalized_name* found in *doc*.

    Yields tuples of (usage info, whether the usage was written with a
    `prefix.` qualifier, the possibly-dotted name, the undotted name).

    :param keyword_found: if given, we'll match if the definition actually
    maps to the proper place (if not given, we'll just match based on the name
    without verifying if the definition is the same).
    """
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl.find_definition import find_definition
    from robotframework_ls.impl.text_utilities import normalize_robot_name
    from robotframework_ls.impl.text_utilities import matches_name_with_variables

    ast = doc.get_ast()
    if ast is not None:
        # Embedded-arguments keywords contain '{' in the normalized name.
        has_var_in_name = "{" in normalized_name
        # Dict with normalized name -> whether it was found or not previously.
        found_in_this_doc: Dict[str, bool] = {}
        # Ok, we have the document, now, load the usages.
        for keyword_usage_info in ast_utils.iter_keyword_usage_tokens(
            ast, collect_args_as_keywords=True
        ):
            completion_context.check_cancelled()
            keword_name_possibly_dotted = keyword_usage_info.name
            found_dot_in_usage = "." in keword_name_possibly_dotted
            if found_dot_in_usage:
                keword_name_not_dotted = keword_name_possibly_dotted.split(".")[-1]
            else:
                keword_name_not_dotted = keword_name_possibly_dotted
            keword_name_not_dotted_normalized = normalize_robot_name(
                keword_name_not_dotted
            )
            if keword_name_not_dotted_normalized == normalized_name or (
                has_var_in_name
                and matches_name_with_variables(
                    keword_name_not_dotted_normalized, normalized_name
                )
            ):
                found_once_in_this_doc = found_in_this_doc.get(
                    keword_name_possibly_dotted
                )
                token = keyword_usage_info.token
                line = token.lineno - 1
                if keyword_found is not None:
                    if found_once_in_this_doc is None:
                        # Verify if it's actually the same one (not one defined in
                        # a different place with the same name).
                        new_ctx = completion_context.create_copy_doc_line_col(
                            doc, line, token.col_offset
                        )
                        definitions = find_definition(new_ctx)
                        for definition in definitions:
                            found = matches_source(
                                definition.source, keyword_found.source
                            )
                            if found:
                                found_once_in_this_doc = found_in_this_doc[
                                    keword_name_possibly_dotted
                                ] = True
                                break
                        else:
                            # No definition matched: cache the negative result
                            # for this spelling and skip the usage.
                            found_once_in_this_doc = found_in_this_doc[
                                keword_name_possibly_dotted
                            ] = False
                            continue
                    if not found_once_in_this_doc:
                        continue
                yield (
                    keyword_usage_info,
                    found_dot_in_usage,
                    keword_name_possibly_dotted,
                    keword_name_not_dotted,
                )
def iter_keyword_references_in_doc(
    completion_context: ICompletionContext,
    doc: IRobotDocument,
    normalized_name: str,
    keyword_found: Optional[IKeywordFound],
) -> Iterator[RangeTypedDict]:
    """Yield the range of each usage of the given keyword in *doc*.

    :param keyword_found: if given, only usages whose definition resolves
        to this keyword are yielded (see iter_keyword_usage_references_in_doc).
    """
    for (
        keyword_usage_info,
        found_dot_in_usage,
        keword_name_possibly_dotted,
        keword_name_not_dotted,
    ) in iter_keyword_usage_references_in_doc(
        completion_context, doc, normalized_name, keyword_found
    ):
        # Note: a duplicated `token = keyword_usage_info.token` assignment
        # was removed here (dead code).
        token = keyword_usage_info.token
        line = token.lineno - 1
        if found_dot_in_usage:
            # We need to create a new range because we just want to match the
            # name part (skip the `prefix.` qualifier).
            col_offset = token.col_offset + (
                len(keword_name_possibly_dotted) - len(keword_name_not_dotted)
            )
        else:
            col_offset = token.col_offset
        # Ok, we found it, let's add it to the result.
        yield {
            "start": {
                "line": line,
                "character": col_offset,
            },
            "end": {
                "line": line,
                "character": token.end_col_offset,
            },
        }
def collect_variable_references(
    completion_context: ICompletionContext, var_token_info: VarTokenInfo
):
    """Find all references to the variable at *var_token_info*.

    Resolves the variable's definition(s) first; when both global and
    local definitions exist the global one is preferred (so shadowing
    local assignments are collected as well).
    """
    from robotframework_ls.impl.find_definition import find_variable_definition

    definitions = find_variable_definition(completion_context, var_token_info)
    if not definitions:
        return []

    candidates: List[IVariableFound] = []
    for definition in definitions:
        as_variable_definition = cast_to_variable_definition(definition)
        if as_variable_definition:
            candidates.append(as_variable_definition.variable_found)
    if not candidates:
        return []

    # Prefer a global definition when one exists; otherwise use the first.
    variable_found = next(
        (v for v in candidates if not v.is_local_variable), candidates[0]
    )
    return _references_for_variable_found(completion_context, variable_found)
def references(
    completion_context: ICompletionContext, include_declaration: bool
) -> List[LocationTypedDict]:
    """Entry point: find all references for the symbol under the cursor.

    Dispatches to variable-reference collection when the cursor is on a
    variable, otherwise to keyword-reference collection (either from a
    keyword definition name or from a keyword usage). Returns [] when
    nothing resolvable is under the cursor.
    """
    var_token_info = completion_context.get_current_variable()
    if var_token_info is not None:
        return collect_variable_references(completion_context, var_token_info)
    token_info = completion_context.get_current_token()
    if token_info is None:
        return []
    keyword_found: IKeywordFound
    if token_info.token.type == token_info.token.KEYWORD_NAME:
        # Cursor is on a keyword *definition* name: resolve it and collect
        # references from there.
        if isinstance_name(token_info.node, "KeywordName"):
            from robotframework_ls.impl.find_definition import find_keyword_definition

            definitions = find_keyword_definition(completion_context, token_info)
            if definitions:
                for definition in definitions:
                    as_keyword_definition = cast_to_keyword_definition(definition)
                    if as_keyword_definition:
                        keyword_found = as_keyword_definition.keyword_found
                        return references_for_keyword_found(
                            completion_context, keyword_found, include_declaration
                        )
    # Cursor may be on a keyword *usage*: resolve its definition instead.
    current_keyword_definition_and_usage_info = (
        completion_context.get_current_keyword_definition_and_usage_info()
    )
    if current_keyword_definition_and_usage_info is not None:
        completion_context.monitor.check_cancelled()
        keyword_definition, _usage_info = current_keyword_definition_and_usage_info
        keyword_found = keyword_definition.keyword_found
        return references_for_keyword_found(
            completion_context, keyword_found, include_declaration
        )
    return []
class _NamedArgumentVarReferencesComputer:
    """
    A helper to handle the case where we also need to rename named arguments.
    To do this we need to:
    1. Get references to the keyword
    2. Check if any of its arguments has something as 'var_name=xxx'.
    3. Create the reference to the 'var_name'.
    """

    def __init__(
        self,
        initial_completion_context: ICompletionContext,
        variable_found: IVariableFound,
    ):
        """Resolve the keyword owning *variable_found* when it is an argument.

        After construction, `check_keyword_usage_normalized_name` is set
        (non-None) only when the variable is a keyword argument whose
        enclosing keyword definition could be resolved.
        """
        from robotframework_ls.impl.ast_utils import get_local_variable_stack_and_node
        from robotframework_ls.impl.find_definition import find_keyword_definition
        from robotframework_ls.impl.text_utilities import normalize_robot_name

        # The resolved IKeywordFound for the enclosing keyword (or None).
        self.check_keyword_usage_keyword_found = None
        # Normalized name of the enclosing keyword (or None).
        self.check_keyword_usage_normalized_name = None
        self.var_name_normalized = normalize_robot_name(variable_found.variable_name)
        if (
            variable_found.variable_kind == VariableKind.ARGUMENT
            and variable_found.stack
        ):
            _, keyword_or_test_case_node = get_local_variable_stack_and_node(
                variable_found.stack
            )
            if keyword_or_test_case_node.__class__.__name__ == "Keyword":
                # Place a cursor on the keyword definition name and resolve it.
                cp = initial_completion_context.create_copy_with_selection(
                    keyword_or_test_case_node.lineno - 1,
                    keyword_or_test_case_node.col_offset,
                )
                cp_token_info = cp.get_current_token()
                if cp_token_info:
                    found = find_keyword_definition(
                        cp,
                        cp_token_info,
                    )
                    for keyword_found_definition in found or ():
                        self.check_keyword_usage_keyword_found = (
                            keyword_found_definition.keyword_found
                        )
                        self.check_keyword_usage_normalized_name = normalize_robot_name(
                            keyword_found_definition.keyword_name
                        )
                        break

    def add_references_to_named_keyword_arguments_from_doc(
        self,
        new_completion_context: ICompletionContext,
        ret: "_PreventDuplicatesInList",
    ):
        """Append references for `var_name=` named arguments in the given doc.

        Scans usages of the resolved keyword and, for each ARGUMENT token of
        the form `name=value` whose name matches the tracked variable, adds
        the range covering just the name part to *ret*. No-op when the
        enclosing keyword was not resolved.
        """
        from robotframework_ls.impl.variable_resolve import find_split_index
        from robotframework_ls.impl.text_utilities import normalize_robot_name

        if not self.check_keyword_usage_normalized_name:
            return
        for (
            keyword_usage_info,
            _found_dot_in_usage,
            _keword_name_possibly_dotted,
            _keword_name_not_dotted,
        ) in iter_keyword_usage_references_in_doc(
            new_completion_context,
            new_completion_context.doc,
            self.check_keyword_usage_normalized_name,
            self.check_keyword_usage_keyword_found,
        ):
            for token in keyword_usage_info.node.tokens:
                if token.type == token.ARGUMENT:
                    # find_split_index locates the `=` separating name=value.
                    split_eq = find_split_index(token.value)
                    if split_eq > 0:
                        arg_name = normalize_robot_name(token.value[:split_eq])
                        if arg_name == self.var_name_normalized:
                            start: PositionTypedDict = {
                                "line": token.lineno - 1,
                                "character": token.col_offset,
                            }
                            end: PositionTypedDict = {
                                "line": token.lineno - 1,
                                "character": token.col_offset + split_eq,
                            }
                            ref_range: RangeTypedDict = {"start": start, "end": end}
                            ret.append(
                                {
                                    "uri": new_completion_context.doc.uri,
                                    "range": ref_range,
                                }
                            )
class _PreventDuplicatesInList:
def __init__(self):
self.lst: List[LocationTypedDict] = []
self._found = set()
def append(self, location: LocationTypedDict):
key = (
location["uri"],
location["range"]["start"]["line"],
location["range"]["start"]["character"],
location["range"]["end"]["line"],
location["range"]["end"]["character"],
)
if key in self._found:
return
self._found.add(key)
self.lst.append(location)
def _references_for_variable_found(
    initial_completion_context: ICompletionContext,
    variable_found: IVariableFound,
):
    """Collect all reference locations for *variable_found*.

    Always searches the initial document. For global variables (and for
    local keyword arguments that appear as named arguments elsewhere) the
    whole workspace is also searched via the symbols caches.
    """
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    ret = _PreventDuplicatesInList()
    is_local_variable = variable_found.is_local_variable
    from robotframework_ls.impl.workspace_symbols import iter_symbols_caches

    named_argument_var_references_computer = _NamedArgumentVarReferencesComputer(
        initial_completion_context, variable_found
    )
    # Initial doc (need to get local scope).
    ref_range: RangeTypedDict
    for ref_range in iter_variable_references_in_doc(
        initial_completion_context,
        variable_found,
        named_argument_var_references_computer,
    ):
        ret.append({"uri": initial_completion_context.doc.uri, "range": ref_range})
    if (
        is_local_variable
        and not named_argument_var_references_computer.check_keyword_usage_keyword_found
    ):
        # Plain local variable: cannot be referenced from other docs.
        return ret.lst
    normalized_variable_name = normalize_robot_name(variable_found.variable_name)
    for symbols_cache in iter_symbols_caches(
        None,
        initial_completion_context,
        force_all_docs_in_workspace=True,
        timeout=999999,
    ):
        initial_completion_context.check_cancelled()
        # If it's a local variable we may still need to search for named arguments...
        if (
            is_local_variable
            and named_argument_var_references_computer.check_keyword_usage_normalized_name
        ):
            # Skip docs that don't even use the enclosing keyword.
            if not symbols_cache.has_keyword_usage(
                named_argument_var_references_computer.check_keyword_usage_normalized_name
            ):
                continue
        elif not is_local_variable:
            # Skip docs that neither define nor reference the global.
            if not symbols_cache.has_global_variable_definition(
                normalized_variable_name
            ) and not symbols_cache.has_variable_reference(normalized_variable_name):
                continue
        doc: Optional[IRobotDocument] = symbols_cache.get_doc()
        if doc is None:
            # The cache has no live document: load it from its uri.
            uri = symbols_cache.get_uri()
            if uri is None:
                continue
            doc = typing.cast(
                Optional[IRobotDocument],
                initial_completion_context.workspace.get_document(
                    doc_uri=uri, accept_from_file=True
                ),
            )
            if doc is None:
                log.debug(
                    "Unable to load document for getting references with uri: %s",
                    uri,
                )
                continue
        if initial_completion_context.doc.uri == doc.uri:
            continue  # Skip (already analyzed).
        new_completion_context = initial_completion_context.create_copy(doc)
        if not is_local_variable:
            # Collect references to global variables as well as named arguments.
            for ref_range in iter_variable_references_in_doc(
                new_completion_context,
                variable_found,
                named_argument_var_references_computer,
            ):
                ret.append({"uri": doc.uri, "range": ref_range})
        else:
            # We still need to collect references to named arguments.
            named_argument_var_references_computer.add_references_to_named_keyword_arguments_from_doc(
                new_completion_context, ret
            )
    return ret.lst
def references_for_keyword_found(
    completion_context: ICompletionContext,
    keyword_found: IKeywordFound,
    include_declaration: bool,
) -> list:
    """Collect all reference locations for a resolved keyword definition.

    Searches all workspace documents via their symbols caches; only docs
    whose cache reports a usage of the keyword's normalized name are
    actually scanned.

    :param include_declaration: when True the definition's own location is
        included as the first result.
    """
    from robocorp_ls_core import uris
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    ret = _PreventDuplicatesInList()
    normalized_name = normalize_robot_name(keyword_found.keyword_name)
    # Ok, we have the keyword definition, now, we must actually look for the
    # references...
    if include_declaration:
        ret.append(
            {
                "uri": uris.from_fs_path(keyword_found.source),
                "range": {
                    "start": {
                        "line": keyword_found.lineno,
                        "character": keyword_found.col_offset,
                    },
                    "end": {
                        "line": keyword_found.end_lineno,
                        "character": keyword_found.end_col_offset,
                    },
                },
            }
        )
    from robotframework_ls.impl.workspace_symbols import iter_symbols_caches

    for symbols_cache in iter_symbols_caches(
        None, completion_context, force_all_docs_in_workspace=True, timeout=999999
    ):
        completion_context.check_cancelled()
        if symbols_cache.has_keyword_usage(normalized_name):
            doc: Optional[IRobotDocument] = symbols_cache.get_doc()
            if doc is None:
                # The cache has no live document: load it from its uri.
                uri = symbols_cache.get_uri()
                if uri is None:
                    continue
                doc = typing.cast(
                    Optional[IRobotDocument],
                    completion_context.workspace.get_document(
                        doc_uri=uri, accept_from_file=True
                    ),
                )
                if doc is None:
                    log.debug(
                        "Unable to load document for getting references with uri: %s",
                        uri,
                    )
                    continue
            ref_range: RangeTypedDict
            cp = completion_context.create_copy(doc)
            for ref_range in iter_keyword_references_in_doc(
                cp, doc, normalized_name, keyword_found
            ):
                ret.append({"uri": doc.uri, "range": ref_range})
    return ret.lst
from robotframework_ls.impl.protocols import (
ICompletionContext,
IRobotDocument,
ILibraryDoc,
IKeywordFound,
CompletionType,
)
from robocorp_ls_core.lsp import (
CompletionItemKind,
TextEditTypedDict,
CompletionItemTypedDict,
InsertTextFormat,
)
from typing import Optional, List, Set, Dict, Any
from robotframework_ls.impl.protocols import NodeInfo
import os.path
from robocorp_ls_core import uris
from robocorp_ls_core.protocols import IWorkspace
from robotframework_ls.impl.protocols import ISymbolsCache
from robotframework_ls.impl.robot_constants import ALL_KEYWORD_RELATED_FILE_EXTENSIONS
class _Collector(object):
    """Builds auto-import completion items for keywords not yet imported.

    Accepts keyword names matching the token under the cursor (exact or
    fuzzy, see `exact_match`) that are not already available through the
    document's imports, and produces completion items which optionally add
    the required `Library`/`Resource` import as an additional text edit.
    """

    def __init__(
        self,
        selection,
        token,
        import_location_info: "_ImportLocationInfo",
        imported_keyword_name_to_keyword: Dict[str, List[IKeywordFound]],
        exact_match: bool,
        add_import: bool,
        prefix_module: bool,
    ):
        from robotframework_ls.impl.string_matcher import RobotStringMatcher

        token_str = token.value
        # Completion items produced so far (read by callers).
        self.completion_items: List[CompletionItemTypedDict] = []
        self.selection = selection
        self.import_location_info = import_location_info
        self.token = token
        # Keywords already reachable through the current imports.
        self.imported_keyword_name_to_keyword = imported_keyword_name_to_keyword
        # When True, only names equal (by robot rules) to the token match.
        self.exact_match = exact_match
        # When True, the completion also inserts the missing import.
        self.add_import = add_import
        # When True, completions are inserted as `Module.Keyword`.
        self.prefix_module = prefix_module
        self._matcher = RobotStringMatcher(token_str)

    def accepts(self, keyword_name: str) -> bool:
        """True if *keyword_name* matches the token and is not yet imported."""
        if self.exact_match:
            if not self._matcher.is_same_robot_name(keyword_name):
                return False
        else:
            if not self._matcher.accepts_keyword_name(keyword_name):
                return False
        keywords_found: Optional[
            List[IKeywordFound]
        ] = self.imported_keyword_name_to_keyword.get(keyword_name)
        if not keywords_found:
            return True
        # Already reachable through an existing import: nothing to offer.
        return False

    def _create_completion_item(
        self,
        completion_context: ICompletionContext,
        keyword_name: str,
        selection,
        token,
        col_delta: int,
        memo: Set[str],
        lib_import: Optional[str] = None,
        resource_path: Optional[str] = None,
        data: Optional[Any] = None,
    ) -> Optional[CompletionItemTypedDict]:
        """
        Create (and record) a completion item for *keyword_name*.

        Returns None when an item with the same label was already created
        (deduplicated through *memo*).

        Note: the lib_import and resource_path are the strings to be added
        so that the given library/resource is loaded.
        i.e.: It's the name concatenated to the `Library {lib_import}` or
        `Resource {resource_path}`.
        """
        label = f"{keyword_name} ({lib_import or resource_path})"
        if label in memo:
            return None
        memo.add(label)
        prefix = ""
        detail = ""
        if self.add_import:
            # Decide at which line the new import statement goes.
            import_line = -1
            if completion_context.type != CompletionType.shell:
                if lib_import is not None:
                    import_line = self.import_location_info.get_library_import_line()
                elif resource_path is not None:
                    import_line = self.import_location_info.get_resource_import_line()
            if import_line == -1:
                # There's no existing import, so, let's see if we have a *** Settings *** section.
                # If we don't we have to create the whole settings, otherwise, we'll add the statement
                # as the first thing in the existing *** Settings *** section.
                if completion_context.type == CompletionType.shell:
                    import_line = 0
                    prefix = "*** Settings ***\n"
                elif self.import_location_info.setting_section_node_info is None:
                    import_line = 0
                    prefix = "*** Settings ***\n"
                else:
                    import_line = (
                        self.import_location_info.setting_section_node_info.node.end_lineno
                        - 1
                    )
        text = keyword_name
        if keyword_name in self.imported_keyword_name_to_keyword or self.prefix_module:
            # Disambiguate with the module/resource basename (without a
            # keyword-file extension).
            check = lib_import or resource_path
            if check:
                basename = os.path.basename(check)
                if basename.endswith(ALL_KEYWORD_RELATED_FILE_EXTENSIONS):
                    basename = os.path.splitext(basename)[0]
                text = f"{basename}.{keyword_name}"
        text_edit: TextEditTypedDict = {
            "range": {
                "start": {
                    "line": selection.line,
                    "character": token.col_offset + col_delta,
                },
                "end": {"line": selection.line, "character": token.end_col_offset},
            },
            "newText": text,
        }
        additional_text_edits: Optional[List[TextEditTypedDict]] = None
        if not self.add_import:
            if lib_import is not None:
                detail = "* Requires Library Import"
            elif resource_path is not None:
                detail = "* Requires Resource Import"
        else:
            additional_text_edits = []
            # NOTE(review): the separator between `Library`/`Resource` and the
            # name below appears to be a single space — Robot syntax requires
            # at least two; verify against the original source.
            if lib_import is not None:
                additional_text_edits.append(
                    {
                        "range": {
                            "start": {"line": import_line, "character": 0},
                            "end": {"line": import_line, "character": 0},
                        },
                        "newText": f"{prefix}Library {lib_import}\n",
                    }
                )
                detail = "* Adds Library Import"
            elif resource_path is not None:
                additional_text_edits.append(
                    {
                        "range": {
                            "start": {"line": import_line, "character": 0},
                            "end": {"line": import_line, "character": 0},
                        },
                        "newText": f"{prefix}Resource {resource_path}\n",
                    }
                )
                detail = "* Adds Resource Import"
        completion_item: CompletionItemTypedDict = {
            "label": f"{label}*",
            "detail": detail,
            "kind": CompletionItemKind.Reference,
            "textEdit": text_edit,
            "insertText": text_edit["newText"],
            "insertTextFormat": InsertTextFormat.Snippet,
            "additionalTextEdits": additional_text_edits,
            "data": data,
        }
        self.completion_items.append(completion_item)
        return completion_item
def _collect_auto_import_completions(
    completion_context: ICompletionContext,
    collector: _Collector,
    collect_deprecated: bool = False,
):
    """Feed *collector* with keywords from all workspace symbols caches.

    For each non-builtin symbols cache, computes the import statement that
    would make its keywords available (a relative path when the source is
    inside the workspace, the library name otherwise) and creates a
    completion item for every keyword the collector accepts.

    :param collect_deprecated: when False, deprecated libraries (by name
        mapping or doc text) are skipped.
    """
    from robotframework_ls.impl.workspace_symbols import iter_symbols_caches
    from robotframework_ls.robot_config import create_convert_keyword_format_func
    from robotframework_ls import robot_config
    from robotframework_ls.impl.text_utilities import has_deprecated_text

    symbols_cache: ISymbolsCache
    selection = completion_context.sel
    token = collector.token
    ws: IWorkspace = completion_context.workspace
    folder_paths = []
    for folder in ws.iter_folders():
        folder_paths.append(uris.to_fs_path(folder.uri))
    curr_doc_path = os.path.dirname(uris.to_fs_path(completion_context.doc.uri))
    memo: Set[str] = set()
    default_convert_keyword_format = create_convert_keyword_format_func(
        completion_context.config
    )

    def noop(x):
        return x

    deprecated_name_to_replacement = (
        robot_config.get_robot_libraries_deprecated_name_to_replacement(
            completion_context.config
        )
    )
    for symbols_cache in iter_symbols_caches(
        None, completion_context, show_builtins=False
    ):
        library_info: Optional[ILibraryDoc] = symbols_cache.get_library_info()
        doc: Optional[IRobotDocument] = symbols_cache.get_doc()
        lib_import = None
        resource_path = None
        # Defensive default: guarantees the name is bound even if a cache has
        # neither library info nor a document.
        convert_keyword_format = noop
        if library_info is not None:
            if not collect_deprecated and (
                library_info.name in deprecated_name_to_replacement
                or has_deprecated_text(library_info.doc)
            ):
                continue
            # Skip libraries already imported (by source when available,
            # otherwise by name).
            if library_info.source:
                if (
                    library_info.source
                    in collector.import_location_info.imported_libraries
                ):
                    continue
            elif library_info.name in collector.import_location_info.imported_libraries:
                continue
            if library_info.source:
                for folder_path in folder_paths:
                    # If the library is found to be in the workspace, use a relative
                    # path, otherwise use the library name (in which case it's expected
                    # to be in the pythonpath).
                    if library_info.source.startswith(folder_path):
                        try:
                            lib_import = os.path.relpath(
                                library_info.source, curr_doc_path
                            ).replace("\\", "/")
                            break
                        except Exception:
                            # e.g. ValueError on Windows when the paths are on
                            # different drives: fall through to the name below.
                            pass
                else:
                    lib_import = library_info.name
            else:
                lib_import = library_info.name
            convert_keyword_format = default_convert_keyword_format
        elif doc is not None:
            resource_path = doc.path
            try:
                resource_path = os.path.relpath(resource_path, curr_doc_path).replace(
                    "\\", "/"
                )
            except Exception:
                # Best-effort: keep the absolute path when relpath fails.
                pass
            convert_keyword_format = noop
        for keyword_info in symbols_cache.iter_keyword_info():
            if collector.accepts(keyword_info.name):
                item = collector._create_completion_item(
                    completion_context,
                    convert_keyword_format(keyword_info.name),
                    selection,
                    token,
                    0,
                    memo,
                    lib_import=lib_import,
                    resource_path=resource_path,
                    data=None,
                )
                if item is not None:
                    # Documentation is resolved lazily on completionItem/resolve.
                    completion_context.assign_documentation_resolve(
                        item, keyword_info.get_documentation
                    )
class _ImportLocationInfo:
def __init__(self):
self.library_node_info: Optional[NodeInfo] = None
self.resource_node_info: Optional[NodeInfo] = None
self.setting_section_node_info: Optional[NodeInfo] = None
self.imported_libraries: Set[str] = set()
self.imported_resources: Set[str] = set()
def get_library_import_line(self) -> int:
if self.library_node_info is not None:
return self.library_node_info.node.end_lineno
return -1
def get_resource_import_line(self) -> int:
if self.resource_node_info is not None:
return self.resource_node_info.node.end_lineno
return -1
def _obtain_import_location_info(completion_context) -> _ImportLocationInfo:
    """Scan the document's AST and gather its existing import information.

    Records the (last seen) Library import node, Resource import node and
    Settings section node, plus the set of already-imported library
    sources/names and resource names.
    """
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl.libspec_manager import LibspecManager
    from robot.api import Token

    import_location_info = _ImportLocationInfo()
    # Ok, we have something, let's discover where we want to add the
    # 'Library' or 'Resource'.
    ast = completion_context.get_ast()
    libspec_manager: LibspecManager = completion_context.workspace.libspec_manager
    for node_info in ast_utils.iter_nodes(
        ast,
        accept_class=ast_utils.LIBRARY_IMPORT_CLASSES
        + ast_utils.RESOURCE_IMPORT_CLASSES
        + ast_utils.SETTING_SECTION_CLASSES,
    ):
        if ast_utils.is_library_node_info(node_info):
            import_location_info.library_node_info = node_info
            library_name_token = node_info.node.get_token(Token.NAME)
            if library_name_token is not None:
                # Resolve the libdoc so we can register the library by its
                # actual source path (falling back to its name).
                library_doc_or_error = libspec_manager.get_library_doc_or_error(
                    completion_context.token_value_resolving_variables(
                        library_name_token
                    ),
                    create=True,
                    completion_context=completion_context,
                    args=ast_utils.get_library_arguments_serialized(node_info.node),
                )
                library_doc = library_doc_or_error.library_doc
                if library_doc is not None:
                    if library_doc.source:
                        import_location_info.imported_libraries.add(library_doc.source)
                    else:
                        import_location_info.imported_libraries.add(library_doc.name)
        elif ast_utils.is_resource_node_info(node_info) and node_info.node.name:
            import_location_info.resource_node_info = node_info
            import_location_info.imported_resources.add(node_info.node.name)
        elif ast_utils.is_setting_section_node_info(node_info):
            import_location_info.setting_section_node_info = node_info
    return import_location_info
def complete(
    completion_context: ICompletionContext,
    imported_keyword_name_to_keyword: Dict[str, List[IKeywordFound]],
    use_for_quick_fix=False,
    exact_match=False,
) -> List[CompletionItemTypedDict]:
    """Provide completions for keywords that are not imported in the current doc.

    When `add_import` is enabled, the created completion items also carry the
    Library/Resource import to be added (see `_Collector`).

    :param imported_keyword_name_to_keyword:
        Keywords already reachable from the document (used to avoid offering
        duplicates of what's already imported).
    :param use_for_quick_fix:
        When True this is used to build a quick-fix (forces exact matching and
        import addition regardless of user settings).
    :param exact_match:
        When True only exact keyword-name matches are offered.
    """
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl.robot_generated_lsp_constants import (
        OPTION_ROBOT_COMPLETIONS_KEYWORDS_NOT_IMPORTED_ENABLE,
        OPTION_ROBOT_COMPLETIONS_KEYWORDS_NOT_IMPORTED_ADD_IMPORT,
        OPTION_ROBOT_COMPLETIONS_KEYWORDS_PREFIX_IMPORT_NAME,
    )
    config = completion_context.config
    if use_for_quick_fix:
        # A quick fix targets one specific keyword name, so only exact
        # matches make sense.
        exact_match = True
    else:
        if config is not None:
            if not config.get_setting(
                OPTION_ROBOT_COMPLETIONS_KEYWORDS_NOT_IMPORTED_ENABLE, bool, True
            ):
                # Feature disabled in the user's settings.
                return []
    add_import = True
    prefix_module = False
    if config is not None:
        add_import = config.get_setting(
            OPTION_ROBOT_COMPLETIONS_KEYWORDS_NOT_IMPORTED_ADD_IMPORT, bool, True
        )
        prefix_module = config.get_setting(
            OPTION_ROBOT_COMPLETIONS_KEYWORDS_PREFIX_IMPORT_NAME, bool, False
        )
    if use_for_quick_fix:
        # Quick fixes must always add the missing import (that's the fix).
        add_import = True
    token_info = completion_context.get_current_token()
    if token_info is not None:
        # Only complete when the cursor is on a keyword-name token.
        token = ast_utils.get_keyword_name_token(
            token_info.stack, token_info.node, token_info.token
        )
        if token is not None:
            import_location_info = _obtain_import_location_info(completion_context)
            collector = _Collector(
                completion_context.sel,
                token,
                import_location_info,
                imported_keyword_name_to_keyword,
                exact_match=exact_match,
                add_import=add_import,
                prefix_module=prefix_module,
            )
            _collect_auto_import_completions(
                completion_context, collector, collect_deprecated=False
            )
            return collector.completion_items
return [] | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/auto_import_completions.py | 0.785309 | 0.17883 | auto_import_completions.py | pypi |
from robotframework_ls.impl.text_utilities import (
normalize_robot_name,
matches_name_with_variables,
)
from robotframework_ls.impl.protocols import IKeywordFound
class RobotStringMatcher(object):
    """Matches names using Robot Framework normalization rules.

    The filter text is normalized once via `normalize_robot_name` (case /
    space / underscore insensitive) and then compared against normalized
    candidates.
    """

    def __init__(self, filter_text):
        # Normalize once up-front; all matching compares normalized forms.
        self.filter_text = normalize_robot_name(filter_text)
        # Lazily computed: whether the filter text itself contains a variable.
        self._has_variable = None

    def accepts(self, word):
        """Substring match (an empty filter accepts everything)."""
        if not self.filter_text:
            return True
        return self.filter_text in normalize_robot_name(word)

    def accepts_keyword_name(self, word):
        # Same semantics as `accepts` (previously duplicated code; kept as a
        # separate entry point so subclasses can specialize it).
        return self.accepts(word)

    def is_same_robot_name(self, word):
        """Exact match after normalization."""
        return self.filter_text == normalize_robot_name(word)

    def is_keyword_name_match(self, keyword_name):
        """Exact match, also handling embedded-variable keyword names."""
        normalized = normalize_robot_name(keyword_name)
        if self.filter_text == normalized:
            return True
        if "{" in normalized:
            return matches_name_with_variables(self.filter_text, normalized)
        return False

    def is_variable_name_match(self, variable_name):
        """Exact match for variable names, handling variables on either side."""
        normalized = normalize_robot_name(variable_name)
        if self.filter_text == normalized:
            return True
        if "{" in normalized:
            if matches_name_with_variables(self.filter_text, normalized):
                return True
        if self._has_variable is None:
            from robotframework_ls.impl.variable_resolve import has_variable

            self._has_variable = has_variable(self.filter_text)
        if self._has_variable:
            # We need the other way around if the definition has variables.
            if matches_name_with_variables(normalized, self.filter_text):
                return True
        return False
class MatcherWithResourceOrLibraryName(RobotStringMatcher):
    """Matcher for dotted names such as `BuiltIn.Should Contain`: the keyword
    must live in the given library/resource AND match the qualifier."""

    def __init__(self, resource_or_library_name, qualifier):
        """
        :param resource_or_library_name str:
            The resource or library name to match (i.e.: BuiltIn, my_library).
        :param qualifier:
            The qualifier of the word to be matched in that library.
        """
        RobotStringMatcher.__init__(self, qualifier)
        self.resource_or_library_name = resource_or_library_name
        self.resource_or_library_name_normalized = normalize_robot_name(
            resource_or_library_name
        )

    def _scope_matches(self, keyword_found: IKeywordFound) -> bool:
        # Shared by accepts_keyword/is_keyword_match (previously duplicated).
        # The scope name is the library alias when present, otherwise the
        # resource name or the library name.
        name = keyword_found.library_alias
        if name is None:
            name = keyword_found.resource_name or keyword_found.library_name
        return normalize_robot_name(name) == self.resource_or_library_name_normalized

    def accepts_keyword(self, keyword_found: IKeywordFound):
        """Substring-accept the keyword when its library/resource matches."""
        if self._scope_matches(keyword_found):
            return self.accepts_keyword_name(keyword_found.keyword_name)
        return False

    def is_keyword_match(self, keyword_found: IKeywordFound):
        """Exact-match the keyword when its library/resource matches."""
        if self._scope_matches(keyword_found):
            return self.is_keyword_name_match(keyword_found.keyword_name)
        return False
def build_matchers_with_resource_or_library_scope(token_str: str):
    """
    Given a string such as:
        'BuiltIn.Should Contain'
    it'll return:
        [MatcherWithResourceOrLibraryName('BuiltIn', 'Should Contain')]
    """
    from robotframework_ls.impl.text_utilities import iter_dotted_names

    # One matcher per way of splitting the dotted name into (scope, qualifier).
    resource_matchers = [
        MatcherWithResourceOrLibraryName(scope_name, qualifier)
        for scope_name, qualifier in iter_dotted_names(token_str)
    ]
return resource_matchers | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/string_matcher.py | 0.883538 | 0.151655 | string_matcher.py | pypi |
from robocorp_ls_core.lsp import (
HoverTypedDict,
MarkupKind,
SignatureHelp,
SignatureInformation,
MarkupContentTypedDict,
)
from typing import Optional
from robotframework_ls.impl.protocols import ICompletionContext
def hover(completion_context: ICompletionContext) -> Optional[HoverTypedDict]:
    """Compute hover info (contents + range) for the current cursor position.

    Strategy: if the cursor resolves to a non-keyword definition, show that
    definition's docs directly; for keyword definitions (or when nothing is
    found) the contents are built from the signature help instead.
    """
    from robotframework_ls.impl.find_definition import find_definition_extended
    from robotframework_ls.impl import ast_utils

    definition_info = find_definition_extended(completion_context)
    if definition_info:
        for definition in definition_info.definitions:
            if hasattr(definition, "keyword_found"):
                # If we found a keyword use the signature help.
                break
            return {
                "contents": definition.hover_docs(),
                "range": definition_info.origin_selection_range,
            }
    from robotframework_ls.impl.signature_help import signature_help_internal

    sig_help: Optional[SignatureHelp] = signature_help_internal(completion_context)
    if sig_help is None:
        return None
    node = sig_help.node
    signatures = sig_help.signatures
    if not signatures:
        return None
    try:
        active_signature: SignatureInformation = signatures[sig_help.activeSignature]
    except IndexError:
        # activeSignature may be out of range; fall back to the first one.
        active_signature = signatures[0]
    active_parameter = sig_help.activeParameter
    optional_documentation_markup: Optional[
        MarkupContentTypedDict
    ] = active_signature.documentation
    documentation_markup: MarkupContentTypedDict
    if not optional_documentation_markup:
        documentation_markup = {"kind": MarkupKind.Markdown, "value": ""}
    else:
        documentation_markup = optional_documentation_markup
    kind = documentation_markup["kind"]
    # Now, let's add the signature to the documentation.
    # For plain text no escaping is needed; for markdown escape via
    # html_to_markdown so keyword names/labels don't break the markup.
    escape = lambda s: s
    if kind == MarkupKind.Markdown:
        from robotframework_ls import html_to_markdown

        escape = html_to_markdown.escape
    add_documentation = True
    if kind == MarkupKind.Markdown:
        signature_doc = ["**", escape(sig_help.name), "**"]
    else:
        signature_doc = [sig_help.name]
    # Highlight markers used around the active parameter label.
    if kind == MarkupKind.Markdown:
        prefix_highlight = "*`"
        postfix_highlight = "`*"
    else:
        prefix_highlight = "`"
        postfix_highlight = "`"
    if active_signature.parameters:
        signature_doc.append("(")
        for i, parameter in enumerate(active_signature.parameters):
            if i > 0:
                signature_doc.append(", ")
            escaped_label = escape(parameter.label)
            if i == active_parameter:
                # Hovering a parameter: show a "Parameter: ..." header instead
                # of the full keyword documentation.
                add_documentation = False
                signature_doc.insert(
                    0,
                    f"Parameter: {prefix_highlight}{escaped_label}{postfix_highlight} in Keyword Call.\n\n",
                )
                signature_doc.append(prefix_highlight)
            signature_doc.append(escaped_label)
            if i == active_parameter:
                signature_doc.append(postfix_highlight)
        signature_doc.append(")")
    if add_documentation:
        # When over a parameter, don't add the documentation.
        signature_doc.append("\n\n")
        signature_doc.append(documentation_markup["value"])
    token_info = completion_context.get_current_token()
    if token_info and token_info.token:
        show_range = ast_utils.create_range_from_token(token_info.token)
    else:
        # No token under the cursor: fall back to the whole node's range
        # (converting from the AST's 1-based lines to LSP's 0-based).
        show_range = {
            "start": {"line": node.lineno - 1, "character": node.col_offset},
            "end": {"line": node.end_lineno - 1, "character": node.end_col_offset},
        }
    return {
        "contents": {"kind": kind, "value": "".join(signature_doc)},
        "range": show_range,
} | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/hover.py | 0.789356 | 0.225214 | hover.py | pypi |
import sys
from typing import (
TypeVar,
Any,
Optional,
List,
Sequence,
Tuple,
Iterable,
Generic,
Iterator,
Callable,
Hashable,
Dict,
Set,
Union,
)
from robocorp_ls_core.protocols import (
Sentinel,
IMonitor,
IDocument,
IWorkspace,
IConfig,
IDocumentSelection,
ITestInfoFromSymbolsCacheTypedDict,
)
from robocorp_ls_core.constants import NULL
from robocorp_ls_core.protocols import TypedDict
import enum
from robocorp_ls_core.lsp import (
LocationTypedDict,
RangeTypedDict,
MarkupContentTypedDict,
CompletionItemTypedDict,
LSPMessages,
)
import typing
from robocorp_ls_core.ordered_set import OrderedSet
from contextlib import contextmanager
if sys.version_info[:2] < (3, 8):
class Protocol(object):
pass
else:
from typing import Protocol
T = TypeVar("T")
Y = TypeVar("Y", covariant=True)
# We don't want to import robot in this case (just do it when type-checking).
class IRobotToken(Protocol):
SETTING_HEADER: str
VARIABLE_HEADER: str
TESTCASE_HEADER: str
KEYWORD_HEADER: str
COMMENT_HEADER: str
TESTCASE_NAME: str
KEYWORD_NAME: str
DOCUMENTATION: str
SUITE_SETUP: str
SUITE_TEARDOWN: str
METADATA: str
TEST_SETUP: str
TEST_TEARDOWN: str
TEST_TEMPLATE: str
TEST_TIMEOUT: str
FORCE_TAGS: str
DEFAULT_TAGS: str
LIBRARY: str
RESOURCE: str
VARIABLES: str
SETUP: str
TEARDOWN: str
TEMPLATE: str
TIMEOUT: str
TAGS: str
ARGUMENTS: str
# Use ´RETURN_SETTING` type instead of `RETURN`. `[Return]` is deprecated and
# `RETURN` type will be used with `RETURN` statement in the future.
RETURN: str
RETURN_SETTING: str
NAME: str
VARIABLE: str
ARGUMENT: str
ASSIGN: str
KEYWORD: str
WITH_NAME: str
FOR: str
FOR_SEPARATOR: str
END: str
IF: str
INLINE_IF: str
ELSE_IF: str
ELSE: str
TRY: str
EXCEPT: str
FINALLY: str
AS: str
WHILE: str
RETURN_STATEMENT: str
CONTINUE: str
BREAK: str
SEPARATOR: str
COMMENT: str
CONTINUATION: str
EOL: str
EOS: str
ERROR: str
FATAL_ERROR: str
type: str
value: str
lineno: int # 1-based
col_offset: int # 0-based
error: Any
@property
def end_col_offset(self) -> int: # 0-based
pass
def tokenize_variables(self) -> Iterator["IRobotToken"]:
pass
class IRobotVariableMatch(Protocol):
string: str
identifier: str
base: Optional[str]
items: Tuple[str, ...]
start: int
end: int
@property
def name(self) -> str:
pass
@property
def before(self) -> str:
pass
@property
def match(self) -> str:
pass
@property
def after(self) -> str:
pass
class INode(Protocol):
    """Structural interface of a Robot Framework AST node."""

    type: str
    # Positions follow the RF AST convention: lines 1-based, columns 0-based.
    lineno: int
    end_lineno: int
    col_offset: int
    end_col_offset: int
    tokens: List[IRobotToken]

    def get_token(self, name: str) -> Optional[IRobotToken]:
        """Return a token of the given type, or None when not present."""
        pass

    def get_tokens(self, name: str) -> List[IRobotToken]:
        """Return all tokens of the given type."""
        pass
class ILibraryImportNode(INode, Protocol):
    """AST node of a `Library` import setting."""

    name: str
    alias: Optional[str]
    args: Optional[Sequence[str]]
class IKeywordNode(INode, Protocol):
    """AST node carrying a keyword `name` (presumably a keyword definition —
    confirm against ast_utils usages)."""

    name: str
class IResourceImportNode(INode, Protocol):
    """AST node of a `Resource` import setting (`name` is the imported path)."""

    name: str
class IVariableImportNode(INode, Protocol):
    """AST node of a `Variables` import setting."""

    name: str
class NodeInfo(Generic[Y]):
    """Pairs an AST node with the stack of ancestor nodes leading to it."""

    stack: Tuple[INode, ...]
    node: Y

    __slots__ = ["stack", "node"]

    def __init__(self, stack, node):
        self.stack = stack
        self.node = node

    def __str__(self):
        node_cls = self.node.__class__.__name__
        return f"NodeInfo({node_cls})"

    __repr__ = __str__
class TokenInfo:
    """A token together with its owning node and the stack of ancestor nodes."""

    __slots__ = ["stack", "node", "token"]

    def __init__(self, stack: Tuple[INode, ...], node: INode, token: IRobotToken):
        self.stack = stack
        self.node = node
        self.token = token

    def __str__(self):
        node_cls = self.node.__class__.__name__
        return f"TokenInfo({self.token.value} -- in: {node_cls})"

    __repr__ = __str__
class AdditionalVarInfo:
    """Extra details about a variable occurrence: which identifier char was
    used, the context it appeared in and any extended-syntax part."""

    # Values for `context`.
    CONTEXT_UNDEFINED = 0
    CONTEXT_EXPRESSION = 1

    __slots__ = ["var_identifier", "context", "extended_part"]

    def __init__(
        self,
        var_identifier: str = "",
        context: int = CONTEXT_UNDEFINED,
        extended_part: str = "",
    ):
        """
        :param var_identifier: One of: $,@,%
        """
        self.var_identifier = var_identifier
        self.context = context
        self.extended_part = extended_part

    def copy(self, **kwargs):
        """Return a new AdditionalVarInfo with the given fields overridden."""
        new_kwargs = dict(
            var_identifier=self.var_identifier,
            context=self.context,
            extended_part=self.extended_part,
        )
        new_kwargs.update(kwargs)
        return AdditionalVarInfo(**new_kwargs)

    def __str__(self):
        parts = [f"AdditionalVarInfo({self.var_identifier}"]
        if self.context:
            parts.append(f" -- ctx: {self.context}")
        if self.extended_part:
            parts.append(f" -- extended: {self.extended_part}")
        parts.append(")")
        return "".join(parts)

    __repr__ = __str__
class VarTokenInfo:
    """A variable token with its node, ancestor stack and AdditionalVarInfo."""

    __slots__ = ["stack", "node", "token", "var_info"]

    def __init__(
        self,
        stack: Tuple[INode, ...],
        node: INode,
        token: IRobotToken,
        var_info: AdditionalVarInfo,
    ):
        self.stack = stack
        self.node = node
        self.token = token
        self.var_info = var_info

    def __str__(self):
        node_cls = self.node.__class__.__name__
        return f"VarTokenInfo({self.token.value} (in {node_cls}) - {self.var_info})"

    __repr__ = __str__
class KeywordUsageInfo:
    """Describes one usage (call) of a keyword found in the AST."""

    __slots__ = ["stack", "node", "token", "name", "_is_argument_usage", "prefix"]

    def __init__(
        self,
        stack: Tuple[INode, ...],
        node: INode,
        token: IRobotToken,
        name: str,
        is_argument_usage: bool = False,
        prefix: str = "",
    ):
        self.stack = stack
        self.node = node
        # Note: `token` is actually the keyword name token.
        self.token = token
        self.name = name
        self._is_argument_usage = is_argument_usage
        self.prefix = prefix

    def __repr__(self):
        node_cls = self.node.__class__.__name__
        if self._is_argument_usage:
            return f"KeywordUsageInfo({self.name} - {node_cls} (argument usage))"
        return f"KeywordUsageInfo({self.name} - {node_cls})"

    __str__ = __repr__
class IKeywordArg(Protocol):
@property
def original_arg(self) -> str:
pass
@property
def arg_name(self) -> str:
pass
@property
def is_keyword_arg(self) -> bool:
pass
@property
def is_star_arg(self) -> bool:
pass
def is_arg_type_set(self) -> bool:
pass
@property
def arg_type(self) -> Optional[str]:
pass
def is_default_value_set(self) -> bool:
pass
@property
def default_value(self) -> Optional[str]:
pass
class ILibraryDoc(Protocol):
filename: str
name: str
source: str
symbols_cache: Optional["ISymbolsCache"]
inits: list
doc_format: str
keywords: List["IKeywordDoc"]
doc: str
class ILibraryDocConversions(ILibraryDoc):
"""
Note: these are actually part of the basic library doc but we
put it in a different interface because clients usually shouldn't
use it (it's controlled by the libspec manager).
"""
def convert_docs_to_html(self):
pass
def convert_docs_to_markdown(self):
pass
class IKeywordDoc(Protocol):
name: str
tags: Tuple[str, ...]
lineno: int
doc: str
@property
def args(self) -> Tuple[IKeywordArg, ...]:
pass
@property
def libdoc(self) -> ILibraryDoc:
pass
@property
def deprecated(self) -> bool:
pass
@property
def source(self) -> str:
pass
@property
def doc_format(self) -> str:
pass
def to_dictionary(self) -> dict:
pass
class ILibraryDocOrError(Protocol):
    """Result of resolving a library: a libdoc on success, else an error message."""

    library_doc: Optional[ILibraryDoc]
    error: Optional[str]
class IRobotDocument(IDocument, Protocol):
def get_type(self) -> str:
pass
def get_ast(self) -> Any:
pass
def get_python_ast(self) -> Optional[Any]:
pass
def get_yaml_contents(self) -> Optional[Any]:
pass
symbols_cache: Optional["ISymbolsCache"]
class ISymbolsJsonListEntry(TypedDict):
    """JSON-friendly description of one symbol (LSP SymbolInformation shape)."""

    name: str
    kind: int  # SymbolKind
    location: LocationTypedDict
    containerName: str
class ISymbolKeywordInfo(Protocol):
name: str
def get_documentation(self) -> MarkupContentTypedDict:
"""
Note: It should be computed on demand (and can be slow).
"""
class ISymbolsCache(Protocol):
def get_uri(self) -> Optional[str]:
"""
If we're referencing a library (and have the symbols from a libspec),
the uri may be None.
"""
def has_keyword_usage(self, normalized_keyword_name: str) -> bool:
pass
def has_global_variable_definition(self, normalized_variable_name: str) -> bool:
pass
def has_variable_reference(self, normalized_variable_name: str) -> bool:
pass
def get_json_list(self) -> List[ISymbolsJsonListEntry]:
pass
def get_library_info(self) -> Optional[ILibraryDoc]:
pass
def get_doc(self) -> Optional[IRobotDocument]:
pass
def get_test_info(self) -> Optional[List[ITestInfoFromSymbolsCacheTypedDict]]:
pass
def iter_keyword_info(self) -> Iterator[ISymbolKeywordInfo]:
pass
class ICompletionContextWorkspaceCaches(Protocol):
cache_hits: int
def on_file_changed(self, filename: str):
pass
def on_updated_document(self, uri: str, document: Optional[IRobotDocument]):
pass
def clear_caches(self):
pass
def dispose(self):
pass
def get_cached_dependency_graph(
self, cache_key: Hashable
) -> Optional["ICompletionContextDependencyGraph"]:
pass
@contextmanager
def invalidation_tracker(self):
"""
Note that it's possible that changes happen in-flight. This means that
we must track changes while the dependency graph is being calculated.
So, one must do something as:
with caches.invalidation_tracker() as invalidation_tracker:
... compute dependency graph
caches.cache_dependency_graph(cache_key, dependency_graph, invalidation_tracker)
"""
def cache_dependency_graph(
self,
cache_key: Hashable,
dependency_graph: "ICompletionContextDependencyGraph",
invalidation_tracker,
) -> None:
pass
class IRobotWorkspace(IWorkspace, Protocol):
completion_context_workspace_caches: ICompletionContextWorkspaceCaches
libspec_manager: Any
def iter_all_doc_uris_in_workspace(
self, extensions: Tuple[str, ...]
) -> Iterable[str]:
pass
class IKeywordFound(Protocol):
"""
:ivar completion_context:
This may be a new completion context, created when a new document is
being analyzed (the keyword was created for that completion context).
For libraries the initial completion context is passed.
:ivar source:
Source where the keyword was found.
:ivar lineno:
Line where it was found (0-based).
"""
@property
def keyword_name(self) -> str:
pass
@property
def keyword_ast(self) -> Optional[INode]:
"""
Only available when we do have a keyword AST (i.e.: not for library
keywords).
"""
@property
def keyword_args(self) -> Sequence[IKeywordArg]:
pass
def is_deprecated(self) -> bool:
pass
def compute_docs_with_signature(self) -> MarkupContentTypedDict:
pass
def compute_docs_without_signature(self) -> MarkupContentTypedDict:
pass
completion_context: Optional["ICompletionContext"]
completion_item_kind: int = -1
@property
def source(self) -> str:
"""
Provides the filesystem location where the keyword was found.
"""
@property
def lineno(self) -> int:
pass
@property
def end_lineno(self) -> int:
pass
@property
def col_offset(self) -> int:
pass
@property
def end_col_offset(self) -> int:
pass
@property
def library_name(self) -> Optional[str]:
# If it's a library, this is the name of the library.
pass
@property
def resource_name(self) -> Optional[str]:
# If it's a resource, this is the basename of the resource without the extension.
pass
@property
def library_alias(self) -> Optional[str]:
pass
# These are added if possible if there's some range to include the
# full scope of the keyword. It should always include
# the lineno/end_lineno range (so, it's a superset).
@property
def scope_lineno(self) -> Optional[int]:
pass
@property
def scope_end_lineno(self) -> Optional[int]:
pass
@property
def scope_col_offset(self) -> Optional[int]:
pass
@property
def scope_end_col_offset(self) -> Optional[int]:
pass
class IKeywordCollector(Protocol):
def accepts(self, keyword_name: str) -> bool:
"""
:param keyword_name:
The name of the keyword to be accepted or not.
:return bool:
If the return is True, on_keyword(...) is called (otherwise it's not
called).
"""
def on_keyword(self, keyword_found: IKeywordFound):
"""
:param IKeywordFound keyword_found:
"""
def on_resolved_library(
self,
completion_context: "ICompletionContext",
library_node: Optional[INode],
library_doc: "ILibraryDoc",
):
pass
def on_unresolved_library(
self,
completion_context: "ICompletionContext",
library_name: str,
lineno: int,
end_lineno: int,
col_offset: int,
end_col_offset: int,
error_msg: Optional[str],
resolved_name: str,
):
pass
def on_unresolved_resource(
self,
completion_context: "ICompletionContext",
resource_name: str,
lineno: int,
end_lineno: int,
col_offset: int,
end_col_offset: int,
error_msg: Optional[str],
resolved_name: str,
):
pass
class AbstractKeywordCollector:
def on_resolved_library(
self,
completion_context: "ICompletionContext",
library_node,
library_doc: ILibraryDoc,
):
pass
def on_unresolved_library(
self,
completion_context: "ICompletionContext",
library_name: str,
lineno: int,
end_lineno: int,
col_offset: int,
end_col_offset: int,
error_msg: Optional[str],
resolved_name: str,
):
pass
def on_unresolved_resource(
self,
completion_context: "ICompletionContext",
resource_name: str,
lineno: int,
end_lineno: int,
col_offset: int,
end_col_offset: int,
error_msg: Optional[str],
resolved_name: str,
):
pass
class IDefinition(Protocol):
keyword_name: str = "" # Can be empty if it's not found as a keyword.
# Note: Could be None (i.e.: we found it in a library spec file which doesn't have the source).
source: str = ""
# Note: if we found it in a library spec file which doesn't have the lineno, it should be 0
lineno: int = 0
# Note: if we found it in a library spec file which doesn't have the lineno, it should be 0
end_lineno: int = 0
col_offset: int = 0
end_col_offset: int = 0
# These are added if possible if there's some range to include the
# full scope (of a keyword, test, etc). It should always include
# the lineno/end_lineno range (so, it's a superset).
scope_lineno: Optional[int] = None
scope_end_lineno: Optional[int] = None
scope_col_offset: Optional[int] = None
scope_end_col_offset: Optional[int] = None
def hover_docs(self) -> MarkupContentTypedDict:
pass
class IKeywordDefinition(IDefinition, Protocol):
    """Definition that resolves to a keyword (carries the matched IKeywordFound)."""

    keyword_found: IKeywordFound
class IVariableDefinition(IDefinition, Protocol):
    """Definition that resolves to a variable (carries the matched IVariableFound)."""

    variable_found: "IVariableFound"
def cast_to_keyword_definition(definition: IDefinition) -> Optional[IKeywordDefinition]:
    """Narrow an IDefinition to IKeywordDefinition (None when it isn't one)."""
    if not hasattr(definition, "keyword_found"):
        return None
    return typing.cast(IKeywordDefinition, definition)
def cast_to_variable_definition(
    definition: IDefinition,
) -> Optional[IVariableDefinition]:
    """Narrow an IDefinition to IVariableDefinition (None when it isn't one)."""
    if not hasattr(definition, "variable_found"):
        return None
    return typing.cast(IVariableDefinition, definition)
class IBaseCompletionContext(Protocol):
@property
def monitor(self) -> Optional[IMonitor]:
pass
@property
def workspace(self) -> IRobotWorkspace:
pass
@property
def config(self) -> Optional[IConfig]:
pass
def check_cancelled(self) -> None:
pass
class CompletionType(enum.Enum):
    """Origin of a completion request."""

    # Completion requested while editing a document.
    regular = 1
    # Completion requested from a shell context (presumably the interactive
    # console -- confirm against callers).
    shell = 2
class LibraryDependencyInfo:
    """Describes one library dependency of a document (name, alias, args and
    the import node when the import came from the AST)."""

    def __init__(
        self,
        name: str,
        alias: Optional[str],
        builtin: bool,
        args: Optional[str],
        node: Optional[ILibraryImportNode],
    ):
        """
        :param builtin:
            Note that builtin should only be set == True if it's actually known
            that it's a builtin, otherwise it should be set to False (in which
            case it's computed internally if it's builtin or not).
        """
        self.name = name
        self.alias = alias
        self.builtin = builtin
        self.args = args
        self.node = node

    def to_dict(self):
        """Serialize to a dict; falsy optional fields are omitted."""
        ret = {"name": self.name}
        for key in ("alias", "builtin", "args"):
            value = getattr(self, key)
            if value:
                ret[key] = value
        return ret
class ISymbolsCacheReverseIndex(Protocol):
def get_global_variable_uri_definitions(
self, normalized_var_name: str
) -> Optional[Set[str]]:
pass
def has_global_variable(self, normalized_var_name: str) -> bool:
pass
class ICompletionContextDependencyGraph(Protocol):
def add_library_infos(
self,
doc_uri: str,
library_infos: OrderedSet[LibraryDependencyInfo],
):
pass
def add_resource_infos(
self,
doc_uri: str,
resource_imports_as_docs: Sequence[
Tuple[IResourceImportNode, Optional[IRobotDocument]]
],
):
pass
def add_variable_infos(
self,
doc_uri: str,
new_variable_imports: Sequence[
Tuple[IVariableImportNode, Optional[IRobotDocument]]
],
):
pass
def get_root_doc(self) -> IRobotDocument:
pass
def iter_libraries(self, doc_uri: str) -> Iterator[LibraryDependencyInfo]:
"""
Provides an iterator(doc_uri, library_dependency_infos)
"""
def iter_all_libraries(self) -> Iterator[LibraryDependencyInfo]:
pass
def iter_resource_imports_with_docs(
self, doc_uri: str
) -> Iterator[Tuple[IResourceImportNode, Optional[IRobotDocument]]]:
pass
def iter_all_resource_imports_with_docs(
self,
) -> Iterator[Tuple[IResourceImportNode, Optional[IRobotDocument]]]:
pass
def iter_variable_imports_as_docs(
self, doc_uri: str
) -> Iterator[Tuple[IVariableImportNode, Optional[IRobotDocument]]]:
pass
def iter_all_variable_imports_as_docs(
self,
) -> Iterator[Tuple[IVariableImportNode, Optional[IRobotDocument]]]:
pass
def to_dict(self) -> dict:
pass
def do_invalidate_on_uri_change(self, uri: str) -> bool:
pass
class IVariablesFromArgumentsFileLoader(Protocol):
def get_variables(self) -> Tuple["IVariableFound", ...]:
pass
class ILocalizationInfo(Protocol):
def __init__(self, language_codes: Union[Tuple[str, ...], str]):
pass
@property
def language_codes(self) -> Tuple[str, ...]:
pass
def iter_bdd_prefixes_on_read(self) -> Iterator[str]:
"""
Note that we specify the reason for iterating because for instance, when
writing code we could want just the completions for the specified
language in the file and while reading (i.e.: analyzing) we'd want it
for all languages.
"""
def iter_languages_on_write(
self,
) -> Iterator[Any]: # Actually Iterator[robot.api.Language]
"""
Provides the languages used when writing a doc (i.e.: completions, ...).
"""
class ICompletionContext(Protocol):
tracing: bool
def __init__(
self,
doc,
line=Sentinel.SENTINEL,
col=Sentinel.SENTINEL,
workspace=None,
config=None,
memo=None,
monitor: IMonitor = NULL,
variables_from_arguments_files_loader: Sequence[
IVariablesFromArgumentsFileLoader
] = (),
) -> None:
pass
def resolve_completion_item(
self, data, completion_item: CompletionItemTypedDict, monaco: bool = False
) -> None:
pass
@property
def lsp_messages(
self,
) -> Optional[LSPMessages]:
pass
@property
def variables_from_arguments_files_loader(
self,
) -> Sequence[IVariablesFromArgumentsFileLoader]:
pass
@property
def type(self) -> CompletionType:
pass
@property
def monitor(self) -> IMonitor:
pass
def check_cancelled(self):
pass
def create_copy_with_selection(self, line: int, col: int) -> "ICompletionContext":
pass
def create_copy(self, doc: IRobotDocument) -> "ICompletionContext":
pass
def create_copy_with_config(self, config: IConfig) -> "ICompletionContext":
pass
def create_copy_doc_line_col(
self, doc: IRobotDocument, line: int, col: int
) -> "ICompletionContext":
pass
@property
def original_doc(self) -> IRobotDocument:
pass
@property
def original_sel(self) -> Any:
pass
@property
def doc(self) -> IRobotDocument:
pass
@property
def sel(self) -> IDocumentSelection:
pass
@property
def memo(self) -> Any:
pass
@property
def config(self) -> Optional[IConfig]:
pass
@property
def workspace(self) -> IRobotWorkspace:
pass
def get_type(self) -> Any:
pass
def get_ast(self) -> Any:
pass
def get_ast_current_section(self) -> Optional[INode]:
"""
:rtype: robot.parsing.model.blocks.Section|NoneType
"""
def get_current_section_name(self) -> Optional[str]:
pass
def get_current_token(self) -> Optional[TokenInfo]:
pass
def get_all_variables(self) -> Tuple[NodeInfo, ...]:
pass
def get_doc_normalized_var_name_to_var_found(self) -> Dict[str, "IVariableFound"]:
pass
def get_settings_normalized_var_name_to_var_found(
self,
) -> Dict[str, "IVariableFound"]:
pass
def get_builtins_normalized_var_name_to_var_found(
self, resolved
) -> Dict[str, "IVariableFound"]:
pass
def get_arguments_files_normalized_var_name_to_var_found(
self,
) -> Dict[str, "IVariableFound"]:
pass
def get_current_variable(self, section=None) -> Optional[VarTokenInfo]:
"""
Provides the current variable token. Note that it won't include '{' nor '}'.
"""
def get_resource_import_as_doc(
self, resource_import: INode, check_as_module: bool = False
) -> Optional[IRobotDocument]:
pass
def get_variable_imports(self) -> Tuple[INode, ...]:
pass
def get_variable_import_as_doc(self, variables_import) -> Optional[IRobotDocument]:
pass
def get_current_keyword_definition(self) -> Optional[IKeywordDefinition]:
pass
def get_resource_imports(
self,
) -> Tuple[IResourceImportNode, ...]:
pass
def get_resource_imports_as_docs(
self,
) -> Tuple[Tuple[IResourceImportNode, Optional[IRobotDocument]], ...]:
pass
def get_resource_inits_as_docs(self) -> Tuple[IRobotDocument, ...]:
pass
def get_variable_imports_as_docs(
self,
) -> Tuple[Tuple[IVariableImportNode, Optional[IRobotDocument]], ...]:
pass
def get_imported_libraries(self) -> Tuple[ILibraryImportNode, ...]:
pass
def token_value_resolving_variables(self, token: IRobotToken) -> str:
pass
def token_value_and_unresolved_resolving_variables(
self, token: IRobotToken
) -> Tuple[str, Tuple[Tuple[IRobotToken, str], ...]]:
pass
def get_current_keyword_definition_and_usage_info(
self,
) -> Optional[Tuple[IKeywordDefinition, KeywordUsageInfo]]:
pass
def get_current_keyword_usage_info(
self,
) -> Optional[KeywordUsageInfo]:
pass
def assign_documentation_resolve(
self,
completion_item: CompletionItemTypedDict,
compute_documentation: Callable[[], MarkupContentTypedDict],
) -> None:
pass
def collect_dependency_graph(self) -> ICompletionContextDependencyGraph:
pass
def iter_dependency_and_init_resource_docs(
self, dependency_graph
) -> Iterator[IRobotDocument]:
pass
def obtain_symbols_cache_reverse_index(self) -> Optional[ISymbolsCacheReverseIndex]:
pass
def get_ast_localization_info(self) -> ILocalizationInfo:
pass
class VariableKind:
    """Human-readable labels classifying where/how a variable was defined."""

    VARIABLE = "Variable"
    BUILTIN = "Builtin Variable"
    ARGUMENT = "Argument"
    ENV_VARIABLE = "Environment Variable"
    # Variables coming from imports/files rather than robot sources.
    SETTINGS = "Variable (settings)"
    PYTHON = "Variable (python)"
    YAML = "Variable (yaml)"
    ARGUMENTS_FILE = "Arguments file"
    # Variables assigned/set at runtime with different scopes.
    LOCAL_ASSIGN_VARIABLE = "Variable (local assign)"
    LOCAL_SET_VARIABLE = "Variable (local set)"
    TASK_SET_VARIABLE = "Variable (task set)"
    TEST_SET_VARIABLE = "Variable (test set)"
    SUITE_SET_VARIABLE = "Variable (suite set)"
    GLOBAL_SET_VARIABLE = "Variable (global)"
    ENV_SET_VARIABLE = "Variable (environment)"
# Kinds whose definitions are local to a keyword/test body (arguments, plain
# assignments and local sets), as opposed to suite/global scope.
LOCAL_ASSIGNS_VARIABLE_KIND = {
    VariableKind.ARGUMENT,
    VariableKind.LOCAL_ASSIGN_VARIABLE,
    VariableKind.LOCAL_SET_VARIABLE,
}
class IVariableFound(Protocol):
    """
    :ivar variable_name:
        This is the value that we should use when completing.
        It's the name of the variable without `${}` chars.
    :ivar variable_value:
        The value of the variable -- in general used to show information
        regarding that variable to the user.
    :ivar variable_kind:
        One of the constants in `VariableKind` (defaults to
        `VariableKind.VARIABLE`).
    :ivar completion_context:
        This may be a new completion context, created when a new document is
        being analyzed (the variable was created for that completion context).
    :ivar source:
        Source where the variable was found.
    :ivar lineno:
        Line where it was found (0-based).
    :ivar stack:
        The stack where the variable was found (only available if it was
        found in a robot file where the ast is available -- i.e.: settings,
        yaml, python, etc. variables don't have a stack available).
    """
    variable_name: str = ""
    variable_value: str = ""
    variable_kind: str = VariableKind.VARIABLE
    completion_context: Optional[ICompletionContext] = None
    stack: Optional[Tuple[INode, ...]] = None
    @property
    def is_local_variable(self) -> bool:
        # Protocol stub (see LOCAL_ASSIGNS_VARIABLE_KIND for local kinds).
        pass
    @property
    def source(self) -> str:
        pass
    # Note: line/offsets 0-based.
    @property
    def lineno(self) -> int:
        pass
    @property
    def end_lineno(self) -> int:
        pass
    @property
    def col_offset(self) -> int:
        pass
    @property
    def end_col_offset(self) -> int:
        pass
class IVariablesCollector(Protocol):
    """Protocol for receiving variables discovered during collection."""
    def accepts(self, variable_name: str) -> bool:
        """
        :param variable_name:
            The name of the variable (i.e.: ${some_var}).
        """
    def on_variable(self, variable_found: IVariableFound):
        # Called with each variable for which `accepts` returned True.
        pass
    def on_unresolved_variable_import(
        self,
        completion_context: "ICompletionContext",
        variable_import_name: str,
        lineno: int,
        end_lineno: int,
        col_offset: int,
        end_col_offset: int,
        error_msg: Optional[str],
        resolved_name: str,
    ):
        # Called when a `Variables` import could not be resolved.
        pass
    def on_env_variable(self, variable_found: IVariableFound):
        """
        Called for environment variables using the
        'Set Keyword Variable' keyword.
        Note: doesn't call `accepts` first.
        """
class AbstractVariablesCollector:
    """Base class providing no-op defaults for the optional
    IVariablesCollector callbacks."""
    def on_env_variable(self, variable_found: IVariableFound):
        pass
    def on_unresolved_variable_import(
        self,
        completion_context: "ICompletionContext",
        variable_import_name: str,
        lineno: int,
        end_lineno: int,
        col_offset: int,
        end_col_offset: int,
        error_msg: Optional[str],
        resolved_name: str,
    ):
        pass
class IOnDependencyChanged(Protocol):
    """Callback protocol: called with the uri of a dependency that changed."""
    def __call__(self, uri: str):
        pass | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/protocols.py | 0.611962 | 0.174077 | protocols.py | pypi |
import os.path
from robocorp_ls_core.robotframework_log import get_logger
from typing import Optional, List, Tuple
from robotframework_ls.impl.protocols import ICompletionContext
from robocorp_ls_core.lsp import CompletionItemTypedDict
from robocorp_ls_core.basic import normalize_filename
from robotframework_ls.impl.robot_constants import (
ROBOT_AND_TXT_FILE_EXTENSIONS,
LIBRARY_FILE_EXTENSIONS,
VARIABLE_FILE_EXTENSIONS,
)
log = get_logger(__name__)
def _create_completion_item(
    library_name, selection, token, start_col_offset=None
) -> CompletionItemTypedDict:
    """Build a Module completion item replacing the token text with `library_name`.

    The replacement range covers the current line from `start_col_offset`
    (when given, otherwise the token's own column) up to the token's end column.
    """
    from robocorp_ls_core.lsp import (
        CompletionItem,
        CompletionItemKind,
        InsertTextFormat,
        Position,
        Range,
        TextEdit,
    )

    first_col = token.col_offset if start_col_offset is None else start_col_offset
    replace_range = Range(
        start=Position(selection.line, first_col),
        end=Position(selection.line, token.end_col_offset),
    )
    edit = TextEdit(replace_range, library_name)
    item = CompletionItem(
        library_name,
        kind=CompletionItemKind.Module,
        text_edit=edit,
        insertText=edit.newText,
        documentation="",
        insertTextFormat=InsertTextFormat.Snippet,
    )
    return item.to_dict()
def _add_completions_from_dir(
    completion_context,
    directory,
    matcher,
    ret: List[CompletionItemTypedDict],
    sel,
    token,
    qualifier,
    extensions,
    skip_current,
):
    """Append completion items for matching files/subdirectories of `directory`.

    :param qualifier: text already typed; its length determines the start
        column of the replacement range.
    :param extensions: tuple of file extensions considered a match.
    :param skip_current: when True, an entry resolving back to the current
        document is not offered.
    """
    from robocorp_ls_core import uris
    def normfile(path):
        return normalize_filename(path)
    curr_file = normfile(uris.to_fs_path(completion_context.doc.uri))
    try:
        # This is ok if the directory doesn't exist (or can't be listed):
        # there's simply nothing to complete. Narrowed from a bare `except:`
        # so KeyboardInterrupt/SystemExit still propagate.
        contents = sorted(os.listdir(directory))
    except OSError:
        return
    for filename in contents:
        if filename.endswith(extensions):
            # If that'd be a match for the current .robot file, don't show it.
            if skip_current and curr_file == normfile(
                os.path.join(directory, filename)
            ):
                continue
            use_path = filename
        elif filename not in ("__pycache__", ".git") and os.path.isdir(
            os.path.join(directory, filename)
        ):
            # Directories get a trailing "/" so the user can keep drilling down.
            use_path = filename + "/"
        else:
            continue
        if matcher.accepts(use_path):
            ret.append(
                _create_completion_item(
                    use_path, sel, token, start_col_offset=sel.col - len(qualifier)
                )
            )
def _get_completions(
    completion_context: ICompletionContext,
    token,
    match_libs,
    extensions: Tuple[str, ...],
    skip_current: bool,
) -> List[CompletionItemTypedDict]:
    """
    Computes completions for an import name (Library/Resource/Variables).

    :param token: the (possibly partial) import-name token at the cursor.
    :param match_libs:
        If True, library names known to the libspec manager are offered in
        addition to filesystem matches.
    :param extensions: file extensions considered as filesystem matches.
    :param skip_current:
        If we'd get a match for the current (.robot or .resource)
        file it will not be added.
    """
    from robotframework_ls.impl.string_matcher import RobotStringMatcher
    from robocorp_ls_core import uris
    from robotframework_ls.impl.robot_constants import BUILTIN_LIB, RESERVED_LIB
    from robotframework_ls.impl import ast_utils
    ret: List[CompletionItemTypedDict] = []
    sel = completion_context.sel
    value_to_cursor = token.value
    # Trim the token value to the part actually before the cursor.
    if token.end_col_offset > sel.col:
        value_to_cursor = value_to_cursor[: -(token.end_col_offset - sel.col)]
    # Resolve variables typed inside the path (e.g. ${CURDIR}/...).
    if "{" in value_to_cursor:
        value_to_cursor = completion_context.token_value_resolving_variables(
            ast_utils.create_token(value_to_cursor)
        )
    # (dirname, basename) of what was typed so far.
    value_to_cursor_split = os.path.split(value_to_cursor)
    if os.path.isabs(value_to_cursor):
        # Absolute path typed: complete directly from that directory.
        _add_completions_from_dir(
            completion_context,
            value_to_cursor_split[0],
            RobotStringMatcher(value_to_cursor_split[1]),
            ret,
            sel,
            token,
            value_to_cursor_split[1],
            extensions,
            skip_current=skip_current,
        )
    else:
        if match_libs:
            matcher = RobotStringMatcher(value_to_cursor)
            libspec_manager = completion_context.workspace.libspec_manager
            library_names = set(libspec_manager.get_library_names())
            # BuiltIn/Reserved aren't meaningful import targets here.
            library_names.discard(BUILTIN_LIB)
            library_names.discard(RESERVED_LIB)
            for library_name in library_names:
                if matcher.accepts(library_name):
                    ret.append(_create_completion_item(library_name, sel, token))
        # After checking the existing library names in memory (because we
        # loaded them at least once), check libraries in the filesystem.
        uri = completion_context.doc.uri
        path = uris.to_fs_path(uri)
        dirname = os.path.dirname(path)
        matcher = RobotStringMatcher(value_to_cursor_split[1])
        directory = os.path.join(dirname, value_to_cursor_split[0])
        _add_completions_from_dir(
            completion_context,
            directory,
            matcher,
            ret,
            sel,
            token,
            value_to_cursor_split[1],
            extensions,
            skip_current=skip_current,
        )
    return ret
def _get_resource_completions(
    completion_context, token
) -> List[CompletionItemTypedDict]:
    """Completions for `Resource` import names (.robot/.txt style files)."""
    return _get_completions(
        completion_context,
        token,
        match_libs=False,
        extensions=ROBOT_AND_TXT_FILE_EXTENSIONS,
        skip_current=True,
    )
def _get_library_completions(
    completion_context, token
) -> List[CompletionItemTypedDict]:
    """Completions for `Library` import names (known libs + library files)."""
    return _get_completions(
        completion_context,
        token,
        match_libs=True,
        extensions=LIBRARY_FILE_EXTENSIONS,
        skip_current=False,
    )
def _get_variable_completions(
    completion_context, token
) -> List[CompletionItemTypedDict]:
    """Completions for `Variables` import names (library + variable files)."""
    return _get_completions(
        completion_context,
        token,
        match_libs=True,
        extensions=LIBRARY_FILE_EXTENSIONS + VARIABLE_FILE_EXTENSIONS,
        skip_current=False,
    )
class _Requisites(object):
def __init__(self, token, found_type: str):
self.token = token
self._type = found_type
@property
def is_library(self):
return self._type == "library"
@property
def is_resource(self):
return self._type == "resource"
@property
def is_variables(self):
return self._type == "variables"
def get_requisites(completion_context: ICompletionContext) -> Optional[_Requisites]:
    """Return the import-name token under the cursor tagged with its kind.

    Checks, in order, whether the current token names a Library, Resource or
    Variables import; returns None when it's none of those (or when there is
    no current token).
    """
    from robotframework_ls.impl import ast_utils

    token_info = completion_context.get_current_token()
    if token_info is None:
        return None

    # Probe each import kind in the same order as before.
    finders = (
        (ast_utils.get_library_import_name_token, "library"),
        (ast_utils.get_resource_import_name_token, "resource"),
        (ast_utils.get_variables_import_name_token, "variables"),
    )
    for find_token, found_type in finders:
        token = find_token(
            token_info.node, token_info.token, generate_empty_on_eol=True
        )
        if token is not None:
            return _Requisites(token, found_type)
    return None
def complete(completion_context: ICompletionContext) -> List[CompletionItemTypedDict]:
    """
    Provides the completions for 'Library', 'Resource' and 'Variables' imports.

    Never raises: any error is logged and an empty list is returned.
    """
    try:
        requisites = get_requisites(completion_context)
        if requisites is None:
            return []
        return complete_with_requisites(completion_context, requisites)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; completion errors are logged, not raised.
        log.exception()
    return []
def complete_with_requisites(
    completion_context: ICompletionContext, requisites: _Requisites
) -> List[CompletionItemTypedDict]:
    """Dispatch to the proper completion provider based on the import kind.

    Never raises: errors are logged and an empty list is returned.
    """
    try:
        if requisites.is_library:
            return _get_library_completions(completion_context, requisites.token)
        elif requisites.is_resource:
            return _get_resource_completions(completion_context, requisites.token)
        elif requisites.is_variables:
            return _get_variable_completions(completion_context, requisites.token)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        log.exception()
    # Fix: also return a list on fall-through (unknown kind) -- previously
    # this path returned None implicitly despite the declared return type.
    return []
import os
from contextlib import contextmanager
from typing import List, Any, Dict, Set, Tuple, Optional
from robocorp_ls_core.protocols import TypedDict
from robocorp_ls_core.basic import isinstance_name
from robotframework_ls.impl.protocols import ICompletionContext, IKeywordFound, INode
from robotframework_ls.impl.text_utilities import (
normalize_robot_name as get_internal_name,
)
class _UserKeywordType(TypedDict):
    # Serializable description of a user keyword in the flow-explorer model.
    type: str  # always "user-keyword" where constructed in this module
    name: str  # display name, suffixed with the origin, e.g. "Kw (my suite)"
    internal_name: str  # normalized form of `name` (see normalize_robot_name)
    kind: str  # e.g. "implemented"
    doc: str
    args: List[str]
    body: List[Dict]
class _KeywordRecursionStack:
    """Tracks (normalized) keyword names currently being expanded so the
    model builder can cut off recursive keyword definitions."""

    _stack: Set[str]

    def __init__(self) -> None:
        self._stack = set()

    def __contains__(self, keyword_name: str) -> bool:
        return get_internal_name(keyword_name) in self._stack

    @contextmanager
    def scoped(self, keyword_name):
        """Keep `keyword_name` on the stack for the duration of the `with` block."""
        internal_name = get_internal_name(keyword_name)
        self._stack.add(internal_name)
        try:
            yield
        finally:
            # Bug fix: without try/finally an exception raised inside the
            # `with` block would leave the name on the stack forever.
            self._stack.remove(internal_name)
class _UserKeywordCollector:
    """Accumulates user-keyword model entries, de-duplicating membership
    checks by the normalized keyword name."""

    _user_keywords: List[_UserKeywordType]
    _name_collection: Set[str]

    def __init__(self) -> None:
        self._user_keywords = []
        self._name_collection = set()

    @property
    def keywords(self) -> List[_UserKeywordType]:
        """Collected entries, in insertion order."""
        return self._user_keywords

    def __contains__(self, keyword_name: str) -> bool:
        return get_internal_name(keyword_name) in self._name_collection

    def append(self, keyword: _UserKeywordType) -> None:
        # Entries without a "name" key are ignored (they can't be indexed).
        if "name" not in keyword:
            return
        self._user_keywords.append(keyword)
        self._name_collection.add(get_internal_name(keyword["name"]))
def _compute_suite_name(completion_context: ICompletionContext) -> str:
suite_name = os.path.splitext(os.path.basename(completion_context.doc.uri))[0]
suite_name = suite_name.title()
return suite_name
def build_flow_explorer_model(completion_contexts: List[ICompletionContext]) -> dict:
    """Build the flow-explorer model (a nested dict) from the given contexts.

    The first context becomes the root suite; any remaining ones are attached
    under its "suites" key. Each suite holds its tasks (tests) and the user
    keywords referenced while expanding them. Returns {} when no context has
    an AST.
    """
    from robotframework_ls.impl import ast_utils
    suites: list = []
    # Shared across all suites so cross-suite recursion is also detected.
    recursion_stack: _KeywordRecursionStack = _KeywordRecursionStack()
    for completion_context in completion_contexts:
        ast = completion_context.get_ast()
        # Uncomment to print ast.
        # ast_utils.print_ast(ast)
        if ast:
            user_keywords_collector = _UserKeywordCollector()
            suite_name = _compute_suite_name(completion_context)
            tasks: list = []
            # Note: `keywords` aliases the collector's internal list, so it
            # grows as keywords are collected while building the hierarchy.
            keywords: list = user_keywords_collector.keywords
            suite = {
                "type": "suite",
                "name": suite_name,
                "source": completion_context.doc.uri,
                "tasks": tasks,
                "keywords": keywords,
                "setup": None,
                "teardown": None,
            }
            suites.append(suite)
            for test in ast_utils.iter_tests(ast):
                test_name = f"{test.node.name} ({suite_name.lower()})"
                test_body: list = []
                test_info = {
                    "type": "task",
                    "name": test_name,
                    "internal_name": get_internal_name(test_name),
                    "doc": "",
                    "setup": None,
                    "teardown": None,
                    "body": test_body,
                }
                tasks.append(test_info)
                for node_info in ast_utils.iter_all_nodes(test.node, recursive=False):
                    with recursion_stack.scoped(test_name):
                        _build_hierarchy(
                            completion_context=completion_context,
                            curr_stack=node_info.stack,
                            curr_ast=node_info.node,
                            suite_name=suite_name,
                            parent_body=test_body,
                            memo={},
                            recursion_stack=recursion_stack,
                            user_keywords_collector=user_keywords_collector,
                            parent_node=test_info,
                        )
            for user_keyword in ast_utils.iter_keywords(ast):
                user_keyword_name = f"{user_keyword.node.name} ({suite_name.lower()})"
                user_keyword_body: list = []
                user_keyword_info = {
                    "type": "user-keyword",
                    "kind": "implemented",
                    "name": user_keyword_name,
                    "internal_name": get_internal_name(user_keyword_name),
                    "doc": "",
                    "body": user_keyword_body,
                }
                # Keywords var will be populated when building hierarchy if importing statements
                # Checking to see if it already exists before appending
                if get_internal_name(user_keyword_name) not in user_keywords_collector:
                    keywords.append(user_keyword_info)
                for node_info in ast_utils.iter_all_nodes(
                    user_keyword.node, recursive=False
                ):
                    with recursion_stack.scoped(user_keyword_name):
                        _build_hierarchy(
                            completion_context=completion_context,
                            curr_stack=node_info.stack,
                            curr_ast=node_info.node,
                            suite_name=suite_name,
                            parent_body=user_keyword_body,
                            memo={},
                            recursion_stack=recursion_stack,
                            user_keywords_collector=user_keywords_collector,
                        )
    if not suites:
        return {}
    # Reorder to the expected structure where we must specify the root suite.
    root_suite = suites[0]
    if len(suites) > 1:
        root_suite["suites"] = suites[1:]
    return root_suite
def _build_hierarchy(
    completion_context: ICompletionContext,
    curr_stack: Tuple[INode, ...],
    curr_ast: Any,
    suite_name: str,
    parent_body: List[Any],
    memo: dict,
    recursion_stack: _KeywordRecursionStack,
    user_keywords_collector: _UserKeywordCollector,
    parent_node: Optional[Dict] = None,
):
    """Memoizing wrapper over `__build_hierarchy`.

    The model entries produced for a given (uri, lineno, col_offset) node are
    cached in `memo`, so revisiting that node only re-extends `parent_body`
    with the already-built entries instead of rebuilding them.
    """
    key = (completion_context.doc.uri, curr_ast.lineno, curr_ast.col_offset)
    found = memo.get(key)
    if found is not None:
        parent_body.extend(found)
        return
    # Register the (still empty) list before recursing: a revisit of the same
    # key during the recursion picks up the cached (possibly partial) list.
    temp_parent_body: List[Any] = []
    memo[key] = temp_parent_body
    # Note: __build_hierarchy fills temp_parent_body in place and returns
    # nothing (the previous `ret =` binding here was unused).
    __build_hierarchy(
        completion_context,
        curr_stack,
        curr_ast,
        suite_name,
        temp_parent_body,
        memo,
        recursion_stack,
        user_keywords_collector,
        parent_node,
    )
    parent_body.extend(temp_parent_body)
def __build_hierarchy(
    completion_context: ICompletionContext,
    curr_stack: Tuple[INode, ...],
    curr_ast: Any,
    suite_name: str,
    parent_body: List[Any],
    memo: dict,
    recursion_stack: _KeywordRecursionStack,
    user_keywords_collector: _UserKeywordCollector,
    parent_node: Optional[Dict] = None,
):
    """Recursively translate `curr_ast` into flow-explorer model entries.

    Entries are appended to `parent_body` in place (nothing is returned).
    Keyword usages are followed to their definitions (when an AST is
    available) and expanded; control-flow nodes (If/For/While/Try/Break/
    Continue/Return) become their own entry types. `parent_node`, when
    given, receives "setup"/"teardown" entries.
    """
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl import ast_utils_keyword_usage
    from robotframework_ls.impl.find_definition import find_keyword_definition
    from robotframework_ls.impl.protocols import TokenInfo
    # --- Keyword usages (KeywordCall / Setup / Teardown) ---
    if ast_utils.is_keyword_usage_node(curr_ast):
        keyword_usage_handler = ast_utils_keyword_usage.obtain_keyword_usage_handler(
            curr_stack, curr_ast, recursive=True
        )
        if keyword_usage_handler is not None:
            for keyword_usage in keyword_usage_handler.iter_keyword_usages_from_node():
                keyword_body: list = []
                keyword_usage_node: Any = keyword_usage.node
                if isinstance_name(keyword_usage_node, "KeywordCall"):
                    keyword = {
                        "type": "keyword",
                        "kind": "simple",
                        "assign": keyword_usage_node.assign,
                        "args": keyword_usage_node.args,
                        "body": keyword_body,
                        "doc": "",
                    }
                    parent_body.append(keyword)
                    # Now, we need to follow the keyword and build its own structure
                    token_info = TokenInfo(
                        keyword_usage.stack, keyword_usage.node, keyword_usage.token
                    )
                    definitions = find_keyword_definition(
                        completion_context.create_copy_with_selection(
                            keyword_usage.token.lineno - 1,
                            keyword_usage.token.col_offset,
                        ),
                        token_info,
                    )
                    # Fallback name if we don't know where it's defined.
                    keyword["name"] = f"{keyword_usage_node.keyword}"
                    keyword["internal_name"] = get_internal_name(keyword["name"])
                    if definitions:
                        # Use the first one
                        definition = next(iter(definitions))
                        keyword_found: IKeywordFound = definition.keyword_found
                        # Qualify the display name with its origin (library
                        # or resource), when known.
                        if keyword_found.library_name:
                            keyword[
                                "name"
                            ] = f"{keyword_usage_node.keyword} ({keyword_found.library_name.lower()})"
                        elif keyword_found.resource_name:
                            keyword[
                                "name"
                            ] = f"{keyword_usage_node.keyword} ({keyword_found.resource_name.lower()})"
                        keyword["internal_name"] = get_internal_name(keyword["name"])
                        # If it was found in a library we don't recurse anymore.
                        keyword_ast = keyword_found.keyword_ast
                        if keyword_ast is None:
                            continue
                        definition_completion_context = keyword_found.completion_context
                        if definition_completion_context is None:
                            continue
                        # If found in recursion stack we don't recurse anymore.
                        if keyword["name"] in recursion_stack:
                            keyword["kind"] = "recursion-leaf"
                            continue
                        suite_name = _compute_suite_name(definition_completion_context)
                        # Ok, it isn't a library keyword (as we have its AST). Keep recursing.
                        for node_info in ast_utils.iter_all_nodes(
                            keyword_ast, recursive=False
                        ):
                            with recursion_stack.scoped(keyword["name"]):
                                _build_hierarchy(
                                    completion_context=definition_completion_context,
                                    curr_stack=node_info.stack,
                                    curr_ast=node_info.node,
                                    suite_name=suite_name,
                                    parent_body=keyword_body,
                                    memo=memo,
                                    recursion_stack=recursion_stack,
                                    user_keywords_collector=user_keywords_collector,
                                )
                        # If the current keyword has body, the it is a User Keyword
                        if (
                            len(keyword_body) > 0
                            and get_internal_name(keyword["name"])
                            not in user_keywords_collector
                        ):
                            user_keyword: _UserKeywordType = {
                                "type": "user-keyword",
                                "kind": "implemented",
                                "body": keyword["body"],
                                "name": keyword["name"],
                                "internal_name": keyword["internal_name"],
                                "doc": keyword["doc"],
                                "args": keyword["args"],
                            }
                            user_keywords_collector.append(user_keyword)
                elif isinstance_name(keyword_usage_node, "Teardown") and parent_node:
                    parent_node["teardown"] = {
                        "type": "keyword",
                        "subtype": "KEYWORD",
                        "args": keyword_usage_node.args,
                        "name": keyword_usage_node.name,
                    }
                elif isinstance_name(keyword_usage_node, "Setup") and parent_node:
                    parent_node["setup"] = {
                        "type": "keyword",
                        "subtype": "KEYWORD",
                        "args": keyword_usage_node.args,
                        "name": keyword_usage_node.name,
                    }
    # --- If / Else If / Else ---
    elif isinstance_name(curr_ast, "If"):
        if_body: list = []
        if_info: Dict[str, Any] = {"type": "if", "body": if_body}
        parent_body.append(if_info)
        condition = " ".join(
            str(tok) for tok in ast_utils.iter_argument_tokens(curr_ast.header)
        )
        if_branch_body: list = []
        if_branch_info: Dict[str, Any] = {
            "type": "if-branch",
            "condition": condition,
            "body": if_branch_body,
        }
        if_body.append(if_branch_info)
        for body_ast in curr_ast.body:
            _build_hierarchy(
                completion_context=completion_context,
                curr_stack=curr_stack + (curr_ast,),
                curr_ast=body_ast,
                suite_name=suite_name,
                parent_body=if_branch_body,
                memo=memo,
                recursion_stack=recursion_stack,
                user_keywords_collector=user_keywords_collector,
            )
        orelse = curr_ast.orelse
        # Walk the chain of `orelse` nodes whose header is ElseIfHeader.
        def explore_elseifs(elseifbranch):
            if elseifbranch and isinstance_name(elseifbranch.header, "ElseIfHeader"):
                condition = " ".join(
                    str(tok)
                    for tok in ast_utils.iter_argument_tokens(elseifbranch.header)
                )
                else_if_body: list = []
                else_if_info: Dict[str, Any] = {
                    "type": "else-if-branch",
                    "condition": condition,
                    "body": else_if_body,
                }
                if_body.append(else_if_info)
                for body_ast in elseifbranch.body:
                    _build_hierarchy(
                        completion_context=completion_context,
                        curr_stack=curr_stack + (elseifbranch,),
                        curr_ast=body_ast,
                        suite_name=suite_name,
                        parent_body=else_if_body,
                        memo=memo,
                        recursion_stack=recursion_stack,
                        user_keywords_collector=user_keywords_collector,
                    )
                elseifbranch = elseifbranch.orelse
                if elseifbranch and isinstance_name(
                    elseifbranch.header, "ElseIfHeader"
                ):
                    explore_elseifs(elseifbranch)
        explore_elseifs(orelse)
        # To finish, handle the orelse.
        orelse = orelse.orelse if orelse and orelse.orelse else orelse
        if orelse:
            orelse_body: list = []
            orelse_info: Dict[str, Any] = {
                "type": "else-branch",
                "body": orelse_body,
            }
            if_body.append(orelse_info)
            for body_ast in orelse.body:
                _build_hierarchy(
                    completion_context=completion_context,
                    curr_stack=curr_stack + (orelse,),
                    curr_ast=body_ast,
                    suite_name=suite_name,
                    parent_body=orelse_body,
                    memo=memo,
                    recursion_stack=recursion_stack,
                    user_keywords_collector=user_keywords_collector,
                )
    # --- For loop ---
    elif isinstance_name(curr_ast, "For"):
        for_body: list = []
        for_info: Dict[str, Any] = {
            "type": "for",
            "kind": curr_ast.flavor,
            "values": list(curr_ast.values),
            "variables": list(curr_ast.variables),
            "body": for_body,
        }
        parent_body.append(for_info)
        for body_ast in curr_ast.body:
            _build_hierarchy(
                completion_context=completion_context,
                curr_stack=curr_stack + (curr_ast,),
                curr_ast=body_ast,
                suite_name=suite_name,
                parent_body=for_body,
                memo=memo,
                recursion_stack=recursion_stack,
                user_keywords_collector=user_keywords_collector,
            )
    # --- While loop ---
    elif isinstance_name(curr_ast, "While"):
        condition = " ".join(
            str(tok) for tok in ast_utils.iter_argument_tokens(curr_ast.header)
        )
        while_body: list = []
        while_info: Dict[str, Any] = {
            "type": "while",
            "condition": condition,
            "body": while_body,
        }
        parent_body.append(while_info)
        for body_ast in curr_ast.body:
            _build_hierarchy(
                completion_context=completion_context,
                curr_stack=curr_stack + (curr_ast,),
                curr_ast=body_ast,
                suite_name=suite_name,
                parent_body=while_body,
                memo=memo,
                recursion_stack=recursion_stack,
                user_keywords_collector=user_keywords_collector,
            )
    # --- Try / Except / Else / Finally ---
    elif isinstance_name(curr_ast, "Try"):
        try_body: list = []
        try_info: Dict[str, Any] = {
            "type": "try",
            "body": try_body,
        }
        parent_body.append(try_info)
        try_branch_body: list = []
        try_branch_info: Dict[str, Any] = {
            "type": "try-branch",
            "body": try_branch_body,
        }
        try_body.append(try_branch_info)
        for body_ast in curr_ast.body:
            _build_hierarchy(
                completion_context=completion_context,
                curr_stack=curr_stack + (curr_ast,),
                curr_ast=body_ast,
                suite_name=suite_name,
                parent_body=try_branch_body,
                memo=memo,
                recursion_stack=recursion_stack,
                user_keywords_collector=user_keywords_collector,
            )
        # Walk the `next` chain of except/finally/else branches.
        def explore_try(ast):
            if ast:
                next_type = None
                next_patterns = None
                if isinstance_name(ast.header, "ExceptHeader"):
                    next_type = "except-branch"
                    next_patterns = ast.patterns
                elif isinstance_name(ast.header, "FinallyHeader"):
                    next_type = "finally-branch"
                elif isinstance_name(ast.header, "ElseHeader"):
                    next_type = "else-branch"
                if not next_type:
                    return
                next_branch_body: list = []
                next_branch_info: Dict[str, Any] = {
                    "type": next_type,
                    "body": next_branch_body,
                }
                if next_patterns:
                    next_branch_info["patterns"] = next_patterns
                for body_ast in ast.body:
                    _build_hierarchy(
                        completion_context=completion_context,
                        curr_stack=curr_stack + (ast,),
                        curr_ast=body_ast,
                        suite_name=suite_name,
                        parent_body=next_branch_body,
                        memo=memo,
                        recursion_stack=recursion_stack,
                        user_keywords_collector=user_keywords_collector,
                    )
                try_body.append(next_branch_info)
                if ast.next:
                    explore_try(ast.next)
        explore_try(curr_ast.next)
    # --- Simple control-flow statements ---
    elif isinstance_name(curr_ast, "Break"):
        break_info: Dict[str, Any] = {
            "type": "break",
        }
        parent_body.append(break_info)
    elif isinstance_name(curr_ast, "Continue"):
        continue_info: Dict[str, Any] = {
            "type": "continue",
        }
        parent_body.append(continue_info)
    elif isinstance_name(curr_ast, "ReturnStatement"):
        return_info: Dict[str, Any] = {
            "type": "return",
        }
        parent_body.append(return_info) | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/flow_explorer_model_builder.py | 0.759404 | 0.208723 | flow_explorer_model_builder.py | pypi |
from typing import List
from robotframework_ls.impl.protocols import ICompletionContext
from robocorp_ls_core.protocols import IDocumentSelection
def _create_completion_item(
    label: str,
    new_text: str,
    selection: IDocumentSelection,
    col_start: int,
    col_end: int,
    documentation: str,
) -> dict:
    """Build a plain-text Field completion item that replaces the current
    line's [col_start, col_end) span with `new_text` (shown as `label`)."""
    from robocorp_ls_core.lsp import (
        CompletionItem,
        CompletionItemKind,
        InsertTextFormat,
        Position,
        Range,
        TextEdit,
    )

    edit_range = Range(
        start=Position(selection.line, col_start),
        end=Position(selection.line, col_end),
    )
    completion = CompletionItem(
        label,
        kind=CompletionItemKind.Field,
        text_edit=TextEdit(edit_range, new_text),
        insertText=label,
        documentation=documentation,
        insertTextFormat=InsertTextFormat.PlainText,
    )
    return completion.to_dict()
def complete(completion_context: ICompletionContext) -> List[dict]:
    """Completions for named keyword arguments (`arg_name=`) at the cursor.

    Only offered when the cursor is on an argument/EOL token of a call whose
    keyword definition (and its argument specs) can be resolved.
    """
    from robotframework_ls.impl.protocols import IKeywordFound
    from robotframework_ls.impl.protocols import IKeywordArg
    ret: List[dict] = []
    sel = completion_context.sel
    if sel.word_from_column:
        # i.e.: if there's any word after the column, skip it (could work, but
        # let's simplify for now).
        return ret
    token_info = completion_context.get_current_token()
    if token_info and token_info.token:
        token = token_info.token
        if token.type not in (token.ARGUMENT, token.EOL):
            return []
        current_keyword_definition = completion_context.get_current_keyword_definition()
        if current_keyword_definition is not None:
            keyword_found: IKeywordFound = current_keyword_definition.keyword_found
            keyword_args = keyword_found.keyword_args
            if keyword_args:
                curr_token_value = token.value
                # The argument already has a `name=`: nothing to complete.
                if "=" in curr_token_value:
                    return ret
                # Note: If it's an empty word, it's okay to be in the middle.
                if token.end_col_offset > sel.col and curr_token_value.strip():
                    return []
                word_to_column = curr_token_value.strip()
                arg: IKeywordArg
                for arg in keyword_args:
                    # **kwargs / *args have no fixed name to offer.
                    if arg.is_keyword_arg or arg.is_star_arg:
                        continue
                    arg_name = arg.arg_name
                    # Strip the ${...} decoration from the argument name.
                    if arg_name.startswith("${") and arg_name.endswith("}"):
                        arg_name = arg_name[2:-1]
                    arg_name = arg_name.strip()
                    if arg_name:
                        arg_name += "="
                        col_start = sel.col
                        col_end = sel.col
                        new_text = arg_name
                        if word_to_column:
                            # Only offer names matching the prefix typed so
                            # far, and insert just the remaining part.
                            if not arg_name.startswith(word_to_column):
                                continue
                            new_text = arg_name[len(word_to_column) :]
                        documentation = arg.original_arg
                        ret.append(
                            _create_completion_item(
                                arg_name, new_text, sel, col_start, col_end, documentation
                            )
                        )
    return ret | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/keyword_parameter_completions.py | 0.66454 | 0.168292 | keyword_parameter_completions.py | pypi |
from typing import List
from robocorp_ls_core.lsp import CompletionItemTypedDict
from robocorp_ls_core.lsp import TextEditTypedDict
from robocorp_ls_core.lsp import InsertTextFormat
from robocorp_ls_core.lsp import CompletionItemKind
from robotframework_ls.impl.protocols import ICompletionContext
from robotframework_ls.impl.protocols import ILibraryImportNode
import os
def _iter_import_names(completion_context):
imported_libraries = completion_context.get_imported_libraries()
lib: ILibraryImportNode
for lib in imported_libraries:
alias = lib.alias
if alias:
use = alias
else:
use = lib.name
if use.endswith(".py"):
use = use[:-3]
if use:
yield use
for resource_import in completion_context.get_resource_imports():
use = resource_import.name
use = os.path.splitext(use)[0]
if use:
yield use
def complete(completion_context: ICompletionContext) -> List[CompletionItemTypedDict]:
    """Complete a keyword-call prefix with the names of imported
    libraries/resources (so the user can qualify the keyword, e.g. `Lib.`).

    The typed prefix is matched (normalized, dots ignored) against each
    import name's basename; matches replace the text up to the cursor.
    """
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl.text_utilities import normalize_robot_name
    ret: List[CompletionItemTypedDict] = []
    token_info = completion_context.get_current_token()
    if token_info is not None:
        keyword_usage = ast_utils.create_keyword_usage_info_from_token(
            token_info.stack, token_info.node, token_info.token
        )
        if keyword_usage is not None and "{" not in keyword_usage.token.value:
            full_tok_name = keyword_usage.token.value
            # How far the token extends beyond the cursor.
            diff = token_info.token.end_col_offset - completion_context.sel.col
            if diff < 0:
                return ret
            if diff > 0:
                full_tok_name = full_tok_name[:-diff]
            replace_up_to_col = completion_context.sel.col
            curr_normalized = normalize_robot_name(full_tok_name).replace(".", "")
            for use in _iter_import_names(completion_context):
                # Drop a leading ${...} portion from the import name, if any.
                i = use.rfind("}")
                if i >= 0:
                    use = use[i + 1 :]
                use = os.path.basename(use)
                if curr_normalized in normalize_robot_name(use).replace(".", ""):
                    text_edit: TextEditTypedDict = {
                        "range": {
                            "start": {
                                "line": completion_context.sel.line,
                                "character": token_info.token.col_offset,
                            },
                            "end": {
                                "line": completion_context.sel.line,
                                "character": replace_up_to_col,
                            },
                        },
                        "newText": use,
                    }
                    label = use
                    ret.append(
                        {
                            "label": label,
                            "kind": CompletionItemKind.Module,
                            "textEdit": text_edit,
                            "insertText": text_edit["newText"],
                            "insertTextFormat": InsertTextFormat.Snippet,
                        }
                    )
    return ret | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/library_names_completions.py | 0.632162 | 0.19521 | library_names_completions.py | pypi |
from collections.abc import Mapping
def is_dict_like(item):
    """Return True when `item` implements the Mapping protocol."""
    return isinstance(item, Mapping)
def is_string(item):
    """Return True when `item` is a (unicode) str; bytes don't count."""
    return isinstance(item, str)
from collections.abc import MutableMapping
import re
def normalize(string, ignore=(), caseless=True, spaceless=True):
    """Normalizes given string according to given spec.

    By default string is turned to lower case and all whitespace is removed.
    Additional characters can be removed by giving them in ``ignore`` list.
    Works on both ``str`` and ``bytes`` input (result has the same type).
    """
    empty = "" if is_string(string) else b""
    if isinstance(ignore, bytes):
        # Iterating bytes in Python3 yields integers, so re-split
        # into 1-byte chunks first.
        ignore = [bytes([i]) for i in ignore]
    if spaceless:
        string = empty.join(string.split())
    if caseless:
        string = string.lower()
        ignore = [i.lower() for i in ignore]
    for ign in ignore:
        # Membership check first: replace() is comparatively expensive.
        if ign in string:
            string = string.replace(ign, empty)
    return string
def normalize_whitespace(string):
    """Replace each whitespace character (Unicode-aware) with a plain space."""
    pattern = re.compile(r"\s", re.UNICODE)
    return pattern.sub(" ", string)
class NormalizedDict(MutableMapping):
    """Custom dictionary implementation automatically normalizing keys."""
    def __init__(self, initial=None, ignore=(), caseless=True, spaceless=True):
        """Initialized with possible initial value and normalizing spec.
        Initial values can be either a dictionary or an iterable of name/value
        pairs. In the latter case items are added in the given order.
        Normalizing spec has exact same semantics as with the :func:`normalize`
        function.
        """
        # Maps normalized key -> value.
        self._data = {}
        # Maps normalized key -> the first original (pretty) key seen for it.
        self._keys = {}
        self._normalize = lambda s: normalize(s, ignore, caseless, spaceless)
        if initial:
            self._add_initial(initial)
    def _add_initial(self, initial):
        items = initial.items() if hasattr(initial, "items") else initial
        for key, value in items:
            self[key] = value
    def __getitem__(self, key):
        return self._data[self._normalize(key)]
    def __setitem__(self, key, value):
        norm_key = self._normalize(key)
        self._data[norm_key] = value
        # setdefault: the first original spelling of a key is preserved.
        self._keys.setdefault(norm_key, key)
    def __delitem__(self, key):
        norm_key = self._normalize(key)
        del self._data[norm_key]
        del self._keys[norm_key]
    def __iter__(self):
        # Iterates original keys, ordered by their normalized form.
        return (self._keys[norm_key] for norm_key in sorted(self._keys))
    def __len__(self):
        return len(self._data)
    def __str__(self):
        return "{%s}" % ", ".join("%r: %r" % (key, self[key]) for key in self)
    def __eq__(self, other):
        if not is_dict_like(other):
            return False
        if not isinstance(other, NormalizedDict):
            # NOTE: the other side is re-normalized with *default* settings,
            # which may differ from this instance's normalization spec.
            other = NormalizedDict(other)
        return self._data == other._data
    def copy(self):
        copy = NormalizedDict()
        copy._data = self._data.copy()
        copy._keys = self._keys.copy()
        copy._normalize = self._normalize
        return copy
    # Speed-ups. Following methods are faster than default implementations.
    def __contains__(self, key):
        return self._normalize(key) in self._data
    def clear(self):
        self._data.clear()
        self._keys.clear() | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/impl/robot_formatting/robot_normalizing.py | 0.928449 | 0.455744 | robot_normalizing.py | pypi |
import re
from urllib.parse import quote
from .robot_markuputils import html_escape, html_format
from .robot_normalizing import NormalizedDict
from .robot_html_formatters import HeaderFormatter
class DocFormatter:
    """Converts keyword/library documentation to HTML, turning `name`
    references into intra-document links for known targets."""
    _header_regexp = re.compile(r"<h([234])>(.+?)</h\1>")
    _name_regexp = re.compile("`(.+?)`")
    def __init__(self, keywords, type_info, introduction, doc_format="ROBOT"):
        self._doc_to_html = DocToHtml(doc_format)
        self._targets = self._get_targets(
            keywords, introduction, robot_format=doc_format == "ROBOT"
        )
        self._type_info_targets = self._get_type_info_targets(type_info)
    def _get_targets(self, keywords, introduction, robot_format):
        # Fixed sections plus keyword names (and, for ROBOT format,
        # headers found in the introduction) are linkable.
        targets = {
            "introduction": "Introduction",
            "library introduction": "Introduction",
            "importing": "Importing",
            "library importing": "Importing",
            "keywords": "Keywords",
        }
        for kw in keywords:
            targets[kw.name] = kw.name
        if robot_format:
            for header in self._yield_header_targets(introduction):
                targets[header] = header
        return self._escape_and_encode_targets(targets)
    def _get_type_info_targets(self, type_info):
        targets = {info.name: info.name for info in type_info}
        return self._escape_and_encode_targets(targets)
    def _yield_header_targets(self, introduction):
        headers = HeaderFormatter()
        for line in introduction.splitlines():
            match = headers.match(line.strip())
            if match:
                yield match.group(2)
    def _escape_and_encode_targets(self, targets):
        # Keys are matched against HTML-escaped docs; values go into hrefs.
        return NormalizedDict(
            (html_escape(key), self._encode_uri_component(value))
            for key, value in targets.items()
        )
    def _encode_uri_component(self, value):
        # Emulates encodeURIComponent javascript function
        return quote(value.encode("UTF-8"), safe="-_.!~*'()")
    def html(self, doc, intro=False):
        """Return `doc` converted to HTML with `name` references linked;
        with intro=True, h2-h4 headers also get id attributes."""
        doc = self._doc_to_html(doc)
        if intro:
            doc = self._header_regexp.sub(r'<h\1 id="\2">\2</h\1>', doc)
        return self._name_regexp.sub(self._link_keywords, doc)
    def _link_keywords(self, match):
        name = match.group(1)
        targets = self._targets
        types = self._type_info_targets
        if name in targets:
            return f'<a href="#{targets[name]}" class="name">{name}</a>'
        elif name in types:
            return f'<a href="#type-{types[name]}" class="name">{name}</a>'
        return f'<span class="name">{name}</span>'
class DocToHtml:
    """Callable converting documentation in a given source format to HTML."""

    def __init__(self, doc_format):
        self._formatter = self._get_formatter(doc_format)

    def _get_formatter(self, doc_format):
        formatters = {
            "ROBOT": html_format,
            "TEXT": self._format_text,
            "HTML": lambda doc: doc,
            "REST": self._format_rest,
        }
        try:
            return formatters[doc_format]
        except KeyError:
            raise RuntimeError("Invalid documentation format '%s'." % doc_format)

    def _format_text(self, doc):
        # Preserve the original whitespace but escape any markup.
        return '<p style="white-space: pre-wrap">%s</p>' % html_escape(doc)

    def _format_rest(self, doc):
        try:
            from docutils.core import publish_parts
        except ImportError:
            raise RuntimeError(
                "reST format requires 'docutils' module to be installed."
            )
        parts = publish_parts(
            doc, writer_name="html", settings_overrides={"syntax_highlight": "short"}
        )
        return parts["html_body"]

    def __call__(self, doc):
        return self._formatter(doc)
class HtmlToText:
    """Best-effort conversion of simple HTML documentation back to plain text.

    Fix: the ``html_chars`` keys had been HTML-unescaped at some point, turning
    them into identity mappings (e.g. ``"&": "&"``) and even a broken literal
    for ``&quot;`` — the actual character entities are restored here.
    """

    # Formatting tags mapped back to their wiki-style markers.
    html_tags = {
        "b": "*",
        "i": "_",
        "strong": "*",
        "em": "_",
        "code": "``",
        "div.*?": "",
    }
    # Character entities decoded back to plain characters.
    # Note: "&amp;" must come after the other entities so that e.g.
    # "&amp;lt;" decodes to "&lt;" (a literal) and not to "<".
    html_chars = {
        "<br */?>": "\n",
        "&lt;": "<",
        "&gt;": ">",
        "&quot;": '"',
        "&apos;": "'",
        "&amp;": "&",
    }

    def get_shortdoc_from_html(self, doc):
        """Return the first ``<p>`` paragraph of `doc` as plain text."""
        match = re.search(r"<p.*?>(.*?)</?p>", doc, re.DOTALL)
        if match:
            doc = match.group(1)
        return self.html_to_plain_text(doc)

    def html_to_plain_text(self, doc):
        """Strip/convert the known tags and decode character entities."""
        for tag, repl in self.html_tags.items():
            doc = re.sub(
                r"<%(tag)s>(.*?)</%(tag)s>" % {"tag": tag},
                r"%(repl)s\1%(repl)s" % {"repl": repl},
                doc,
                flags=re.DOTALL,
            )
        for html, text in self.html_chars.items():
            doc = re.sub(html, text, doc)
        return doc
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.basic import implements
from robocorp_ls_core.protocols import IConfig, Sentinel
from typing import Any, FrozenSet, Optional
import os
log = get_logger(__name__)
def flatten_keys(d: dict, parent_key="", all_options=frozenset(), result_dict=None):
    """Flattens nested dicts into dotted keys, stopping at known option names.

    A nested dict whose dotted key is itself in `all_options` is kept as a
    value (not recursed into).
    """
    if result_dict is None:
        result_dict = {}
    for key, value in d.items():
        full_key = f"{parent_key}.{key}" if parent_key else key
        if isinstance(value, dict) and full_key not in all_options:
            flatten_keys(value, full_key, all_options, result_dict)
        else:
            result_dict[full_key] = value
    return result_dict
class Config(object):
    """Holds client settings (plus overrides) with ``${var}`` expansion.

    Settings are kept in layers:
    - the settings received from the client (``update``),
    - override settings applied on top of them (``set_override_settings`` /
      ``update_override_settings``),
    - the merged view used for lookups (``_full_settings``).

    Variable references such as ``${workspaceRoot}`` and ``${env:NAME}`` are
    (re-)expanded whenever settings or the workspace dir change.

    Fixes over the previous version: the trailing corrupted line was removed,
    bare ``except:`` clauses were narrowed to ``except Exception:``, and a
    missing environment variable no longer crashes ``_var_replace`` with a
    TypeError (``"".join`` of ``None``).
    """

    ALL_OPTIONS: FrozenSet[str] = frozenset()

    def __init__(self, all_options: FrozenSet[str] = frozenset()):
        if all_options:
            self.ALL_OPTIONS = all_options
        # Settings as received (original) and with variables replaced.
        self._settings: dict = {}
        self._override_settings: dict = {}
        self._original_settings: dict = {}
        self._original_override_settings: dict = {}
        # Merged view: _settings with _override_settings applied on top.
        self._full_settings: dict = {}
        self._workspace_dir: Optional[str] = None

    @implements(IConfig.get_setting)
    def get_setting(self, key, expected_type, default=Sentinel.SENTINEL) -> Any:
        """Returns the setting cast to `expected_type`.

        Raises KeyError (or returns `default`, when one was given) if the
        setting is missing or cannot be converted to the expected type.
        """

        def type_error() -> KeyError:
            return KeyError(
                "Expected %s to be a setting of type: %s. Found: %s"
                % (key, expected_type, type(s))
            )

        try:
            s = self._full_settings[key]
            if not isinstance(s, expected_type):
                if isinstance(expected_type, tuple):
                    # Don't try to make a cast if a tuple of classes was passed.
                    if default is not Sentinel.SENTINEL:
                        return default
                    raise type_error()

                try:
                    if expected_type in (list, tuple):
                        if expected_type == list and isinstance(s, tuple):
                            return expected_type(s)
                        if expected_type == tuple and isinstance(s, list):
                            return expected_type(s)
                        # Don't try to make a cast for list or tuple (we don't
                        # want a string to end up being a list of chars).
                        if default is not Sentinel.SENTINEL:
                            return default
                        raise type_error()

                    # Check if we can cast it...
                    return expected_type(s)
                except Exception:
                    if default is not Sentinel.SENTINEL:
                        return default
                    raise type_error()
        except KeyError:
            if default is not Sentinel.SENTINEL:
                return default
            raise
        return s

    def _update_full_settings(self):
        # Recompute the merged view (overrides win over regular settings).
        full_settings = self._settings.copy()
        full_settings.update(self._override_settings)
        self._full_settings = full_settings
        log.debug("Updated settings to %s", full_settings)

    def _get_var_value(self, name):
        """Resolves a single ``${...}`` reference.

        Unknown/unresolvable references are returned unchanged.
        """
        ret = name
        if name in ("${workspace}", "${workspaceRoot}", "${workspaceFolder}"):
            if self._workspace_dir is not None:
                ret = self._workspace_dir
            else:
                log.info("Unable to make workspace replacement for variable: %s", name)
        elif (name.startswith("${env.") or name.startswith("${env:")) and name.endswith(
            "}"
        ):
            env_name = name[6:-1]
            # Note: should be case-insensitive on windows.
            value_in_env = os.environ.get(env_name)
            if value_in_env is None:
                # Keep the literal reference instead of returning None (which
                # would break the "".join(...) in _var_replace).
                log.info("Unable to resolve environment variable: %s", name)
            else:
                ret = value_in_env
        else:
            log.info("Unable to resolve variable: %s", name)
        return ret

    def _var_replace(self, option, value):
        """Expands all ``${...}`` references inside `value` (a str)."""
        import re

        compiled = re.compile(r"\${([^{}]*)}")
        lasti = 0
        new_value = []
        for o in compiled.finditer(value):
            new_value.append(value[lasti : o.start()])
            new_value.append(self._get_var_value(o.group(0)))
            lasti = o.end()

        if lasti == 0:
            # Nothing changed
            return value

        new_value.append(value[lasti:])
        ret = "".join(new_value)
        if ret.startswith("~"):
            ret = os.path.expanduser(ret)
        log.debug("Changed setting: %s from %s to %s", option, value, ret)
        return ret

    def _replace_variables_in_settings(self, settings: dict) -> dict:
        """
        :param settings:
            The settings where the variables should be replaced.
            Note that this instance is unchanged.

        :return dict:
            Returns a new dict with the variables replaced.
        """
        settings = settings.copy()
        for option in self.ALL_OPTIONS:
            value = settings.get(option)
            if isinstance(value, str):
                settings[option] = self._var_replace(option, value)
            elif isinstance(value, list):
                settings[option] = [
                    self._var_replace(option, val) if isinstance(val, str) else val
                    for val in value
                ]
            elif isinstance(value, dict):
                settings[option] = {
                    key: self._var_replace(option, val) if isinstance(val, str) else val
                    for key, val in value.items()
                }
        return settings

    @implements(IConfig.update)
    def update(self, settings: dict):
        settings = flatten_keys(settings, all_options=self.ALL_OPTIONS)
        self._original_settings = settings
        self._settings = self._replace_variables_in_settings(settings)
        self._update_full_settings()

    @implements(IConfig.set_override_settings)
    def set_override_settings(self, override_settings):
        settings = flatten_keys(override_settings, all_options=self.ALL_OPTIONS)
        self._original_override_settings = settings
        self._override_settings = self._replace_variables_in_settings(settings)
        self._update_full_settings()

    @implements(IConfig.update_override_settings)
    def update_override_settings(self, override_settings):
        settings = flatten_keys(override_settings, all_options=self.ALL_OPTIONS)
        original = self._original_override_settings.copy()
        original.update(settings)
        self._original_override_settings = original
        self._override_settings = self._replace_variables_in_settings(original)
        self._update_full_settings()

    @implements(IConfig.get_full_settings)
    def get_full_settings(self):
        return self._full_settings

    @implements(IConfig.set_workspace_dir)
    def set_workspace_dir(self, workspace: str):
        # Re-expand variables as ${workspaceRoot} & co. may now resolve.
        self._workspace_dir = workspace
        self._settings = self._replace_variables_in_settings(self._original_settings)
        self._override_settings = self._replace_variables_in_settings(
            self._original_override_settings
        )
        self._update_full_settings()
import re
from robocorp_ls_core.protocols import IDocumentSelection, IDocument
# Word boundaries used to compute the word up to / from the cursor.
# Fix: the patterns were plain strings relying on the (deprecated/invalid)
# "\w" escape; raw strings make the intent explicit.
RE_START_WORD = re.compile(r"[\w]*$")
RE_END_WORD = re.compile(r"^[\w]*")


def word_to_column(line_to_cursor):
    """Returns the word which ends at the cursor, given the contents of the
    line up to the cursor position (may be the empty string)."""
    m_start = RE_START_WORD.findall(line_to_cursor)
    return m_start[0]
class DocumentSelection(object):
    """A position (line/col) inside a document with word/line accessors.

    Fix: the trailing corrupted line was removed and the offset docstring no
    longer claims a *byte* offset (the computation sums ``len`` of str lines,
    i.e. characters).
    """

    def __init__(self, doc: IDocument, line: int, col: int):
        # Negative positions are clamped to the start of the document.
        if line < 0:
            line = 0
        if col < 0:
            col = 0
        self.doc = doc
        self.line = line
        self.col = col

    @property
    def offset_at_position(self):
        """Return the character offset pointed at by the given position."""
        offset = 0
        for i, curr_line in enumerate(self.doc.iter_lines()):
            if i == self.line:
                break
            offset += len(curr_line)

        return offset + self.col

    @property
    def current_line(self) -> str:
        return self.doc.get_line(self.line)

    @property
    def line_to_column(self) -> str:
        """The contents of the current line up to the cursor column."""
        current_line = self.current_line
        if not current_line:
            return ""
        return current_line[: self.col]

    @property
    def line_to_end(self) -> str:
        """The contents of the current line from the cursor column onward."""
        current_line = self.current_line
        if not current_line:
            return ""
        return current_line[self.col :]

    @property
    def word_at_column(self) -> str:
        """The whole word under the cursor (chars before + after the column)."""
        current_line = self.current_line
        if not current_line:
            return ""

        col = self.col
        # Split word in two
        start = current_line[:col]
        end = current_line[col:]

        # Take end of start and start of end to find word
        # These are guaranteed to match, even if they match the empty string
        m_start = RE_START_WORD.findall(start)
        m_end = RE_END_WORD.findall(end)

        return m_start[0] + m_end[-1]

    @property
    def word_to_column(self) -> str:
        """The word part which ends at the cursor column."""
        return word_to_column(self.line_to_column)

    @property
    def word_from_column(self) -> str:
        """The word part which starts at the cursor column."""
        current_line = self.current_line
        if not current_line:
            return ""
        m_end = RE_END_WORD.findall(current_line[self.col :])
        return m_end[-1]

    def __typecheckself__(self) -> None:
        from robocorp_ls_core.protocols import check_implements

        _: IDocumentSelection = check_implements(self)
from typing import Optional, List, Dict
import sys
# Hack so that we don't break the runtime on versions prior to Python 3.8.
# typing.Protocol only exists on Python 3.8+; on older runtimes provide a
# no-op stand-in so that the class definitions below still work.
if sys.version_info[:2] >= (3, 8):
    from typing import Protocol
else:

    class Protocol(object):
        pass
class IInterpreterInfo(Protocol):
    """Protocol describing the interpreter to be used for some target."""

    def get_interpreter_id(self) -> str:
        """
        This is an identification and should relate to the place which allows
        to identify this info (it should usually be given by some location --
        for instance, it could be identified by the robot.yaml location).

        Note that different interpreter ids can point to the same python
        executable (if they'd have the same robot.yaml contents).
        """

    def get_python_exe(self) -> str:
        """
        The python executable that should be used.
        """

    def get_environ(self) -> Optional[Dict[str, str]]:
        """
        The environment to be used (may be None).
        """

    def get_additional_pythonpath_entries(self) -> List[str]:
        """
        Any additional PYTHONPATH entries to be considered.
        """
class EPResolveInterpreter(Protocol):
    """Extension point: resolves the interpreter to use for a document."""

    def get_interpreter_info_for_doc_uri(self, doc_uri) -> Optional[IInterpreterInfo]:
        """
        Provides a customized interpreter for a given document uri (or None
        when there is no customization for it).
        """
class DefaultInterpreterInfo(object):
    """
    A default implementation for the interpreter info where everything is
    pre-computed: the values are stored at construction time and just
    returned by the accessors.
    """

    def __init__(
        self,
        interpreter_id: str,
        python_exe: str,
        environ: Optional[Dict[str, str]],
        additional_pythonpath_entries: List[str],
    ) -> None:
        self.interpreter_id = interpreter_id
        self.python_exe = python_exe
        self.environ = environ
        self.additional_pythonpath_entries = additional_pythonpath_entries

    def get_interpreter_id(self) -> str:
        return self.interpreter_id

    def get_python_exe(self) -> str:
        return self.python_exe

    def get_environ(self) -> Optional[Dict[str, str]]:
        return self.environ

    def get_additional_pythonpath_entries(self) -> List[str]:
        return self.additional_pythonpath_entries

    def __str__(self):
        return f"DefaultInterpreterInfo({self.interpreter_id}, {self.python_exe})"

    def __typecheckself__(self) -> None:
        from robocorp_ls_core.protocols import check_implements

        _: IInterpreterInfo = check_implements(self)
import threading
from robocorp_ls_core.protocols import IEndPoint, IDirCache, IProgressReporter
from contextlib import contextmanager
from typing import Optional, Iterator, Dict
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.basic import implements
import os
log = get_logger(__name__)
def _next_id():
    # Uses uuid (not an incrementing counter) because multiple processes may
    # start a progress and the ids shouldn't conflict with one another; the
    # pid suffix makes the origin easy to spot when debugging.
    import uuid

    return "%s-%s" % (uuid.uuid4(), os.getpid())
# Live reporters indexed by id so that `cancel(progress_id)` can reach them.
_progress_id_to_progress_reporter: Dict[str, "_ProgressReporter"] = {}
class _ProgressReporter(object):
    """Reports progress to the client through $/customProgress notifications.

    The "begin" notification is only sent after _MIN_TIME has elapsed (so
    short operations show no progress at all); afterwards "report"
    notifications are sent periodically and "end" is sent on finish().

    If a dir_cache is given, the total time of a finished operation is stored
    so that the next run of the same operation can report a percentage based
    on the previously elapsed time.
    """

    _MIN_TIME = 0.25

    def __init__(
        self,
        endpoint: IEndPoint,
        title: str,
        dir_cache: Optional[IDirCache],
        elapsed_time_key=None,
        cancellable: bool = False,
    ) -> None:
        from robocorp_ls_core.timeouts import TimeoutTracker
        import time

        self.endpoint = endpoint
        self.title = title
        self._started = False
        self._finished = False
        # Guards the started/finished transitions (callbacks may run in
        # timeout-tracker threads).
        self._lock = threading.Lock()
        self._id = _next_id()
        # Expected total time based on a previous run (None if unknown).
        self._expected_time = None
        self._initial_time = time.time()
        self._additional_info: str = ""
        self._cancellable = cancellable
        self._cancelled = False
        self._dir_cache = dir_cache
        self._last_elapsed_time_key = (
            elapsed_time_key
            if elapsed_time_key is not None
            else ("operation_time", title)
        )
        try:
            if dir_cache:
                expected_time = dir_cache.load(self._last_elapsed_time_key, float)
                # Leave some gap on the expected.
                self._expected_time = expected_time * 1.2
        except KeyError:
            # No previous timing stored for this operation.
            pass
        self._last_progress = 0.0
        self.timeout_tracker = TimeoutTracker.get_singleton()
        # Only send "begin" after _MIN_TIME has elapsed.
        self.timeout_tracker.call_on_timeout(self._MIN_TIME, self._on_first_timeout)

    @property
    def id(self):
        return self._id

    def cancel(self):
        # Just flags the cancel: users of the reporter must poll `cancelled`.
        self._cancelled = True

    @property
    def cancelled(self) -> bool:
        return self._cancelled

    def _on_first_timeout(self):
        # Called (in a timeouts thread) once _MIN_TIME elapses: emits "begin"
        # and schedules the recurring "report" notifications.
        with self._lock:
            if not self._finished and not self._started:
                self._started = True
                self.endpoint.notify(
                    "$/customProgress",
                    {
                        "kind": "begin",
                        "id": self._id,
                        "title": self.title,
                        "cancellable": self._cancellable,
                    },
                )
                if self._expected_time:
                    # Aim for ~30 updates over the expected duration.
                    update_time = self._expected_time / 30.0
                else:
                    update_time = 0.25
                self.timeout_tracker.call_on_timeout(
                    update_time, self._on_recurrent_timeout
                )

    def _on_recurrent_timeout(self) -> None:
        # Called (in a timeouts thread) periodically while not finished:
        # reports the elapsed time and, when an expected time is known, an
        # increment so the client can show a percentage (capped at 95%).
        import time

        with self._lock:
            if not self._finished and self._started:
                elapsed_time = time.time() - self._initial_time
                expected_time = self._expected_time
                if not self._additional_info:
                    msg = "Elapsed: %.1fs" % (elapsed_time,)
                else:
                    msg = "Elapsed: %.1fs : %s" % (
                        elapsed_time,
                        self._additional_info,
                    )
                args = {
                    "kind": "report",
                    "id": self._id,
                    "message": msg,
                }
                if expected_time:
                    progress = elapsed_time / expected_time
                    if progress > 0.95:
                        progress = 0.95
                    increment = (progress - self._last_progress) * 100
                    self._last_progress = progress
                    args["increment"] = increment
                self.endpoint.notify("$/customProgress", args)
                self.timeout_tracker.call_on_timeout(0.5, self._on_recurrent_timeout)

    @implements(IProgressReporter.set_additional_info)
    def set_additional_info(self, additional_info: str) -> None:
        self._additional_info = additional_info

    def finish(self) -> None:
        import time

        with self._lock:
            if not self._finished:
                self._finished = True
                self.endpoint.notify(
                    "$/customProgress", {"kind": "end", "id": self._id}
                )
                # Store how long it took so the next run can show a percentage.
                total_elapsed_time = time.time() - self._initial_time
                if total_elapsed_time > self._MIN_TIME:
                    dir_cache = self._dir_cache
                    if dir_cache:
                        dir_cache.store(self._last_elapsed_time_key, total_elapsed_time)

    def __typecheckself__(self) -> None:
        from robocorp_ls_core.protocols import check_implements

        _: IProgressReporter = check_implements(self)
# Thread-local stack of active progress reporters (see progress_context).
_progress_context = threading.local()
def get_current_progress_reporter() -> Optional[_ProgressReporter]:
    """Returns the reporter of the innermost active `progress_context` in this
    thread (or None when there is no active context)."""
    try:
        stack = getattr(_progress_context, "_stack", None)
        if not stack:
            # No context was ever entered in this thread / stack is empty.
            return None
        return stack[-1]
    except Exception:
        log.exception("Unexpected error getting current progress reporter.")
        return None
class ProgressWrapperForTotalWork:
    """
    Wraps an IProgressReporter to provide a quick way to show "steps done of
    total steps" through the reporter's additional info.

    i.e.:
        with progress_context(...) as progress_reporter:
            progress_wrapper = ProgressWrapperForTotalWork(progress_reporter)
            # Schedule many steps and at each point call.
            progress_reporter.increment_total_steps()

            # When a step is done, increment steps done.
            progress_reporter.increment_step_done()
    """

    def __init__(
        self,
        progress_reporter: IProgressReporter,
        message: str = "%s of %s",
    ) -> None:
        self.progress_reporter = progress_reporter
        self.message = message
        self._lock = threading.Lock()
        self._n_total = 0
        self._n_done = 0

    def increment_total_steps(self):
        with self._lock:
            self._n_total += 1
            self._report()

    def increment_step_done(self):
        with self._lock:
            self._n_done += 1
            self._report()

    def _report(self):
        # Note: always called with the lock held.
        self.progress_reporter.set_additional_info(
            self.message % (self._n_done, self._n_total)
        )
def cancel(progress_id: str) -> bool:
    """Requests cancellation of the progress with the given id.

    Returns True when a matching live reporter was found (and flagged).
    """
    reporter = _progress_id_to_progress_reporter.get(progress_id)
    if reporter is None:
        return False
    reporter.cancel()
    return True
@contextmanager
def progress_context(
    endpoint: IEndPoint,
    title: str,
    dir_cache: Optional[IDirCache],
    elapsed_time_key=None,
    cancellable: bool = False,
) -> Iterator[IProgressReporter]:
    """
    Creates a progress context which submits $/customProgress notifications to
    the client.

    Automatically updates the progress based on a previous invocation for some
    action with the same title (stores the elapsed time at the dir_cache).

    :param dir_cache:
        If None, an estimate for the task is not loaded/saved.

    :param elapsed_time_key:
        If None, the default is using the title as an entry in the dir cache,
        otherwise, the given key is used to load/save the time taken in the
        cache dir.
    """
    progress_reporter = _ProgressReporter(
        endpoint,
        title,
        dir_cache,
        elapsed_time_key=elapsed_time_key,
        cancellable=cancellable,
    )
    # Register it so `cancel(progress_id)` can find it and push it onto this
    # thread's stack so `get_current_progress_reporter()` sees it.
    _progress_id_to_progress_reporter[progress_reporter.id] = progress_reporter
    try:
        stack = _progress_context._stack
    except AttributeError:
        stack = _progress_context._stack = []
    stack.append(progress_reporter)
    try:
        yield progress_reporter
    finally:
        del _progress_id_to_progress_reporter[progress_reporter.id]
        del stack[-1]
        progress_reporter.finish()
from functools import partial
import itertools
from robocorp_ls_core.robotframework_log import get_logger, get_log_level
import json
from typing import Optional, Dict
log = get_logger(__name__)
# Sentinel: when put in the write queue, tells the writer thread to stop.
STOP_WRITER_THREAD = "STOP_WRITER_THREAD"

# Sentinel: passed to process_command by the reader thread when it stops.
READER_THREAD_STOPPED = "READER_THREAD_STOPPED"
def read(stream, debug_prefix=b"read") -> Optional[Dict]:
    """
    Reads one message from the stream and returns the related dict (or None if
    EOF was reached).

    Fix: the json decode failure used a bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) and discarded the original error; it's now
    narrowed and chained.

    :param stream:
        The (binary) stream we should be reading from.

    :return dict|NoneType:
        The dict which represents a message or None if the stream was closed.
    """
    headers = {}
    while True:
        # Interpret the http protocol headers
        line = stream.readline()  # The trailing \r\n should be there.

        if get_log_level() > 1:
            log.debug(
                (
                    debug_prefix
                    + b": >>%s<<\n"
                    % (line.replace(b"\r", b"\\r").replace(b"\n", b"\\n"))
                ).decode("utf-8", "replace")
            )

        if not line:  # EOF
            return None
        line = line.strip().decode("ascii")
        if not line:  # Read just a new line without any contents
            break
        try:
            name, value = line.split(": ", 1)
        except ValueError:
            raise RuntimeError("Invalid header line: {}.".format(line))
        headers[name.strip()] = value.strip()

    if not headers:
        raise RuntimeError("Got message without headers.")

    content_length = int(headers["Content-Length"])

    # Get the actual json
    body = _read_len(stream, content_length)
    if get_log_level() > 1:
        log.debug((debug_prefix + b": %s" % (body,)).decode("utf-8", "replace"))

    try:
        return json.loads(body.decode("utf-8"))
    except Exception as e:
        raise RuntimeError(f"Error reading: {body!r}") from e
def _read_len(stream, content_length) -> bytes:
    """Reads exactly `content_length` bytes from the stream.

    Fix: if the stream hit EOF before `content_length` bytes were available,
    ``stream.read`` kept returning ``b""`` and this looped forever; it now
    raises ConnectionError (which reader_thread already handles as a normal
    disconnect).
    """
    buf = b""
    if not content_length:
        return buf

    # Grab the body
    while True:
        data = stream.read(content_length - len(buf))
        if not data:
            # EOF: the stream closed before the full message arrived.
            raise ConnectionError(
                "Expected to read message up to len == %s (already read: %s)."
                % (content_length, len(buf))
            )
        if not buf and len(data) == content_length:
            # Common case
            return data
        buf += data
        if len(buf) == content_length:
            return buf
        if len(buf) > content_length:
            raise AssertionError(
                "Expected to read message up to len == %s (already read: %s). Found:\n%s"
                % (content_length, len(buf), buf.decode("utf-8", "replace"))
            )
        # len(buf) < content_length (just keep on going).
def reader_thread(
    stream,
    process_command,
    write_queue,
    debug_prefix=b"read",
    update_ids_from_dap=False,
):
    """Reads DAP messages from `stream` until EOF, converting each json dict
    into its schema class and handing it to `process_command`.

    If handling a message fails, an error response (for that message's seq)
    is put in `write_queue`. Whenever the loop ends (EOF or error),
    `process_command` is called with the READER_THREAD_STOPPED sentinel.
    """
    from robocorp_ls_core.debug_adapter_core.dap import dap_base_schema
    from robocorp_ls_core.debug_adapter_core.dap import (
        dap_schema,  # @UnusedImport -- register classes
    )
    from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Response

    try:
        while True:
            data = read(stream, debug_prefix)
            if data is None:
                # EOF reached.
                break
            try:
                # A response with success == False doesn't need to be translated
                # as the original response (to avoid the validation).
                if not data.get("success", True) and data.get("type") == "response":
                    protocol_message = dap_base_schema.from_dict(
                        data, update_ids_from_dap=update_ids_from_dap, cls=Response
                    )
                else:
                    protocol_message = dap_base_schema.from_dict(
                        data, update_ids_from_dap=update_ids_from_dap
                    )
                process_command(protocol_message)
            except Exception as e:
                log.exception("Error processing message.")
                seq = data.get("seq")
                if seq:
                    # Report the failure back to the requester.
                    error_msg = {
                        "type": "response",
                        "request_seq": seq,
                        "success": False,
                        "command": data.get("command", "<unknown"),
                        "message": "Error processing message: %s" % (e,),
                    }
                    write_queue.put(error_msg)
    except ConnectionError:
        # Normal disconnect; only worth logging at a high log level.
        if get_log_level() > 2:
            log.exception("ConnectionError (ignored).")
    except:
        log.exception("Error reading message.")
    finally:
        process_command(READER_THREAD_STOPPED)
def writer_thread_no_auto_seq(
    stream, queue, debug_prefix="write", update_ids_to_dap=False
):
    """
    Same as writer_thread but does not set the message 'seq' automatically
    (meant to be used when responses, which need the seq id set, need to be
    handled).

    Fixes: the exit log said "Exit reader thread." (copy-paste from
    reader_thread) and the bare ``except:`` clauses were narrowed so that
    KeyboardInterrupt/SystemExit aren't swallowed.
    """
    try:
        while True:
            to_write = queue.get()
            if to_write is STOP_WRITER_THREAD:
                log.debug("STOP_WRITER_THREAD")
                stream.close()
                break

            if isinstance(to_write, dict):
                assert "seq" in to_write
                try:
                    to_write = json.dumps(to_write)
                except Exception:
                    log.exception("Error serializing %s to json.", to_write)
                    continue

            else:
                to_json = getattr(to_write, "to_json", None)
                if to_json is not None:
                    # Some protocol message
                    assert to_write.seq >= 0
                    try:
                        to_write = to_json(update_ids_to_dap=update_ids_to_dap)
                    except Exception:
                        log.exception("Error serializing %s to json.", to_write)
                        continue

            if get_log_level() > 1:
                log.debug(debug_prefix + ": %s\n", to_write)

            if to_write.__class__ == bytes:
                as_bytes = to_write
            else:
                as_bytes = to_write.encode("utf-8")

            # DAP/LSP wire format: Content-Length header + json payload.
            stream.write(
                ("Content-Length: %s\r\n\r\n" % (len(as_bytes))).encode("ascii")
            )
            stream.write(as_bytes)
            stream.flush()
    except Exception:
        log.exception("Error writing message.")
    finally:
        log.debug("Exit writer thread.")
def writer_thread(stream, queue, debug_prefix="write", update_ids_to_dap=False):
    """
    Same as writer_thread_no_auto_seq but sets the message 'seq'
    automatically (incremented for each message written).

    Fixes: removed the corrupted trailing line, corrected the exit log (it
    said "Exit reader thread.") and narrowed the bare ``except:`` clauses.
    """
    _next_seq = partial(next, itertools.count())

    try:
        while True:
            to_write = queue.get()
            if to_write is STOP_WRITER_THREAD:
                log.debug("STOP_WRITER_THREAD")
                stream.close()
                break

            if isinstance(to_write, dict):
                to_write["seq"] = _next_seq()
                try:
                    to_write = json.dumps(to_write)
                except Exception:
                    log.exception("Error serializing %s to json.", to_write)
                    continue

            else:
                to_json = getattr(to_write, "to_json", None)
                if to_json is not None:
                    # Some protocol message
                    to_write.seq = _next_seq()
                    try:
                        to_write = to_json(update_ids_to_dap=update_ids_to_dap)
                    except Exception:
                        log.exception("Error serializing %s to json.", to_write)
                        continue

            if get_log_level() > 1:
                log.debug(debug_prefix + ": %s\n", to_write)

            if to_write.__class__ == bytes:
                as_bytes = to_write
            else:
                as_bytes = to_write.encode("utf-8")

            # DAP/LSP wire format: Content-Length header + json payload.
            stream.write(
                ("Content-Length: %s\r\n\r\n" % (len(as_bytes))).encode("ascii")
            )
            stream.write(as_bytes)
            stream.flush()
    except ConnectionResetError:
        pass  # No need to log this (the other side closed the connection).
    except Exception:
        log.exception("Error writing message.")
    finally:
        log.debug("Exit writer thread.")
import json
import itertools
from functools import partial
from robocorp_ls_core.robotframework_log import get_logger
log = get_logger(__name__)
class BaseSchema(object):
    """Base class for the generated DAP protocol message classes.

    Also holds the process-wide translation tables between the ids used
    internally and the sequential ids exposed to the DAP client.

    Fix: ``_translate_id_from_dap`` used a bare ``except:`` which reported any
    unrelated error (e.g. an unhashable dap_id) as a "Wrong ID"; it's now
    narrowed to KeyError.
    """

    type: str
    seq: int

    @staticmethod
    def initialize_ids_translation():
        # Resets the translation tables (0 and None always map to themselves).
        BaseSchema._dap_id_to_obj_id = {0: 0, None: None}
        BaseSchema._obj_id_to_dap_id = {0: 0, None: None}
        BaseSchema._next_dap_id = partial(next, itertools.count(1))

    def to_json(self, update_ids_to_dap=False):
        return json.dumps(self.to_dict(update_ids_to_dap=update_ids_to_dap))

    def to_dict(self, update_ids_to_dap=False) -> dict:
        raise NotImplementedError("Must be overridden.")

    @staticmethod
    def _translate_id_to_dap(obj_id):
        if obj_id == "*":
            return "*"
        # Note: we don't invalidate ids, so, if some object starts using the same id
        # of another object, the same id will be used.
        dap_id = BaseSchema._obj_id_to_dap_id.get(obj_id)
        if dap_id is None:
            dap_id = BaseSchema._obj_id_to_dap_id[obj_id] = BaseSchema._next_dap_id()
            BaseSchema._dap_id_to_obj_id[dap_id] = obj_id
        return dap_id

    @staticmethod
    def _translate_id_from_dap(dap_id):
        if dap_id == "*":
            return "*"
        try:
            return BaseSchema._dap_id_to_obj_id[dap_id]
        except KeyError:
            raise KeyError("Wrong ID sent from the client: %s" % (dap_id,))

    @staticmethod
    def update_dict_ids_to_dap(dct):
        return dct

    @staticmethod
    def update_dict_ids_from_dap(dct):
        return dct


BaseSchema.initialize_ids_translation()
# Registries mapping DAP command/event names (and class names) to the
# generated schema classes.
_requests_to_types = {}
_responses_to_types = {}
_event_to_types = {}
_all_messages = {}


def register(cls):
    """Class decorator: registers a message class under its own name."""
    _all_messages[cls.__name__] = cls
    return cls


def _registering_decorator(registry, key):
    # Builds a class decorator that stores the class under `key` in `registry`.
    def do_register(cls):
        registry[key] = cls
        return cls

    return do_register


def register_request(command):
    """Class decorator factory: registers a request class for `command`."""
    return _registering_decorator(_requests_to_types, command)


def register_response(command):
    """Class decorator factory: registers a response class for `command`."""
    return _registering_decorator(_responses_to_types, command)


def register_event(event):
    """Class decorator factory: registers an event class for `event`."""
    return _registering_decorator(_event_to_types, event)
def from_dict(dct, update_ids_from_dap=False, cls=None):
    """Builds the schema class instance matching the given message dict.

    When `cls` is not given it is looked up from the message type and its
    command/event name in the registries.
    """
    msg_type = dct.get("type")
    if msg_type is None:
        raise ValueError("Unable to make sense of message: %s" % (dct,))

    if cls is None:
        if msg_type == "request":
            to_type, use = _requests_to_types, dct["command"]
        elif msg_type == "response":
            to_type, use = _responses_to_types, dct["command"]
        else:
            to_type, use = _event_to_types, dct["event"]

        cls = to_type.get(use)
        if cls is None:
            raise ValueError(
                "Unable to create message from dict: %s. %s not in %s"
                % (dct, use, sorted(to_type.keys()))
            )

    try:
        return cls(update_ids_from_dap=update_ids_from_dap, **dct)
    except:
        msg = "Error creating %s from %s" % (cls, dct)
        log.exception(msg)
        raise
def from_json(json_msg, update_ids_from_dap=False, on_dict_loaded=lambda dct: None):
    """Parses a json message (str or bytes) into the matching schema instance."""
    if isinstance(json_msg, bytes):
        json_msg = json_msg.decode("utf-8")

    as_dict = json.loads(json_msg)
    on_dict_loaded(as_dict)
    try:
        return from_dict(as_dict, update_ids_from_dap=update_ids_from_dap)
    except:
        if as_dict.get("type") == "response" and not as_dict.get("success"):
            # Error messages may not have required body (return as a generic Response).
            return _all_messages["Response"](**as_dict)
        raise
def get_response_class(request):
    """Returns the response class registered for the given request (which may
    be either a schema instance or a plain message dict)."""
    command = request["command"] if request.__class__ == dict else request.command
    return _responses_to_types[command]
def build_response(request, kwargs=None):
    """Creates the (successful, unless overridden via `kwargs`) response for
    the given request.

    The returned response has ``seq == -1`` unless provided: the writer is
    expected to overwrite it with the real sequence number before sending.
    """
    if kwargs is None:
        kwargs = {"success": True}
    else:
        kwargs.setdefault("success", True)
    response_class = _responses_to_types[request.command]
    kwargs.setdefault("seq", -1)  # To be overwritten before sending
    return response_class(command=request.command, request_seq=request.seq, **kwargs)
def is_variable_to_translate(cls_name, var_name):
    """Whether the given property holds an id that must go through the
    dap<->internal id translation tables."""
    if var_name in ("variablesReference", "frameId", "threadId"):
        return True

    # It's `frameId`/`threadId` everywhere except on StackFrame/Thread
    # themselves, where the property is just `id`.
    return var_name == "id" and cls_name in ("StackFrame", "Thread")
def _get_noqa_for_var(prop_name):
    # Properties that would shadow a python builtin get a noqa marker so the
    # generated code doesn't trip the linter.
    if prop_name in ("type", "format", "id", "hex", "breakpoint", "filter"):
        return " # noqa (assign to builtin)"
    return ""
class _OrderedSet(object):
    # Not a good ordered set (just something small without adding any deps):
    # a list keeps insertion order, a set gives O(1) membership checks.

    def __init__(self, initial_contents=None):
        self._contents = []
        self._contents_as_set = set()
        for item in initial_contents or ():
            self.add(item)

    def add(self, x):
        if x in self._contents_as_set:
            return
        self._contents_as_set.add(x)
        self._contents.append(x)

    def discard(self, x):
        if x not in self._contents_as_set:
            return
        self._contents_as_set.remove(x)
        self._contents.remove(x)

    def copy(self):
        return _OrderedSet(self._contents)

    def update(self, contents):
        for x in contents:
            self.add(x)

    def __iter__(self):
        return iter(self._contents)

    def __contains__(self, item):
        return item in self._contents_as_set

    def __len__(self):
        return len(self._contents)

    def set_repr(self):
        if not self._contents:
            return "set()"
        return "set([" + ", ".join(repr(x) for x in self._contents) + "])"
class Ref(object):
    """A reference to another definition: its name plus the referenced data."""

    def __init__(self, ref, ref_data):
        self.ref = ref
        self.ref_data = ref_data

    def __str__(self):
        return self.ref
def load_schema_data():
    """Loads the DAP schema json (downloading and caching it on first use)."""
    import os.path
    import json

    json_file = os.path.join(os.path.dirname(__file__), "debugProtocol.json")
    if not os.path.exists(json_file):
        import requests

        req = requests.get(
            "https://raw.githubusercontent.com/microsoft/debug-adapter-protocol/gh-pages/debugAdapterProtocol.json"
        )
        assert req.status_code == 200
        with open(json_file, "wb") as stream:
            stream.write(req.content)

    with open(json_file, "rb") as json_contents:
        return json.loads(json_contents.read())
def load_custom_schema_data():
    """Loads the custom (non-standard) schema additions json."""
    import os.path
    import json

    json_file = os.path.join(os.path.dirname(__file__), "debugProtocolCustom.json")
    with open(json_file, "rb") as json_contents:
        return json.loads(json_contents.read())
def create_classes_to_generate_structure(json_schema_data):
    """Converts the raw json schema into a dict of per-class generation info.

    Each entry keeps the name, properties, required set, base definitions,
    description and enum data needed by the code-generation steps below.
    """
    definitions = json_schema_data["definitions"]

    class_to_generatees = {}

    for name, definition in definitions.items():
        all_of = definition.get("allOf")
        description = definition.get("description")
        is_enum = definition.get("type") == "string" and "enum" in definition
        enum_values = None
        if is_enum:
            enum_values = definition["enum"]
        properties = {}
        properties.update(definition.get("properties", {}))
        required = _OrderedSet(definition.get("required", _OrderedSet()))
        base_definitions = []
        if all_of is not None:
            # allOf: $ref entries name the base definitions; the remaining
            # entries add this class' own properties/required/description.
            # (Note: the loop variable deliberately shadows `definition`.)
            for definition in all_of:
                ref = definition.get("$ref")
                if ref is not None:
                    assert ref.startswith("#/definitions/")
                    ref = ref[len("#/definitions/") :]
                    base_definitions.append(ref)
                else:
                    if not description:
                        description = definition.get("description")
                    properties.update(definition.get("properties", {}))
                    required.update(
                        _OrderedSet(definition.get("required", _OrderedSet()))
                    )

        if isinstance(description, (list, tuple)):
            description = "\n".join(description)

        if (
            name == "ModulesRequest"
        ):  # Hack to accept modules request without arguments (ptvsd: 2050).
            required.discard("arguments")

        class_to_generatees[name] = dict(
            name=name,
            properties=properties,
            base_definitions=base_definitions,
            description=description,
            required=required,
            is_enum=is_enum,
            enum_values=enum_values,
        )
    return class_to_generatees
def collect_bases(curr_class, classes_to_generate, memo=None):
    """Return the transitive base-definition names of *curr_class*, nearest-first.

    *memo* tracks definitions already collected so shared bases (diamond
    inheritance) and cyclic references are visited only once.  Bug fix: the
    original code checked *memo* but never inserted into it, making the
    guard a no-op (duplicates were emitted and a cycle would recurse
    forever).
    """
    ret = []
    if memo is None:
        memo = {}
    for base_definition in curr_class["base_definitions"]:
        if base_definition not in memo:
            memo[base_definition] = True  # Mark as seen before recursing.
            ret.append(base_definition)
            ret.extend(
                collect_bases(
                    classes_to_generate[base_definition], classes_to_generate, memo
                )
            )
    return ret
def fill_properties_and_required_from_base(classes_to_generate):
    """Merge inherited "properties"/"required" into each class description.

    For each class, walks its base definitions (collect_bases) and folds the
    bases' entries in root-first so the subclass's own entries win while the
    base declaration order is preserved.  Mutates *classes_to_generate* in
    place.

    NOTE(review): only the class processed last in the iteration is
    returned; the caller appears to rely on the in-place mutation rather
    than the return value -- confirm before using the result.
    """
    # Now, resolve properties based on refs
    for class_to_generate in classes_to_generate.values():
        dct = {}
        s = _OrderedSet()
        # Bases come back nearest-first; reverse so the root is applied first.
        for base_definition in reversed(
            collect_bases(class_to_generate, classes_to_generate)
        ):
            # Note: go from base to current so that the initial order of the properties has that
            # same order.
            dct.update(classes_to_generate[base_definition].get("properties", {}))
            s.update(
                classes_to_generate[base_definition].get("required", _OrderedSet())
            )
        # The class's own entries override anything inherited.
        dct.update(class_to_generate["properties"])
        class_to_generate["properties"] = dct
        s.update(class_to_generate["required"])
        class_to_generate["required"] = s
    return class_to_generate
def update_class_to_generate_description(class_to_generate):
    """Re-flow the class description to 100 columns and indent every line."""
    import textwrap

    wrapped_lines = []
    for raw_line in class_to_generate["description"].splitlines():
        wrapped_lines.extend(textwrap.wrap(raw_line.strip(), 100))
        wrapped_lines.append("")
    # Drop the blank line(s) the loop leaves at the end.
    while wrapped_lines and not wrapped_lines[-1]:
        wrapped_lines.pop()
    class_to_generate["description"] = " " + ("\n ".join(wrapped_lines))
def update_class_to_generate_type(classes_to_generate, class_to_generate):
    """Resolve "$ref" property types into Ref instances.

    Properties that already carry a plain JSON "type" are left untouched; a
    property typed only through a "#/definitions/<Name>" reference gets its
    "type" replaced by a Ref pointing at the referenced class data.
    """
    for _name, prop_val in class_to_generate.get("properties").items():
        if prop_val.get("type", ""):
            continue  # Plain JSON type already present.
        ref = prop_val.pop("$ref", "")
        if not ref:
            continue
        assert ref.startswith("#/definitions/")
        target = ref[len("#/definitions/") :]
        prop_val["type"] = Ref(target, classes_to_generate[target])
def update_class_to_generate_register_dec(classes_to_generate, class_to_generate):
    """Fill in the "register_request"/"register_dec" decorator strings.

    Every class gets the plain "@register" decorator.  Requests, responses
    and events whose "type" enum pins them to a single command/event name
    additionally get a "@register_<kind>(<name>)" line; for responses the
    command name is looked up on the matching request class.
    """
    class_to_generate["register_request"] = ""
    class_to_generate["register_dec"] = "@register"

    properties = class_to_generate.get("properties")
    enum_type = properties.get("type", {}).get("enum")
    if not (enum_type and len(enum_type) == 1):
        return
    msg_type = next(iter(enum_type))
    if msg_type not in ("request", "response", "event"):
        return

    if msg_type == "request":
        command = properties.get("command")
    elif msg_type == "event":
        command = properties.get("event")
    else:
        # The actual command is typed in the request.
        response_name = class_to_generate["name"]
        request_name = response_name[: -len("Response")] + "Request"
        if request_name in classes_to_generate:
            command = classes_to_generate[request_name]["properties"].get("command")
        elif response_name == "ErrorResponse":
            command = {"enum": ["error"]}
        else:
            raise AssertionError("Unhandled: %s" % (response_name,))

    if command:
        enum = command.get("enum")
        if enum and len(enum) == 1:
            class_to_generate["register_request"] = "@register_%s(%r)\n" % (
                msg_type,
                enum[0],
            )
def extract_prop_name_and_prop(class_to_generate):
    """Return (name, prop) pairs with required properties sorted first.

    "seq" sorts after the other required keys (it gets a default of -1 in
    the generated __init__); optional properties come last.  Python's sort
    is stable, so declaration order is kept within each group.
    """
    required = _OrderedSet(class_to_generate.get("required", _OrderedSet()))

    def sort_rank(pair):
        name = pair[0]
        if name not in required:
            return 1
        return 0.5 if name == "seq" else 0

    return sorted(class_to_generate.get("properties").items(), key=sort_rank)
def update_class_to_generate_to_json(class_to_generate):
    """Generate the to_dict()/update_dict_ids_* method sources for a class.

    Fills the "to_dict", "update_dict_ids_from_dap" and
    "update_dict_ids_to_dap" keys of *class_to_generate* with already
    indented Python source text that the class template splices into the
    generated class body.
    """
    required = _OrderedSet(class_to_generate.get("required", _OrderedSet()))
    prop_name_and_prop = extract_prop_name_and_prop(class_to_generate)
    to_dict_body = [
        "def to_dict(self, update_ids_to_dap=False): # noqa (update_ids_to_dap may be unused)"
    ]
    # Properties holding ids that must be translated between the DAP wire
    # representation and the internal one.
    translate_prop_names = []
    for prop_name, prop in prop_name_and_prop:
        if is_variable_to_translate(class_to_generate["name"], prop_name):
            translate_prop_names.append(prop_name)
    # Copy each property into a local; arrays of schema objects are
    # converted element-wise through their own to_dict().
    for prop_name, prop in prop_name_and_prop:
        namespace = dict(prop_name=prop_name, noqa=_get_noqa_for_var(prop_name))
        to_dict_body.append(
            " %(prop_name)s = self.%(prop_name)s%(noqa)s" % namespace
        )
        if prop.get("type") == "array":
            to_dict_body.append(
                ' if %(prop_name)s and hasattr(%(prop_name)s[0], "to_dict"):'
                % namespace
            )
            to_dict_body.append(
                " %(prop_name)s = [x.to_dict() for x in %(prop_name)s]"
                % namespace
            )
    if translate_prop_names:
        to_dict_body.append(" if update_ids_to_dap:")
        for prop_name in translate_prop_names:
            namespace = dict(prop_name=prop_name, noqa=_get_noqa_for_var(prop_name))
            to_dict_body.append(" if %(prop_name)s is not None:" % namespace)
            to_dict_body.append(
                " %(prop_name)s = self._translate_id_to_dap(%(prop_name)s)%(noqa)s"
                % namespace
            )
    # Classmethod translating ids in a plain dict coming from the DAP side.
    if not translate_prop_names:
        update_dict_ids_from_dap_body = []
    else:
        update_dict_ids_from_dap_body = [
            "",
            "",
            "@classmethod",
            "def update_dict_ids_from_dap(cls, dct):",
        ]
        for prop_name in translate_prop_names:
            namespace = dict(prop_name=prop_name)
            update_dict_ids_from_dap_body.append(
                " if %(prop_name)r in dct:" % namespace
            )
            update_dict_ids_from_dap_body.append(
                " dct[%(prop_name)r] = cls._translate_id_from_dap(dct[%(prop_name)r])"
                % namespace
            )
        update_dict_ids_from_dap_body.append(" return dct")
    class_to_generate["update_dict_ids_from_dap"] = _indent_lines(
        "\n".join(update_dict_ids_from_dap_body)
    )
    to_dict_body.append(" dct = {")
    first_not_required = False
    for prop_name, prop in prop_name_and_prop:
        # Nested schema objects (non-enum Refs) are serialized recursively.
        use_to_dict = prop["type"].__class__ == Ref and not prop["type"].ref_data.get(
            "is_enum", False
        )
        is_array = prop["type"] == "array"
        ref_array_cls_name = ""
        if is_array:
            ref = prop["items"].get("$ref")
            if ref is not None:
                ref_array_cls_name = ref.split("/")[-1]
        namespace = dict(prop_name=prop_name, ref_array_cls_name=ref_array_cls_name)
        if prop_name in required:
            if use_to_dict:
                to_dict_body.append(
                    " %(prop_name)r: %(prop_name)s.to_dict(update_ids_to_dap=update_ids_to_dap),"
                    % namespace
                )
            else:
                if ref_array_cls_name:
                    to_dict_body.append(
                        " %(prop_name)r: [%(ref_array_cls_name)s.update_dict_ids_to_dap(o) for o in %(prop_name)s] if (update_ids_to_dap and %(prop_name)s) else %(prop_name)s,"
                        % namespace
                    )
                else:
                    to_dict_body.append(
                        " %(prop_name)r: %(prop_name)s," % namespace
                    )
        else:
            # The first optional property closes the dict literal holding
            # the required entries; optionals are then added conditionally.
            if not first_not_required:
                first_not_required = True
                to_dict_body.append(" }")
            to_dict_body.append(" if %(prop_name)s is not None:" % namespace)
            if use_to_dict:
                to_dict_body.append(
                    " dct[%(prop_name)r] = %(prop_name)s.to_dict(update_ids_to_dap=update_ids_to_dap)"
                    % namespace
                )
            else:
                if ref_array_cls_name:
                    to_dict_body.append(
                        " dct[%(prop_name)r] = [%(ref_array_cls_name)s.update_dict_ids_to_dap(o) for o in %(prop_name)s] if (update_ids_to_dap and %(prop_name)s) else %(prop_name)s"
                        % namespace
                    )
                else:
                    to_dict_body.append(
                        " dct[%(prop_name)r] = %(prop_name)s" % namespace
                    )
    # All properties were required: the dict literal is still open.
    if not first_not_required:
        first_not_required = True
        to_dict_body.append(" }")
    to_dict_body.append(" dct.update(self.kwargs)")
    to_dict_body.append(" return dct")
    class_to_generate["to_dict"] = _indent_lines("\n".join(to_dict_body))
    # Classmethod translating ids in a plain dict going to the DAP side.
    if not translate_prop_names:
        update_dict_ids_to_dap_body = []
    else:
        update_dict_ids_to_dap_body = [
            "",
            "",
            "@classmethod",
            "def update_dict_ids_to_dap(cls, dct):",
        ]
        for prop_name in translate_prop_names:
            namespace = dict(prop_name=prop_name)
            update_dict_ids_to_dap_body.append(
                " if %(prop_name)r in dct:" % namespace
            )
            update_dict_ids_to_dap_body.append(
                " dct[%(prop_name)r] = cls._translate_id_to_dap(dct[%(prop_name)r])"
                % namespace
            )
        update_dict_ids_to_dap_body.append(" return dct")
    class_to_generate["update_dict_ids_to_dap"] = _indent_lines(
        "\n".join(update_dict_ids_to_dap_body)
    )
def update_class_to_generate_init(class_to_generate):
    """Generate the __init__ source for a class (stored under "init").

    Required properties become positional arguments ("seq" gets a default
    of -1), optional ones default to None.  Ref-typed properties accept
    either an instance or a raw dict; id-bearing properties are translated
    from the DAP wire representation when update_ids_from_dap is true.
    """
    args = []
    init_body = []
    docstring = []
    required = _OrderedSet(class_to_generate.get("required", _OrderedSet()))
    prop_name_and_prop = extract_prop_name_and_prop(class_to_generate)
    translate_prop_names = []
    for prop_name, prop in prop_name_and_prop:
        if is_variable_to_translate(class_to_generate["name"], prop_name):
            translate_prop_names.append(prop_name)
        enum = prop.get("enum")
        if enum and len(enum) == 1:
            # Single-valued enums are fixed -- no constructor argument.
            init_body.append(
                " self.%(prop_name)s = %(enum)r"
                % dict(prop_name=prop_name, enum=next(iter(enum)))
            )
        else:
            if prop_name in required:
                if prop_name == "seq":
                    args.append(prop_name + "=-1")
                else:
                    args.append(prop_name)
            else:
                args.append(prop_name + "=None")
            if prop["type"].__class__ == Ref:
                ref = prop["type"]
                ref_data = ref.ref_data
                if ref_data.get("is_enum", False):
                    init_body.append(
                        " assert %s in %s.VALID_VALUES" % (prop_name, str(ref))
                    )
                    init_body.append(
                        " self.%(prop_name)s = %(prop_name)s"
                        % dict(prop_name=prop_name)
                    )
                else:
                    # Accept an already-built instance or a raw dict.
                    namespace = dict(prop_name=prop_name, ref_name=str(ref))
                    init_body.append(" if %(prop_name)s is None:" % namespace)
                    init_body.append(
                        " self.%(prop_name)s = %(ref_name)s()" % namespace
                    )
                    init_body.append(" else:")
                    init_body.append(
                        " self.%(prop_name)s = %(ref_name)s(update_ids_from_dap=update_ids_from_dap, **%(prop_name)s) if %(prop_name)s.__class__ != %(ref_name)s else %(prop_name)s"
                        % namespace
                    )
            else:
                init_body.append(
                    " self.%(prop_name)s = %(prop_name)s" % dict(prop_name=prop_name)
                )
                if prop["type"] == "array":
                    ref = prop["items"].get("$ref")
                    if ref is not None:
                        # Array items stay plain dicts; only their ids are
                        # translated in place.
                        ref_array_cls_name = ref.split("/")[-1]
                        init_body.append(
                            " if update_ids_from_dap and self.%(prop_name)s:"
                            % dict(prop_name=prop_name)
                        )
                        init_body.append(
                            " for o in self.%(prop_name)s:"
                            % dict(prop_name=prop_name)
                        )
                        init_body.append(
                            " %(ref_array_cls_name)s.update_dict_ids_from_dap(o)"
                            % dict(ref_array_cls_name=ref_array_cls_name)
                        )
        # Collect the :param: line for the generated docstring.
        prop_type = prop["type"]
        prop_description = prop.get("description", "")
        if isinstance(prop_description, (list, tuple)):
            prop_description = "\n ".join(prop_description)
        docstring.append(
            ":param %(prop_type)s %(prop_name)s: %(prop_description)s"
            % dict(
                prop_type=prop_type,
                prop_name=prop_name,
                prop_description=prop_description,
            )
        )
    if translate_prop_names:
        init_body.append(" if update_ids_from_dap:")
        for prop_name in translate_prop_names:
            init_body.append(
                " self.%(prop_name)s = self._translate_id_from_dap(self.%(prop_name)s)"
                % dict(prop_name=prop_name)
            )
    docstring = _indent_lines("\n".join(docstring))
    init_body = "\n".join(init_body)
    # Actually bundle the whole __init__ from the parts.
    args = ", ".join(args)
    if args:
        args = ", " + args
    # Note: added kwargs because some messages are expected to be extended by the user (so, we'll actually
    # make all extendable so that we don't have to worry about which ones -- we loose a little on typing,
    # but may be better than doing a whitelist based on something only pointed out in the documentation).
    class_to_generate[
        "init"
    ] = '''def __init__(self%(args)s, update_ids_from_dap=False, **kwargs): # noqa (update_ids_from_dap may be unused)
"""
%(docstring)s
"""
%(init_body)s
self.kwargs = kwargs
''' % dict(
        args=args, init_body=init_body, docstring=docstring
    )
    class_to_generate["init"] = _indent_lines(class_to_generate["init"])
def update_class_to_generate_props(class_to_generate):
    """Render the class's properties dict as the __props__ class attribute."""
    import json

    def default(o):
        # Refs are not JSON-serializable; emit the referenced name instead.
        if isinstance(o, Ref):
            return o.ref
        raise AssertionError("Unhandled: %s" % (o,))

    dumped = json.dumps(class_to_generate["properties"], indent=4, default=default)
    class_to_generate["props"] = " __props__ = %s" % _indent_lines(dumped).strip()
def update_class_to_generate_refs(class_to_generate):
    """Render the names of Ref-typed properties as the __refs__ attribute."""
    ref_names = _OrderedSet(
        name
        for (name, val) in class_to_generate["properties"].items()
        if val["type"].__class__ == Ref
    )
    class_to_generate["refs"] = " __refs__ = %s" % ref_names.set_repr()
def update_class_to_generate_enums(class_to_generate):
    """Render the enum constants (and VALID_VALUES) for enum-backed classes.

    Non-enum classes get an empty "enums" entry.
    """
    class_to_generate["enums"] = ""
    if not class_to_generate.get("is_enum", False):
        return
    values = class_to_generate["enum_values"]
    parts = [" %s = %r\n" % (value.upper(), value) for value in values]
    parts.append("\n")
    parts.append(" VALID_VALUES = %s\n\n" % _OrderedSet(values).set_repr())
    class_to_generate["enums"] = "".join(parts)
def update_class_to_generate_objects(classes_to_generate, class_to_generate):
    """Hoist inline "object" properties into generated classes of their own.

    Each object-typed property becomes a new <ClassName><Key> class that is
    registered in *classes_to_generate*; the property's type then becomes a
    Ref to that class.  Recurses so nested inline objects are handled too.
    """
    for key, val in class_to_generate["properties"].items():
        if val["type"] != "object":
            continue
        hoisted = val.copy()
        hoisted.update(
            {
                "name": "%s%s" % (class_to_generate["name"], key.title()),
                "description": ' "%s" of %s' % (key, class_to_generate["name"]),
            }
        )
        hoisted.setdefault("properties", {})
        assert hoisted["name"] not in classes_to_generate
        classes_to_generate[hoisted["name"]] = hoisted
        update_class_to_generate_type(classes_to_generate, hoisted)
        update_class_to_generate_props(hoisted)
        # Update nested object types
        update_class_to_generate_objects(classes_to_generate, hoisted)
        val["type"] = Ref(hoisted["name"], classes_to_generate[hoisted["name"]])
        val.pop("properties", None)
def gen_debugger_protocol():
    """Regenerate dap_schema.py from the DAP JSON schemas.

    Loads the standard schema (downloading it on first use) plus the custom
    additions, derives the class descriptions, renders every class through
    the template below, and writes the result next to this file.
    """
    import os.path
    import sys
    # Dict insertion order drives the generated output; require 3.6+.
    if sys.version_info[:2] < (3, 6):
        raise AssertionError(
            "Must be run with Python 3.6 onwards (to keep dict order)."
        )
    classes_to_generate = create_classes_to_generate_structure(load_schema_data())
    classes_to_generate.update(
        create_classes_to_generate_structure(load_custom_schema_data())
    )
    # NOTE(review): this assignment is immediately shadowed by the loop
    # variable below; only the in-place mutation matters here.
    class_to_generate = fill_properties_and_required_from_base(classes_to_generate)
    # First pass may add new classes (hoisted inline objects), so iterate a
    # snapshot of the values.
    for class_to_generate in list(classes_to_generate.values()):
        update_class_to_generate_description(class_to_generate)
        update_class_to_generate_type(classes_to_generate, class_to_generate)
        update_class_to_generate_props(class_to_generate)
        update_class_to_generate_objects(classes_to_generate, class_to_generate)
    # Second pass renders the per-class source fragments.
    for class_to_generate in classes_to_generate.values():
        update_class_to_generate_refs(class_to_generate)
        update_class_to_generate_init(class_to_generate)
        update_class_to_generate_enums(class_to_generate)
        update_class_to_generate_to_json(class_to_generate)
        update_class_to_generate_register_dec(classes_to_generate, class_to_generate)
    class_template = '''
%(register_request)s%(register_dec)s
class %(name)s(BaseSchema):
"""
%(description)s
Note: automatically generated code. Do not edit manually.
"""
%(enums)s%(props)s
%(refs)s
__slots__ = list(__props__.keys()) + ['kwargs']
%(init)s%(update_dict_ids_from_dap)s
%(to_dict)s%(update_dict_ids_to_dap)s
'''
    contents = []
    contents.append("# coding: utf-8")
    contents.append("# fmt: off")
    contents.append("# Automatically generated code.")
    contents.append("# Do not edit manually.")
    contents.append("# Generated by running: %s" % os.path.basename(__file__))
    contents.append(
        "from .dap_base_schema import BaseSchema, register, register_request, register_response, register_event"
    )
    contents.append("")
    for class_to_generate in classes_to_generate.values():
        contents.append(class_template % class_to_generate)
    contents.append("# fmt: on")
    contents.append("")
    # Write the generated module next to this script.
    parent_dir = os.path.dirname(__file__)
    schema = os.path.join(parent_dir, "dap_schema.py")
    with open(schema, "w", encoding="utf-8") as stream:
        stream.write("\n".join(contents))
def _indent_lines(lines, indent=" "):
out_lines = []
for line in lines.splitlines(keepends=True):
out_lines.append(indent + line)
return "".join(out_lines)
if __name__ == "__main__":
    gen_debugger_protocol()
from datetime import datetime
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def _byte_string(s):
"""Cast a string or byte string to an ASCII byte string."""
return s.encode('ASCII')
# NUL byte: the terminator between timezone abbreviations in the tzfile
# name blob (see build_tzinfo).
_NULL = _byte_string('\0')
def _std_string(s):
"""Cast a string or byte string to an ASCII string."""
return str(s.decode('ASCII'))
def build_tzinfo(zone, fp):
    """Build a tzinfo instance for *zone* from a tzfile(5) binary stream *fp*.

    Returns an instance of a dynamically created StaticTzInfo subclass when
    the zone has a single fixed offset, otherwise of a DstTzInfo subclass
    carrying the full UTC transition table.
    """
    # Header: 4-byte magic, version byte, 15 reserved bytes, six counts.
    head_fmt = '>4s c 15x 6l'
    head_size = calcsize(head_fmt)
    (magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
     typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
    # Make sure it is a tzfile(5) file
    assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic)
    # Read out the transition times, localtime indices and ttinfo structures.
    data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
        timecnt=timecnt, ttinfo='lBB' * typecnt, charcnt=charcnt)
    data_size = calcsize(data_fmt)
    data = unpack(data_fmt, fp.read(data_size))
    # make sure we unpacked the right number of values
    assert len(data) == 2 * timecnt + 3 * typecnt + 1
    transitions = [memorized_datetime(trans)
                   for trans in data[:timecnt]]
    lindexes = list(data[timecnt:2 * timecnt])
    ttinfo_raw = data[2 * timecnt:-1]
    tznames_raw = data[-1]
    del data
    # Process ttinfo into separate structs
    # Each entry becomes (utcoffset_seconds, isdst_flag, abbreviation).
    ttinfo = []
    tznames = {}
    i = 0
    while i < len(ttinfo_raw):
        # have we looked up this timezone name yet?
        tzname_offset = ttinfo_raw[i + 2]
        if tzname_offset not in tznames:
            # Abbreviations are NUL-terminated strings inside tznames_raw.
            nul = tznames_raw.find(_NULL, tzname_offset)
            if nul < 0:
                nul = len(tznames_raw)
            tznames[tzname_offset] = _std_string(
                tznames_raw[tzname_offset:nul])
        ttinfo.append((ttinfo_raw[i],
                       bool(ttinfo_raw[i + 1]),
                       tznames[tzname_offset]))
        i += 3
    # Now build the timezone object
    if len(ttinfo) == 1 or len(transitions) == 0:
        # NOTE(review): no-op tuple expression below -- presumably a leftover
        # probe that fails fast (IndexError) if ttinfo is empty; confirm.
        ttinfo[0][0], ttinfo[0][2]
        cls = type(zone, (StaticTzInfo,), dict(
            zone=zone,
            _utcoffset=memorized_timedelta(ttinfo[0][0]),
            _tzname=ttinfo[0][2]))
    else:
        # Early dates use the first standard time ttinfo
        i = 0
        while ttinfo[i][1]:
            i += 1
        if ttinfo[i] == ttinfo[lindexes[0]]:
            transitions[0] = datetime.min
        else:
            transitions.insert(0, datetime.min)
            lindexes.insert(0, i)
        # calculate transition info
        transition_info = []
        for i in range(len(transitions)):
            inf = ttinfo[lindexes[i]]
            utcoffset = inf[0]
            if not inf[1]:
                dst = 0
            else:
                # Walk back to the nearest preceding standard-time entry to
                # derive the DST offset.  (Assumes the first transition is
                # standard time, which the insertion above guarantees.)
                for j in range(i - 1, -1, -1):
                    prev_inf = ttinfo[lindexes[j]]
                    if not prev_inf[1]:
                        break
                dst = inf[0] - prev_inf[0]  # dst offset
                # Bad dst? Look further. DST > 24 hours happens when
                # a timzone has moved across the international dateline.
                if dst <= 0 or dst > 3600 * 3:
                    for j in range(i + 1, len(transitions)):
                        stdinf = ttinfo[lindexes[j]]
                        if not stdinf[1]:
                            dst = inf[0] - stdinf[0]
                            if dst > 0:
                                break  # Found a useful std time.
            tzname = inf[2]
            # Round utcoffset and dst to the nearest minute or the
            # datetime library will complain. Conversions to these timezones
            # might be up to plus or minus 30 seconds out, but it is
            # the best we can do.
            utcoffset = int((utcoffset + 30) // 60) * 60
            dst = int((dst + 30) // 60) * 60
            transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
        cls = type(zone, (DstTzInfo,), dict(
            zone=zone,
            _utc_transition_times=transitions,
            _transition_info=transition_info))
    return cls()
if __name__ == '__main__':
    # Manual smoke test: parse two zones straight from the bundled zoneinfo
    # database and (below) dump the transition table of the last one built.
    import os.path
    from pprint import pprint
    base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
    # NOTE(review): the file objects are opened without a context manager
    # and never closed -- acceptable only for this throwaway demo.
    tz = build_tzinfo('Australia/Melbourne',
                      open(os.path.join(base, 'Australia', 'Melbourne'), 'rb'))
    tz = build_tzinfo('US/Eastern',
                      open(os.path.join(base, 'US', 'Eastern'), 'rb'))
    pprint(tz._utc_transition_times)
from datetime import tzinfo, timedelta, datetime
from pytz import HOUR, ZERO, UTC
__all__ = [
'FixedOffset',
'LocalTimezone',
'USTimeZone',
'Eastern',
'Central',
'Mountain',
'Pacific',
'UTC'
]
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC."""

    def __init__(self, offset, name):
        # *offset* is minutes east of UTC; *name* is the tz abbreviation.
        self.__delta = timedelta(minutes=offset)
        self.__label = name

    def utcoffset(self, dt):
        return self.__delta

    def tzname(self, dt):
        return self.__label

    def dst(self, dt):
        # A fixed-offset zone never observes daylight saving time.
        return ZERO
import time as _time

# Local standard-time offset from UTC, per the platform's C library.
STDOFFSET = timedelta(seconds=-_time.timezone)
# Local DST offset from UTC (falls back to standard when DST is unused).
if _time.daylight:
    DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
    DSTOFFSET = STDOFFSET

# Amount the clock shifts while DST is in effect (usually one hour).
DSTDIFF = DSTOFFSET - STDOFFSET
# A class capturing the platform's idea of local time.
class LocalTimezone(tzinfo):
    """A tzinfo reflecting the platform's local time, via the time module."""

    def utcoffset(self, dt):
        return DSTOFFSET if self._isdst(dt) else STDOFFSET

    def dst(self, dt):
        return DSTDIFF if self._isdst(dt) else ZERO

    def tzname(self, dt):
        return _time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Round-trip through mktime/localtime so the C library decides
        # whether DST is in effect at that local wall-clock time.
        timetuple = (dt.year, dt.month, dt.day,
                     dt.hour, dt.minute, dt.second,
                     dt.weekday(), 0, -1)
        epoch = _time.mktime(timetuple)
        return _time.localtime(epoch).tm_isdst > 0
# Module-level singleton exposing the platform's local timezone.
Local = LocalTimezone()
def first_sunday_on_or_after(dt):
    """Return the first Sunday on or after *dt* (time of day preserved)."""
    # weekday(): Monday == 0 ... Sunday == 6.
    remaining = 6 - dt.weekday()
    return dt + timedelta(remaining) if remaining else dt
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
# which is the first Sunday on or after Oct 25.
DSTEND = datetime(1, 10, 25, 1)
# A complete implementation of current DST rules for major US time zones.
class USTimeZone(tzinfo):
    """US timezone implementing the pre-2007 first-Sunday-of-April through
    last-Sunday-of-October DST rules."""

    def __init__(self, hours, reprname, stdname, dstname):
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname

    def __repr__(self):
        return self.reprname

    def tzname(self, dt):
        return self.dstname if self.dst(dt) else self.stdname

    def utcoffset(self, dt):
        return self.stdoffset + self.dst(dt)

    def dst(self, dt):
        if dt is None or dt.tzinfo is None:
            # An exception may be sensible here, in one or both cases.
            # It depends on how you want to treat them. The default
            # fromutc() implementation (called by the default astimezone()
            # implementation) passes a datetime with dt.tzinfo is self.
            return ZERO
        assert dt.tzinfo is self
        # DST runs from the first Sunday in April to the first Sunday on or
        # after Oct 25 (i.e. the last Sunday of October) of dt's year.
        start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
        end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
        # Compare in naive local time: dt is aware, so strip its tzinfo.
        return HOUR if start <= dt.replace(tzinfo=None) < end else ZERO
# Ready-made US timezone instances (hours are the standard UTC offsets).
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
import dataclasses
import re
import warnings
from typing import (
Any,
AnyStr,
Iterable,
Iterator,
Match as MatchHint,
Optional,
Pattern as PatternHint,
Tuple,
Union)
class Pattern(object):
    """
    Abstract base for all pattern implementations.

    A pattern knows whether the files it matches are to be included,
    excluded, or ignored (a null-operation), and can test a single file
    path via :meth:`match_file`.
    """

    # Make the class dict-less.
    __slots__ = ('include',)

    def __init__(self, include: Optional[bool]) -> None:
        """
        Initializes the :class:`Pattern` instance.

        *include* (:class:`bool` or :data:`None`) is whether matched files
        should be included (:data:`True`), excluded (:data:`False`), or is
        a null-operation (:data:`None`).
        """
        self.include = include

    def match(self, files: Iterable[str]) -> Iterator[str]:
        """
        DEPRECATED: use :meth:`.match_file` in a loop for similar results.

        Yields each file path (:class:`str`) from *files* that this
        pattern matches.
        """
        warnings.warn((
            "{0.__module__}.{0.__qualname__}.match() is deprecated. Use "
            "{0.__module__}.{0.__qualname__}.match_file() with a loop for "
            "similar results."
        ).format(self.__class__), DeprecationWarning, stacklevel=2)

        for path in files:
            if self.match_file(path) is not None:
                yield path

    def match_file(self, file: str) -> Optional[Any]:
        """
        Matches this pattern against *file* (:class:`str`), a normalized
        file path.  Returns the match result if *file* matched, otherwise
        :data:`None`.  Subclasses must override this method.
        """
        raise NotImplementedError((
            "{0.__module__}.{0.__qualname__} must override match_file()."
        ).format(self.__class__))
class RegexPattern(Pattern):
    """
    A :class:`Pattern` backed by a compiled regular expression.
    """

    # Keep the class dict-less.
    __slots__ = ('regex',)

    def __init__(
        self,
        pattern: Union[AnyStr, PatternHint],
        include: Optional[bool] = None,
    ) -> None:
        """
        Initializes the :class:`RegexPattern` instance.

        *pattern* (:class:`str`, :class:`bytes`, :class:`re.Pattern`, or
        :data:`None`) is either the textual pattern to compile, an already
        compiled regular expression, or :data:`None` for a null-operation.

        *include* (:class:`bool` or :data:`None`) must be :data:`None`
        unless *pattern* is precompiled, in which case it states whether
        matched files are included (:data:`True`), excluded
        (:data:`False`), or ignored (:data:`None`).

        .. NOTE:: Subclasses do not need to support the *include*
           parameter.
        """
        if isinstance(pattern, (str, bytes)):
            assert include is None, (
                "include:{!r} must be null when pattern:{!r} is a string."
            ).format(include, pattern)

            regex, include = self.pattern_to_regex(pattern)
            # A null regex (include is None) marks a null-operation and is
            # deliberately left uncompiled.
            if include is not None:
                regex = re.compile(regex)

        elif pattern is not None and hasattr(pattern, 'match'):
            # A precompiled regular expression; *include* is used as given.
            regex = pattern

        elif pattern is None:
            # A null pattern is allowed to express a null-operation.
            assert include is None, (
                "include:{!r} must be null when pattern:{!r} is null."
            ).format(include, pattern)

        else:
            raise TypeError("pattern:{!r} is not a string, re.Pattern, or None.".format(pattern))

        super(RegexPattern, self).__init__(include)

        # NOTE(review): on the pattern-is-None path no local ``regex`` is
        # bound (mirroring the original code, which would raise NameError
        # here) -- confirm callers never construct a null pattern directly.
        self.regex: PatternHint = regex

    def __eq__(self, other: 'RegexPattern') -> bool:
        """
        Two regex patterns compare equal when both their ``include`` flags
        and their ``regex`` attributes are equal.
        """
        if not isinstance(other, RegexPattern):
            return NotImplemented
        return self.include == other.include and self.regex == other.regex

    def match_file(self, file: str) -> Optional['RegexMatchResult']:
        """
        Matches this pattern against *file* (:class:`str`), a path relative
        to the root directory.  Returns a :class:`RegexMatchResult` on
        success, otherwise :data:`None`.
        """
        if self.include is None:
            return None
        found = self.regex.match(file)
        if found is None:
            return None
        return RegexMatchResult(found)

    @classmethod
    def pattern_to_regex(cls, pattern: str) -> Tuple[str, bool]:
        """
        Converts *pattern* into an uncompiled regular expression.

        Returns the regular expression source (:class:`str` or
        :data:`None`) and the include flag.  The base implementation simply
        returns *pattern* together with :data:`True`.
        """
        return pattern, True
# Thin wrapper around the re.Match produced by RegexPattern.match_file().
@dataclasses.dataclass()
class RegexMatchResult(object):
    """
    The :class:`RegexMatchResult` data class is used to return information
    about the matched regular expression.
    """
    # Keep the class dict-less.
    __slots__ = (
        'match',
    )
    # Single field: the underlying regex match object.
    match: MatchHint
    """
    *match* (:class:`re.Match`) is the regex match result.
""" | /robotframework-lsp-1.11.0.tar.gz/robotframework-lsp-1.11.0/robotframework_ls/vendored/robocorp_ls_core/libs/robocop_lib/pathspec/pattern.py | 0.849628 | 0.371365 | pattern.py | pypi |
import sys
from collections.abc import (
Collection as CollectionType)
from itertools import (
zip_longest)
from os import (
PathLike)
from typing import (
AnyStr,
Callable,
Collection,
Iterable,
Iterator,
Optional,
Type,
TypeVar,
Union)
from . import util
from .pattern import (
Pattern)
from .util import (
StrPath,
TreeEntry,
_filter_patterns,
_is_iterable,
match_file,
normalize_file)
Self = TypeVar("Self", bound="PathSpec")
"""
:class:`PathSpec` self type hint to support Python v<3.11 using PEP 673
recommendation.
"""
class PathSpec(object):
"""
The :class:`PathSpec` class is a wrapper around a list of compiled
:class:`.Pattern` instances.
"""
def __init__(self, patterns: Iterable[Pattern]) -> None:
"""
Initializes the :class:`PathSpec` instance.
*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
yields each compiled pattern (:class:`.Pattern`).
"""
self.patterns = patterns if isinstance(patterns, CollectionType) else list(patterns)
"""
*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
contains the compiled patterns.
"""
def __eq__(self, other: object) -> bool:
"""
Tests the equality of this path-spec with *other* (:class:`PathSpec`)
by comparing their :attr:`~PathSpec.patterns` attributes.
"""
if isinstance(other, PathSpec):
paired_patterns = zip_longest(self.patterns, other.patterns)
return all(a == b for a, b in paired_patterns)
else:
return NotImplemented
def __len__(self) -> int:
"""
Returns the number of compiled patterns this path-spec contains
(:class:`int`).
"""
return len(self.patterns)
def __add__(self: Self, other: "PathSpec") -> Self:
"""
Combines the :attr:`Pathspec.patterns` patterns from two
:class:`PathSpec` instances.
"""
if isinstance(other, PathSpec):
return self.__class__(self.patterns + other.patterns)
else:
return NotImplemented
def __iadd__(self: Self, other: "PathSpec") -> Self:
"""
Adds the :attr:`Pathspec.patterns` patterns from one :class:`PathSpec`
instance to this instance.
"""
if isinstance(other, PathSpec):
self.patterns += other.patterns
return self
else:
return NotImplemented
@classmethod
def from_lines(
cls: Type[Self],
pattern_factory: Union[str, Callable[[AnyStr], Pattern]],
lines: Iterable[AnyStr],
) -> Self:
"""
Compiles the pattern lines.
*pattern_factory* can be either the name of a registered pattern
factory (:class:`str`), or a :class:`~collections.abc.Callable` used
to compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
pattern (:class:`str`). This simply has to yield each line so it can
be a :class:`io.TextIOBase` (e.g., from :func:`open` or
:class:`io.StringIO`) or the result from :meth:`str.splitlines`.
Returns the :class:`PathSpec` instance.
"""
if isinstance(pattern_factory, str):
pattern_factory = util.lookup_pattern(pattern_factory)
if not callable(pattern_factory):
raise TypeError(f"pattern_factory:{pattern_factory!r} is not callable.")
if not _is_iterable(lines):
raise TypeError(f"lines:{lines!r} is not an iterable.")
patterns = [pattern_factory(line) for line in lines if line]
return cls(patterns)
def match_entries(
self,
entries: Iterable[TreeEntry],
separators: Optional[Collection[str]] = None,
) -> Iterator[TreeEntry]:
"""
Matches the entries to this path-spec.
*entries* (:class:`~collections.abc.Iterable` of :class:`~util.TreeEntry`)
contains the entries to be matched against :attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
or :data:`None`) optionally contains the path separators to
normalize. See :func:`~pathspec.util.normalize_file` for more
information.
Returns the matched entries (:class:`~collections.abc.Iterator` of
:class:`~util.TreeEntry`).
"""
if not _is_iterable(entries):
raise TypeError(f"entries:{entries!r} is not an iterable.")
use_patterns = _filter_patterns(self.patterns)
for entry in entries:
norm_file = normalize_file(entry.path, separators)
if self._match_file(use_patterns, norm_file):
yield entry
# Match files using the `match_file()` utility function. Subclasses
# may override this method as an instance method. It does not have to
# be a static method.
_match_file = staticmethod(match_file)
def match_file(
    self,
    file: StrPath,
    separators: Optional[Collection[str]] = None,
) -> bool:
    """
    Matches the file to this path-spec.

    *file* (:class:`str` or :class:`os.PathLike[str]`) is the file path to be
    matched against :attr:`self.patterns <PathSpec.patterns>`.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`)
    optionally contains the path separators to normalize. See
    :func:`~pathspec.util.normalize_file` for more information.

    Returns :data:`True` if *file* matched; otherwise, :data:`False`.
    """
    # Normalize path separators so patterns always see '/'-separated paths.
    norm_file = util.normalize_file(file, separators=separators)
    return self._match_file(self.patterns, norm_file)
def match_files(
    self,
    files: Iterable[StrPath],
    separators: Optional[Collection[str]] = None,
) -> Iterator[StrPath]:
    """
    Match *files* against this path-spec and yield the files that matched.

    *files* (:class:`~collections.abc.Iterable` of :class:`str` or
    :class:`os.PathLike[str]`) contains the file paths to be matched against
    :attr:`self.patterns <PathSpec.patterns>`.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`;
    or :data:`None`) optionally contains the path separators to
    normalize. See :func:`~pathspec.util.normalize_file` for more
    information.

    Returns the matched files (:class:`~collections.abc.Iterator` of
    :class:`str` or :class:`os.PathLike[str]`).
    """
    if not _is_iterable(files):
        raise TypeError(f"files:{files!r} is not an iterable.")
    # Filter out never-matching patterns once, not per file.
    active_patterns = _filter_patterns(self.patterns)
    for original_file in files:
        normalized = normalize_file(original_file, separators)
        if self._match_file(active_patterns, normalized):
            # Yield the caller's original (un-normalized) value.
            yield original_file
def match_tree_entries(
    self,
    root: StrPath,
    on_error: Optional[Callable] = None,
    follow_links: Optional[bool] = None,
) -> Iterator[TreeEntry]:
    """
    Walks the specified root path for all files and matches them to this
    path-spec.

    *root* (:class:`str` or :class:`os.PathLike[str]`) is the root directory
    to search.

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions. See
    :func:`~pathspec.util.iter_tree_entries` for more information.

    *follow_links* (:class:`bool` or :data:`None`) optionally is whether
    to walk symbolic links that resolve to directories. See
    :func:`~pathspec.util.iter_tree_files` for more information.

    Returns the matched files (:class:`~collections.abc.Iterator` of
    :class:`.TreeEntry`).
    """
    # Delegate the walk to the util module, then filter with match_entries().
    entries = util.iter_tree_entries(root, on_error=on_error, follow_links=follow_links)
    yield from self.match_entries(entries)
def match_tree_files(
    self,
    root: StrPath,
    on_error: Optional[Callable] = None,
    follow_links: Optional[bool] = None,
) -> Iterator[str]:
    """
    Walks the specified root path for all files and matches them to this
    path-spec.

    *root* (:class:`str` or :class:`os.PathLike[str]`) is the root directory
    to search for files.

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions. See
    :func:`~pathspec.util.iter_tree_files` for more information.

    *follow_links* (:class:`bool` or :data:`None`) optionally is whether
    to walk symbolic links that resolve to directories. See
    :func:`~pathspec.util.iter_tree_files` for more information.

    Returns the matched files (:class:`~collections.abc.Iterable` of
    :class:`str`).
    """
    # Delegate the walk to the util module, then filter with match_files().
    files = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)
    yield from self.match_files(files)
# Alias `match_tree_files()` as `match_tree()` for backward
# compatibility with callers written before v0.3.2.
match_tree = match_tree_files
from typing import (
AnyStr,
Callable,
Collection,
Iterable,
Type,
TypeVar,
Union)
from .pathspec import (
PathSpec)
from .pattern import (
Pattern)
from .patterns.gitwildmatch import (
GitWildMatchPattern,
GitWildMatchPatternError,
_DIR_MARK)
from .util import (
_is_iterable)
Self = TypeVar("Self", bound="GitIgnoreSpec")
"""
:class:`GitIgnoreSpec` self type hint to support Python v<3.11 using PEP
673 recommendation.
"""
class GitIgnoreSpec(PathSpec):
    """
    The :class:`GitIgnoreSpec` class extends :class:`PathSpec` to
    replicate *.gitignore* behavior.
    """

    def __eq__(self, other: object) -> bool:
        """
        Tests the equality of this gitignore-spec with *other*
        (:class:`GitIgnoreSpec`) by comparing their :attr:`~PathSpec.patterns`
        attributes. A non-:class:`GitIgnoreSpec` will not compare equal.
        """
        if isinstance(other, GitIgnoreSpec):
            return super().__eq__(other)
        elif isinstance(other, PathSpec):
            # A plain PathSpec never equals a GitIgnoreSpec, even with
            # identical patterns, because the matching semantics differ.
            return False
        else:
            return NotImplemented

    @classmethod
    def from_lines(
        cls: Type[Self],
        lines: Iterable[AnyStr],
        pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None,
    ) -> Self:
        """
        Compiles the pattern lines.

        *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
        pattern (:class:`str`). This simply has to yield each line so it can
        be a :class:`io.TextIOBase` (e.g., from :func:`open` or
        :class:`io.StringIO`) or the result from :meth:`str.splitlines`.

        *pattern_factory* can be :data:`None`, the name of a registered
        pattern factory (:class:`str`), or a :class:`~collections.abc.Callable`
        used to compile patterns. The callable must accept an uncompiled
        pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`).
        Default is :data:`None` for :class:`.GitWildMatchPattern`.

        Returns the :class:`GitIgnoreSpec` instance.
        """
        if pattern_factory is None:
            # Default to git wildmatch semantics.
            pattern_factory = GitWildMatchPattern
        elif (isinstance(lines, str) or callable(lines)) and _is_iterable(pattern_factory):
            # Support reversed order of arguments from PathSpec.
            pattern_factory, lines = lines, pattern_factory
        self = super().from_lines(pattern_factory, lines)
        return self  # type: ignore

    @staticmethod
    def _match_file(
        patterns: Collection[GitWildMatchPattern],
        file: str,
    ) -> bool:
        """
        Matches the file to the patterns.

        .. NOTE:: Subclasses of :class:`.PathSpec` may override this
           method as an instance method. It does not have to be a static
           method.

        *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
        contains the patterns to use.

        *file* (:class:`str`) is the normalized file path to be matched
        against *patterns*.

        Returns :data:`True` if *file* matched; otherwise, :data:`False`.
        """
        # Later patterns override earlier ones, with a priority scheme so
        # that file-level matches (priority 2) beat directory-level matches
        # (priority 1), replicating git's evaluation order.
        out_matched = False
        out_priority = 0
        for pattern in patterns:
            if pattern.include is not None:
                match = pattern.match_file(file)
                if match is not None:
                    # Pattern matched.
                    # Check for directory marker.
                    try:
                        dir_mark = match.match.group(_DIR_MARK)
                    except IndexError as e:
                        # NOTICE: The exact content of this error message is subject
                        # to change.
                        raise GitWildMatchPatternError((
                            f"Invalid git pattern: directory marker regex group is missing. "
                            f"Debug: file={file!r} regex={pattern.regex!r} "
                            f"group={_DIR_MARK!r} match={match.match!r}."
                        )) from e
                    if dir_mark:
                        # Pattern matched by a directory pattern.
                        priority = 1
                    else:
                        # Pattern matched by a file pattern.
                        priority = 2
                    if pattern.include and dir_mark:
                        # An include pattern that matched via the directory
                        # marker always takes effect, regardless of priority.
                        out_matched = pattern.include
                        out_priority = priority
                    elif priority >= out_priority:
                        # Otherwise only override an earlier result of equal
                        # or lower priority.
                        out_matched = pattern.include
                        out_priority = priority
        return out_matched
import functools
import re
import string
import sys
import typing as t
if t.TYPE_CHECKING:
    import typing_extensions as te

    # Structural type for objects that can render themselves as HTML.
    class HasHTML(te.Protocol):
        def __html__(self) -> str:
            pass

    _P = te.ParamSpec("_P")


__version__ = "2.1.3"

# Patterns used by Markup.striptags(); DOTALL lets comments and tags
# span multiple lines.
_strip_comments_re = re.compile(r"<!--.*?-->", re.DOTALL)
_strip_tags_re = re.compile(r"<.*?>", re.DOTALL)
def _simple_escaping_wrapper(func: "t.Callable[_P, str]") -> "t.Callable[_P, Markup]":
    """Wrap a :class:`str` method so that its arguments are escaped first
    and its return value is re-wrapped in :class:`Markup`."""
    @functools.wraps(func)
    def wrapped(self: "Markup", *args: "_P.args", **kwargs: "_P.kwargs") -> "Markup":
        # Escape positional args into a new list and keyword args in place.
        arg_list = _escape_argspec(list(args), enumerate(args), self.escape)
        _escape_argspec(kwargs, kwargs.items(), self.escape)
        return self.__class__(func(self, *arg_list, **kwargs))  # type: ignore[arg-type]

    return wrapped  # type: ignore[return-value]
class Markup(str):
    """A string that is ready to be safely inserted into an HTML or XML
    document, either because it was escaped or because it was marked
    safe.

    Passing an object to the constructor converts it to text and wraps
    it to mark it safe without escaping. To escape the text, use the
    :meth:`escape` class method instead.

    >>> Markup("Hello, <em>World</em>!")
    Markup('Hello, <em>World</em>!')
    >>> Markup(42)
    Markup('42')
    >>> Markup.escape("Hello, <em>World</em>!")
    Markup('Hello, &lt;em&gt;World&lt;/em&gt;!')

    This implements the ``__html__()`` interface that some frameworks
    use. Passing an object that implements ``__html__()`` will wrap the
    output of that method, marking it safe.

    >>> class Foo:
    ...     def __html__(self):
    ...         return '<a href="/foo">foo</a>'
    ...
    >>> Markup(Foo())
    Markup('<a href="/foo">foo</a>')

    This is a subclass of :class:`str`. It has the same methods, but
    escapes their arguments and returns a ``Markup`` instance.

    >>> Markup("<em>%s</em>") % ("foo & bar",)
    Markup('<em>foo &amp; bar</em>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup('<em>Hello</em> &lt;foo&gt;')
    """

    __slots__ = ()

    def __new__(
        cls, base: t.Any = "", encoding: t.Optional[str] = None, errors: str = "strict"
    ) -> "te.Self":
        # Objects implementing __html__ are already safe markup; use their
        # rendered form without escaping.
        if hasattr(base, "__html__"):
            base = base.__html__()
        if encoding is None:
            return super().__new__(cls, base)
        return super().__new__(cls, base, encoding, errors)

    def __html__(self) -> "te.Self":
        # A Markup instance is its own safe-HTML representation.
        return self

    def __add__(self, other: t.Union[str, "HasHTML"]) -> "te.Self":
        if isinstance(other, str) or hasattr(other, "__html__"):
            return self.__class__(super().__add__(self.escape(other)))
        return NotImplemented

    def __radd__(self, other: t.Union[str, "HasHTML"]) -> "te.Self":
        if isinstance(other, str) or hasattr(other, "__html__"):
            return self.escape(other).__add__(self)
        return NotImplemented

    def __mul__(self, num: "te.SupportsIndex") -> "te.Self":
        if isinstance(num, int):
            return self.__class__(super().__mul__(num))
        return NotImplemented

    __rmul__ = __mul__

    def __mod__(self, arg: t.Any) -> "te.Self":
        if isinstance(arg, tuple):
            # a tuple of arguments, each wrapped
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        elif hasattr(type(arg), "__getitem__") and not isinstance(arg, str):
            # a mapping of arguments, wrapped
            arg = _MarkupEscapeHelper(arg, self.escape)
        else:
            # a single argument, wrapped with the helper and a tuple
            arg = (_MarkupEscapeHelper(arg, self.escape),)
        return self.__class__(super().__mod__(arg))

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({super().__repr__()})"

    def join(self, seq: t.Iterable[t.Union[str, "HasHTML"]]) -> "te.Self":
        # Escape each element before joining so untrusted pieces stay safe.
        return self.__class__(super().join(map(self.escape, seq)))

    join.__doc__ = str.join.__doc__

    def split(  # type: ignore[override]
        self, sep: t.Optional[str] = None, maxsplit: int = -1
    ) -> t.List["te.Self"]:
        return [self.__class__(v) for v in super().split(sep, maxsplit)]

    split.__doc__ = str.split.__doc__

    def rsplit(  # type: ignore[override]
        self, sep: t.Optional[str] = None, maxsplit: int = -1
    ) -> t.List["te.Self"]:
        return [self.__class__(v) for v in super().rsplit(sep, maxsplit)]

    rsplit.__doc__ = str.rsplit.__doc__

    def splitlines(  # type: ignore[override]
        self, keepends: bool = False
    ) -> t.List["te.Self"]:
        return [self.__class__(v) for v in super().splitlines(keepends)]

    splitlines.__doc__ = str.splitlines.__doc__

    def unescape(self) -> str:
        """Convert escaped markup back into a text string. This replaces
        HTML entities with the characters they represent.

        >>> Markup("Main &raquo; <em>About</em>").unescape()
        'Main » <em>About</em>'
        """
        from html import unescape

        return unescape(str(self))

    def striptags(self) -> str:
        """:meth:`unescape` the markup, remove tags, and normalize
        whitespace to single spaces.

        >>> Markup("Main &raquo;\t<em>About</em>").striptags()
        'Main » About'
        """
        # Use two regexes to avoid ambiguous matches.
        value = _strip_comments_re.sub("", self)
        value = _strip_tags_re.sub("", value)
        value = " ".join(value.split())
        return self.__class__(value).unescape()

    @classmethod
    def escape(cls, s: t.Any) -> "te.Self":
        """Escape a string. Calls :func:`escape` and ensures that for
        subclasses the correct type is returned.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv  # type: ignore[return-value]

    # str methods that only need their arguments escaped and their result
    # re-wrapped in Markup.
    __getitem__ = _simple_escaping_wrapper(str.__getitem__)
    capitalize = _simple_escaping_wrapper(str.capitalize)
    title = _simple_escaping_wrapper(str.title)
    lower = _simple_escaping_wrapper(str.lower)
    upper = _simple_escaping_wrapper(str.upper)
    replace = _simple_escaping_wrapper(str.replace)
    ljust = _simple_escaping_wrapper(str.ljust)
    rjust = _simple_escaping_wrapper(str.rjust)
    lstrip = _simple_escaping_wrapper(str.lstrip)
    rstrip = _simple_escaping_wrapper(str.rstrip)
    center = _simple_escaping_wrapper(str.center)
    strip = _simple_escaping_wrapper(str.strip)
    translate = _simple_escaping_wrapper(str.translate)
    expandtabs = _simple_escaping_wrapper(str.expandtabs)
    swapcase = _simple_escaping_wrapper(str.swapcase)
    zfill = _simple_escaping_wrapper(str.zfill)
    casefold = _simple_escaping_wrapper(str.casefold)

    # removeprefix/removesuffix only exist on str from Python 3.9 on.
    if sys.version_info >= (3, 9):
        removeprefix = _simple_escaping_wrapper(str.removeprefix)
        removesuffix = _simple_escaping_wrapper(str.removesuffix)

    def partition(self, sep: str) -> t.Tuple["te.Self", "te.Self", "te.Self"]:
        l, s, r = super().partition(self.escape(sep))
        cls = self.__class__
        return cls(l), cls(s), cls(r)

    def rpartition(self, sep: str) -> t.Tuple["te.Self", "te.Self", "te.Self"]:
        l, s, r = super().rpartition(self.escape(sep))
        cls = self.__class__
        return cls(l), cls(s), cls(r)

    def format(self, *args: t.Any, **kwargs: t.Any) -> "te.Self":
        formatter = EscapeFormatter(self.escape)
        return self.__class__(formatter.vformat(self, args, kwargs))

    def format_map(  # type: ignore[override]
        self, map: t.Mapping[str, t.Any]
    ) -> "te.Self":
        formatter = EscapeFormatter(self.escape)
        return self.__class__(formatter.vformat(self, (), map))

    def __html_format__(self, format_spec: str) -> "te.Self":
        if format_spec:
            raise ValueError("Unsupported format specification for Markup.")
        return self
class EscapeFormatter(string.Formatter):
    """A :class:`string.Formatter` that escapes every substituted field,
    honoring ``__html_format__``/``__html__`` on the values."""

    __slots__ = ("escape",)

    def __init__(self, escape: t.Callable[[t.Any], Markup]) -> None:
        self.escape = escape
        super().__init__()

    def format_field(self, value: t.Any, format_spec: str) -> str:
        # __html_format__ takes precedence: it can honor the format spec.
        if hasattr(value, "__html_format__"):
            rv = value.__html_format__(format_spec)
        elif hasattr(value, "__html__"):
            # __html__ cannot interpret a format spec; reject one explicitly
            # rather than silently ignoring it.
            if format_spec:
                raise ValueError(
                    f"Format specifier {format_spec} given, but {type(value)} does not"
                    " define __html_format__. A class that defines __html__ must define"
                    " __html_format__ to work with format specifiers."
                )
            rv = value.__html__()
        else:
            # We need to make sure the format spec is str here as
            # otherwise the wrong callback methods are invoked.
            rv = string.Formatter.format_field(self, value, str(format_spec))
        return str(self.escape(rv))
# Constrains the helper below to containers that support item assignment.
_ListOrDict = t.TypeVar("_ListOrDict", list, dict)


def _escape_argspec(
    obj: _ListOrDict, iterable: t.Iterable[t.Any], escape: t.Callable[[t.Any], Markup]
) -> _ListOrDict:
    """Helper for various string-wrapped functions.

    *iterable* yields ``(key, value)`` pairs addressing into *obj*; string
    values and objects implementing ``__html__`` are escaped in place.
    Returns *obj* for convenience.
    """
    for key, value in iterable:
        if isinstance(value, str) or hasattr(value, "__html__"):
            obj[key] = escape(value)
    return obj
class _MarkupEscapeHelper:
    """Helper for :meth:`Markup.__mod__`.

    Wraps a %-format argument so every access (item lookup, str/repr/int/
    float conversion) goes through the escape function first.
    """

    __slots__ = ("obj", "escape")

    def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
        self.obj = obj
        self.escape = escape

    def __getitem__(self, item: t.Any) -> "te.Self":
        # Re-wrap nested lookups so mapping-style formatting stays escaped.
        return self.__class__(self.obj[item], self.escape)

    def __str__(self) -> str:
        return str(self.escape(self.obj))

    def __repr__(self) -> str:
        return str(self.escape(repr(self.obj)))

    def __int__(self) -> int:
        return int(self.obj)

    def __float__(self) -> float:
        return float(self.obj)
# circular import
try:
    # Prefer the C-accelerated implementations when the extension module
    # was built for this interpreter.
    from ._speedups import escape as escape
    from ._speedups import escape_silent as escape_silent
    from ._speedups import soft_str as soft_str
except ImportError:
    # Fall back to the pure-Python implementations.
    from ._native import escape as escape
    from ._native import escape_silent as escape_silent  # noqa: F401
    from ._native import soft_str as soft_str  # noqa: F401
import inspect
import json
import sys
from collections import OrderedDict, defaultdict
from datetime import datetime, timezone
from operator import itemgetter
from pathlib import Path
from timeit import default_timer as timer
from warnings import warn
import pytz
import robocop.exceptions
from robocop.rules import Message
from robocop.utils import RecommendationFinder
from robocop.version import __version__
class Report:
    """
    Base class for report class.
    Override `configure` method if you want to allow report configuration.
    Override `add_message` if your report processes the Robocop issues.
    Set class attribute `DEFAULT` to `False` if you don't want your report to be included in `all` reports.
    """

    # Whether the report is enabled when `--reports all` is used.
    DEFAULT = True

    def configure(self, name, value):
        # Base class rejects any configuration parameter; subclasses that
        # accept configuration override this method.
        raise robocop.exceptions.ConfigGeneralError(
            f"Provided param '{name}' for report '{getattr(self, 'name')}' does not exist"
        )  # noqa

    def add_message(self, *args):
        # By default a report ignores individual issues.
        pass
def load_reports():
    """
    Load all valid reports.

    A report is considered valid if it inherits from the `Report` class
    and defines both `name` and `description` attributes. Returns a dict
    mapping report name to report instance.
    """
    discovered = {}
    for _, member in inspect.getmembers(sys.modules[__name__], inspect.isclass):
        if not issubclass(member, Report):
            continue
        instance = member()
        # Reports without a name/description (including the base class)
        # are not usable and are skipped.
        if hasattr(instance, "name") and hasattr(instance, "description"):
            discovered[instance.name] = instance
    return discovered
def is_report_default(report):
    """Return the report's ``DEFAULT`` flag (``False`` when the attribute is absent)."""
    return getattr(report, "DEFAULT", False)
def get_reports(configured_reports):
    """
    Return an ordered dict of valid, enabled reports whose names are listed
    in *configured_reports* (an iterable of str).

    The special name ``all`` enables every default report. An unknown report
    name raises `InvalidReportName`.
    """
    available = load_reports()
    enabled = OrderedDict()
    for report_name in configured_reports:
        if report_name == "all":
            for name, report in available.items():
                if is_report_default(report) and name not in enabled:
                    enabled[name] = report
        elif report_name not in available:
            raise robocop.exceptions.InvalidReportName(report_name, available)
        elif report_name not in enabled:
            enabled[report_name] = available[report_name]
    return enabled
def list_reports(reports):
    """Return a human-readable listing of the enabled *reports*, sorted by name."""
    ordered = sorted(reports.values(), key=lambda rep: rep.name)
    lines = [f"{report.name:20} - {report.description}" for report in ordered]
    body = "\n".join(lines)
    footer = "all" + " " * 18 + "- Turns on all default reports"
    return "Available reports:\n" + body + "\n" + footer
class RulesByIdReport(Report):
    """
    Report name: ``rules_by_id``

    Groups linter rule messages by rule id and prints them ordered by the
    most common message. Example::

        Issues by ID:
        W0502 (too-little-calls-in-keyword) : 5
        W0201 (missing-doc-keyword)         : 4
        E0401 (parsing-error)               : 3
    """

    def __init__(self):
        self.name = "rules_by_id"
        self.description = "Groups detected issues by rule id and prints it ordered by most common"
        self.message_counter = defaultdict(int)

    def add_message(self, message: Message):  # noqa
        self.message_counter[message.get_fullname()] += 1

    def get_report(self) -> str:
        ordered = sorted(self.message_counter.items(), key=itemgetter(1), reverse=True)
        if not ordered:
            return "\nIssues by ID:\nNo issues found."
        # Pad rule names so the counts line up in a column.
        widest = max(len(name) for name, _ in ordered)
        lines = [f"{name:{widest}} : {count}" for name, count in ordered]
        return "\nIssues by ID:\n" + "\n".join(lines)
class RulesBySeverityReport(Report):
    """
    Report name: ``rules_by_error_type``

    Groups linter rule messages by severity and prints the total number of
    issues per severity level. Example::

        Found 15 issues: 4 ERRORs, 11 WARNINGs.
    """

    def __init__(self):
        self.name = "rules_by_error_type"
        self.description = "Prints total number of issues grouped by severity"
        self.severity_counter = defaultdict(int)

    def add_message(self, message: Message):
        self.severity_counter[message.severity] += 1

    def get_report(self) -> str:
        total = sum(self.severity_counter.values())
        if not total:
            return "\nFound 0 issues."
        header = "\nFound 1 issue: " if total == 1 else f"\nFound {total} issues: "
        parts = [
            f"{count} {severity.name}{'' if count == 1 else 's'}"
            for severity, count in self.severity_counter.items()
        ]
        return header + ", ".join(parts) + "."
class ReturnStatusReport(Report):
    """
    Report name: ``return_status``

    This report is always enabled. It checks whether the number of reported
    issues for each severity exceeds the configured quality-gate threshold;
    the overflow is later used as Robocop's process return status.
    """

    def __init__(self):
        self.name = "return_status"
        self.description = "Checks if number of specific issues exceed quality gate limits"
        self.return_status = 0
        self.counter = RulesBySeverityReport()
        # Threshold per severity letter; -1 means "never fail on this severity".
        self.quality_gate = {"E": 0, "W": 0, "I": -1}

    def configure(self, name, value):
        if name not in ("quality_gate", "quality_gates"):
            super().configure(name, value)
        # Value looks like "E=0:W=100:I=-1"; unknown or malformed entries
        # are silently skipped.
        for entry in value.split(":"):
            try:
                gate_name, count = entry.split("=", maxsplit=1)
                if gate_name.upper() in self.quality_gate:
                    self.quality_gate[gate_name.upper()] = int(count)
            except ValueError:
                continue

    def add_message(self, message: Message):
        self.counter.add_message(message)

    def get_report(self):
        # Accumulate how far each severity count exceeds its threshold.
        overflow = sum(
            count - self.quality_gate.get(severity.value, 0)
            for severity, count in self.counter.severity_counter.items()
            if -1 < self.quality_gate.get(severity.value, 0) < count
        )
        # Cap at 255: the largest portable process exit code.
        self.return_status = min(self.return_status + overflow, 255)
class TimeTakenReport(Report):
    """
    Report name: ``scan_timer``
    Report that returns Robocop execution time
    Example::
        Scan finished in 0.054s.
    """

    def __init__(self):
        self.name = "scan_timer"
        self.description = "Returns Robocop execution time"
        # The clock starts when the report is instantiated, i.e. when the
        # enabled reports are loaded at the beginning of a scan.
        self.start_time = timer()

    def get_report(self) -> str:
        return f"\nScan finished in {timer() - self.start_time:.3f}s."
class JsonReport(Report):
    """
    Report name: ``json_report``
    Report that returns list of found issues in JSON format.
    """

    # Not part of the `all` reports set; must be enabled by name.
    DEFAULT = False

    def __init__(self):
        self.name = "json_report"
        self.description = "Accumulates found issues in JSON format"
        self.issues = []

    def add_message(self, message: Message):
        self.issues.append(message.to_json())

    def get_report(self):
        # Issues are consumed through the `issues` attribute; there is no
        # textual report to print.
        return None
class FileStatsReport(Report):
    """
    Report name: ``file_stats``

    Displays overall statistics about the number of processed files.
    Example::

        Processed 7 files from which 5 files contained issues.
    """

    def __init__(self):
        self.name = "file_stats"
        self.description = "Prints overall statistics about number of processed files"
        # `files_count` is set externally by the scanner.
        self.files_count = 0
        self.files_with_issues = set()

    def add_message(self, message: Message):
        self.files_with_issues.add(message.source)

    def get_report(self) -> str:
        if not self.files_count:
            return "\nNo files were processed."
        all_suffix = "s" if self.files_count > 1 else ""
        if not self.files_with_issues:
            return f"\nProcessed {self.files_count} file{all_suffix} but no issues were found."
        issue_file_count = len(self.files_with_issues)
        bad_suffix = "" if issue_file_count == 1 else "s"
        return (
            f"\nProcessed {self.files_count} file{all_suffix} from which {issue_file_count} "
            f"file{bad_suffix} contained issues."
        )
class RobocopVersionReport(Report):
    """
    Report name: ``version``
    Report that returns Robocop version.
    Example::
        Report generated by Robocop version: 2.0.2
    """

    def __init__(self):
        self.name = "version"
        self.description = "Returns Robocop version"

    def get_report(self) -> str:
        return f"\nReport generated by Robocop version: {__version__}"
class TimestampReport(Report):
    """
    Report name: ``timestamp``

    Report that returns Robocop execution timestamp.
    Timestamp follows local time in format of
    `Year-Month-Day Hours(24-hour clock):Minutes:Seconds ±hh:mm UTC offset` as default.

    Example::

        Reported: 2022-07-10 21:25:00 +0300

    Both of default values, ``timezone`` and ``format`` can be configured by
    ``-c/--configure`` and ``timestamp:timezone:"<timezone name>"`` and/or ``timestamp:format:"<format string>"``::

        robocop --configure timestamp:timezone:"Europe/Paris" --configure timestamp:format:"%Y-%m-%d %H:%M:%S %Z %z"

    This yields following timestamp report::

        Reported: 2022-07-10 20:38:10 CEST +0200

    For timezone names,
    see: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones

    For timestamp formats,
    see: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes

    Useful configurations::

        Local time to ISO 8601 format:
        robocop --configure timestamp:format:"%Y-%m-%dT%H:%M:%S%z"

        UTC time:
        robocop --configure timestamp:timezone:"UTC" --configure timestamp:format:"%Y-%m-%dT%H:%M:%S %Z %z"

        Timestamp with high precision:
        robocop --configure timestamp:format:"%Y-%m-%dT%H:%M:%S.%f %z"

        12-hour clock:
        robocop --configure timestamp:format:"%Y-%m-%d %I:%M:%S %p %Z %z"

        More human readable format 'On 10 July 2022 07:26:24 +0300':
        robocop --configure timestamp:format:"On %d %B %Y %H:%M:%S %z"
    """

    def __init__(self):
        self.name = "timestamp"
        self.description = "Returns Robocop execution timestamp."
        # "local" is a sentinel meaning the machine's local timezone; any
        # other value is resolved through pytz.
        self.timezone = "local"
        self.format = "%Y-%m-%d %H:%M:%S %z"

    def configure(self, name, value):
        if name == "timezone":
            self.timezone = value
        elif name == "format":
            if value:
                self.format = value
            else:
                # Keep the previous format rather than producing empty output.
                warn("Empty format string for `timestamp` report does not make sense. Default format used.")
        else:
            super().configure(name, value)

    def get_report(self) -> str:
        return f"\nReported: {self._get_timestamp()}"

    def _get_timestamp(self) -> str:
        """Render the current time in the configured timezone and format.

        Raises ConfigGeneralError when the configured timezone name is not
        a valid tz database name.
        """
        try:
            if self.timezone == "local":
                timezone_code = datetime.now(timezone.utc).astimezone().tzinfo
            else:
                timezone_code = pytz.timezone(self.timezone)
            return datetime.now(timezone_code).strftime(self.format)
        except pytz.exceptions.UnknownTimeZoneError as err:
            # tz database names use '/' (e.g. Europe/Helsinki), and the
            # Wikipedia article is "...time_zones" (plural) — both were
            # previously misspelled in this message.
            raise robocop.exceptions.ConfigGeneralError(
                f"Provided timezone '{self.timezone}' for report '{getattr(self, 'name')}' is not valid. "
                "Use timezone names like `Europe/Helsinki`. "
                "See: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
            ) from err  # noqa
class SarifReport(Report):
    """
    Report name: ``sarif``
    Report that generates SARIF output file.
    This report is not included in the default reports. The ``--reports all`` option will not enable this report.
    You can still enable it using report name directly: ``--reports sarif`` or ``--reports all,sarif``.
    All fields required by GitHub Code Scanning are supported. The output file will be generated
    in the current working directory with the ``.sarif.json`` name.
    You can configure output directory and report filename::
        robocop --configure sarif:output_dir:C:/sarif_reports --configure sarif:report_filename:.sarif
    """

    # Must be enabled explicitly; excluded from `--reports all`.
    DEFAULT = False
    SCHEMA_VERSION = "2.1.0"
    SCHEMA = f"https://json.schemastore.org/sarif-{SCHEMA_VERSION}.json"

    def __init__(self):
        self.name = "sarif"
        self.description = "Generate SARIF output file"
        self.output_dir = None
        self.report_filename = ".sarif.json"
        self.issues = []

    def configure(self, name, value):
        if name == "output_dir":
            # Create the directory eagerly so get_report() can write into it.
            self.output_dir = Path(value)
            self.output_dir.mkdir(parents=True, exist_ok=True)
        elif name == "report_filename":
            self.report_filename = value
        else:
            super().configure(name, value)

    @staticmethod
    def map_severity_to_level(severity):
        # Translate Robocop severities to the SARIF "level" vocabulary.
        return {"WARNING": "warning", "ERROR": "error", "INFO": "note"}[severity.name]

    def get_rule_desc(self, rule):
        """Return the SARIF ``reportingDescriptor`` object for *rule*."""
        return {
            "id": rule.rule_id,
            "name": rule.name,
            "helpUri": f"https://robocop.readthedocs.io/en/stable/rules.html#{rule.name}",
            "shortDescription": {"text": rule.msg},
            "fullDescription": {"text": rule.docs},
            "defaultConfiguration": {"level": self.map_severity_to_level(rule.default_severity)},
            "help": {"text": rule.docs, "markdown": rule.docs},
        }

    def add_message(self, message: Message):
        self.issues.append(message)

    def generate_sarif_issues(self, config):
        """Return the SARIF ``results`` array for the accumulated issues."""
        sarif_issues = []
        for issue in self.issues:
            # SARIF locations are relative to the project root (%SRCROOT%).
            relative_uri = Path(issue.source).relative_to(config.root)
            sarif_issue = {
                "ruleId": issue.rule_id,
                "level": self.map_severity_to_level(issue.severity),
                "message": {"text": issue.desc},
                "locations": [
                    {
                        "physicalLocation": {
                            "artifactLocation": {"uri": relative_uri.as_posix(), "uriBaseId": "%SRCROOT%"},
                            "region": {
                                "startLine": issue.line,
                                "endLine": issue.end_line,
                                "startColumn": issue.col,
                                "endColumn": issue.end_col,
                            },
                        }
                    }
                ],
            }
            sarif_issues.append(sarif_issue)
        return sarif_issues

    def generate_rules_config(self, rules):
        """Return descriptors for enabled rules, de-duplicated by id and sorted."""
        unique_enabled_rules = {rule.rule_id: rule for rule in rules.values() if rule.enabled}
        sorted_rules = sorted(unique_enabled_rules.values(), key=lambda x: x.rule_id)
        rules_config = [self.get_rule_desc(rule) for rule in sorted_rules]
        return rules_config

    def generate_sarif_report(self, config, rules):
        """Assemble the complete SARIF log object (schema 2.1.0)."""
        report = {
            "$schema": self.SCHEMA,
            "version": self.SCHEMA_VERSION,
            "runs": [
                {
                    "tool": {
                        "driver": {
                            "name": "Robocop",
                            "semanticVersion": __version__,
                            "informationUri": "https://robocop.readthedocs.io/",
                            "rules": self.generate_rules_config(rules),
                        }
                    },
                    "automationDetails": {"id": "robocop/"},
                    "results": self.generate_sarif_issues(config),
                }
            ],
        }
        return report

    def get_report(self, config, rules) -> str:
        """Write the SARIF report to disk and return a confirmation message."""
        report = self.generate_sarif_report(config, rules)
        if self.output_dir is not None:
            output_path = self.output_dir / self.report_filename
        else:
            # Default: current working directory.
            output_path = Path(self.report_filename)
        with open(output_path, "w") as fp:
            json_string = json.dumps(report, indent=4)
            fp.write(json_string)
        return f"Generated SARIF report in {output_path}"
from functools import lru_cache
from pathlib import Path
from pathspec import PathSpec
from robocop.exceptions import FileError
# Regex of directory/file names excluded by default (VCS metadata,
# virtualenvs, tox/nox environments, build caches).
DEFAULT_EXCLUDES = r"(\.direnv|\.eggs|\.git|\.hg|\.nox|\.tox|\.venv|venv|\.svn)"
def find_project_root(root, srcs):
    """
    Find project root.

    If not provided in ``root`` argument, the first parent directory
    containing either .git, .robocop or pyproject.toml file in any of
    ``srcs`` paths will be the root. If not found, returns the root of the
    file system.
    """
    if root is not None:
        return Path(root)
    if not srcs:
        return Path("/").resolve()
    resolved = [Path(Path.cwd(), src).resolve() for src in srcs]
    # Treat a directory source as a "parent" of itself so it can serve as
    # the common base.
    parent_sets = []
    for path in resolved:
        parents = set(path.parents)
        if path.is_dir():
            parents.add(path)
        parent_sets.append(parents)
    common_base = max(set.intersection(*parent_sets), key=lambda p: p.parts)
    candidates = (common_base, *common_base.parents)
    for directory in candidates:
        has_marker = (
            (directory / ".git").exists()
            or (directory / "pyproject.toml").is_file()
            or (directory / ".robocop").is_file()
        )
        if has_marker:
            return directory
    # No marker anywhere: fall back to the last candidate, which is the
    # root of the file system.
    return candidates[-1]
def find_file_in_project_root(config_name, root):
    """
    Return the path of *config_name* anchored at the first ancestor of
    *root* (including *root* itself) that contains a ``.git`` entry or the
    configuration file itself. When nothing matches, the returned path is
    anchored at the last ancestor checked (the filesystem root), even
    though the file does not exist there.
    """
    candidates = (root, *root.parents)
    for parent in candidates:
        if (parent / ".git").exists() or (parent / config_name).is_file():
            return parent / config_name
    return candidates[-1] / config_name
@lru_cache()
def get_gitignore(root):
    """Return a PathSpec matching the gitignore content of *root*, or an
    empty spec when no ``.gitignore`` file is present. Cached per root."""
    gitignore_path = root / ".gitignore"
    if gitignore_path.is_file():
        with gitignore_path.open(encoding="utf-8") as handle:
            lines = handle.readlines()
    else:
        lines = []
    return PathSpec.from_lines("gitwildmatch", lines)
def get_files(config):
    """Yield absolute paths of all files to lint, honoring the project's
    gitignore rules and the config's ignore/filetype settings."""
    gitignore = get_gitignore(config.root)
    for file in config.paths:
        yield from get_absolute_path(Path(file), config, gitignore)
def get_absolute_path(path, config, gitignore):
    """Recursively yield absolute file paths under ``path`` that should be parsed.

    Paths excluded by the Robocop config or matched by the accumulated
    gitignore spec are skipped.

    Raises:
        FileError: if ``path`` does not exist.
    """
    if not path.exists():
        raise FileError(path)
    if config.is_path_ignored(path):
        return
    if gitignore is not None and gitignore.match_file(path):
        return
    if path.is_file():
        if should_parse(config, path):
            yield path.absolute()
    elif path.is_dir():
        for file in path.iterdir():
            # without --recursive, nested directories are not descended into
            if file.is_dir() and not config.recursive:
                continue
            yield from get_absolute_path(
                file,
                config,
                # merge the inherited spec with this directory's own .gitignore
                # (relies on PathSpec.__add__ merging patterns — TODO confirm
                # minimum supported pathspec version provides it)
                gitignore + get_gitignore(path) if gitignore is not None else None,
            )
def should_parse(config, file):
    """Check if file extension is in list of supported file types (can be configured from cli)"""
    suffix = file.suffix
    if not suffix:
        # no extension: return the falsy suffix itself, like the short-circuit form
        return suffix
    return suffix.lower() in config.filetypes
import robot.errors
class RobocopFatalError(ValueError):
    """Base class for fatal Robocop errors that abort the run."""

    pass
class ConfigGeneralError(RobocopFatalError):
    """Base class for fatal errors caused by invalid configuration."""

    pass
class InvalidExternalCheckerError(RobocopFatalError):
    """Raised when an external rules file cannot be loaded."""

    def __init__(self, path):
        super().__init__(
            f'Fatal error: Failed to load external rules from file "{path}". Verify if the file exists'
        )
class FileError(RobocopFatalError):
    """Raised when a provided source path does not exist."""

    def __init__(self, source):
        super().__init__(f'File "{source}" does not exist')
class ArgumentFileNotFoundError(RobocopFatalError):
    """Raised when a referenced argument file does not exist."""

    def __init__(self, source):
        super().__init__(f'Argument file "{source}" does not exist')
class CircularArgumentFileError(RobocopFatalError):
    """Raised when argument files import each other in a cycle."""

    def __init__(self, source):
        super().__init__(f'Circular argument file import in "{source}"')
class InvalidArgumentError(RobocopFatalError):
    """Raised for any invalid Robocop configuration value."""

    def __init__(self, msg):
        full_msg = f"Invalid configuration for Robocop:\n{msg}"
        super().__init__(full_msg)
class RuleNotFoundError(RobocopFatalError):
    """Raised when configuring a rule that the given checker does not define."""

    def __init__(self, rule, checker):
        super().__init__(
            f"{checker.__class__.__name__} checker does not contain rule `{rule}`. "
            f"Available rules: {', '.join(checker.rules.keys())}"
        )
class RuleParamNotFoundError(RobocopFatalError):
    """Raised when configuring a parameter that the given rule does not define."""

    def __init__(self, rule, param, checker):
        super().__init__(
            f"Rule `{rule.name}` in `{checker.__class__.__name__}` checker does not contain `{param}` param. "
            f"Available params:\n    {rule.available_configurables()[1]}"
        )
class RuleParamFailedInitError(RobocopFatalError):
    """Raised when a rule parameter value fails conversion/validation."""

    def __init__(self, param, value, err):
        # only include the description line when the param actually has one
        desc = f"    Parameter info: {param.desc}" if param.desc else ""
        super().__init__(
            f"Failed to configure param `{param.name}` with value `{value}`. Received error `{err}`.\n"
            f"    Parameter type: {param.converter}\n" + desc
        )
class RuleReportsNotFoundError(RobocopFatalError):
    """Raised when a checker's ``reports`` tuple names a rule its module does not define."""

    def __init__(self, rule, checker):
        super().__init__(f"{checker.__class__.__name__} checker `reports` attribute contains unknown rule `{rule}`")
class InvalidReportName(ConfigGeneralError):
    """Raised for an unknown report name; suggests the closest valid name."""

    def __init__(self, report, reports):
        # local import avoids a circular import between exceptions and utils
        from robocop.utils import RecommendationFinder

        report_names = sorted(list(reports.keys()) + ["all"])
        similar = RecommendationFinder().find_similar(report, report_names)
        msg = f"Provided report '{report}' does not exist. {similar}"
        super().__init__(msg)
class RobotFrameworkParsingError(Exception):
    """Raised when the Robot Framework parsing module fails unexpectedly."""

    _MSG = (
        "Fatal exception occurred when using Robot Framework parsing module. "
        "Consider updating Robot Framework to recent stable version."
    )

    def __init__(self):
        super().__init__(self._MSG)
def handle_robot_errors(func):
    """
    Decorator translating unexpected Robot Framework parsing failures.

    If the user uses older version of Robot Framework, it may fail while
    parsing the source code due to bugs already fixed in more recent versions.
    ``robot.errors.DataError`` is passed through untouched; any other
    exception is re-raised as ``RobotFrameworkParsingError`` so the user is
    hinted to upgrade.
    """
    # local import keeps this module's top-level imports unchanged
    from functools import wraps

    @wraps(func)  # preserve __name__/__doc__/signature metadata of the wrapped callable
    def wrap_errors(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except robot.errors.DataError:
            raise
        except:  # noqa
            raise RobotFrameworkParsingError

    return wrap_errors
from collections import defaultdict
from robot.api import Token
from robocop.checkers import VisitorChecker
from robocop.rules import Rule, RuleSeverity
rules = {
"0601": Rule(
rule_id="0601",
name="tag-with-space",
msg="Tag '{{ tag }}' should not contain spaces",
severity=RuleSeverity.WARNING,
docs="""
Example of rule violation::
Test
[Tags] ${tag with space}
""",
),
"0602": Rule(
rule_id="0602",
name="tag-with-or-and",
msg="Tag '{{ tag }}' with reserved word OR/AND."
" Hint: make sure to include this tag using lowercase name to avoid issues",
severity=RuleSeverity.INFO,
docs="""
OR and AND words are used to combine tags when selecting tests to be run in Robot Framework. Using following
configuration::
robot --include tagANDtag2
Robot Framework will only execute tests that contain `tag` and `tag2`. That's why it's best to avoid AND and OR
in tag names. See
`docs <https://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#tag-patterns>`_
for more information.
Tag matching is case-insensitive. If your tag contains OR or AND you can use lowercase to match it.
For example, if your tag is `PORT` you can match it with `port`.
""",
),
"0603": Rule(
rule_id="0603",
name="tag-with-reserved-word",
msg="Tag '{{ tag }}' prefixed with reserved word `robot:`",
severity=RuleSeverity.WARNING,
docs="""
This prefix is used by Robot Framework special tags. More details
`here <https://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#reserved-tags>`_.
Special tags currently in use:
- robot:exit
- robot:no-dry-run
- robot:continue-on-failure
- robot:recursive-continue-on-failure
- robot:skip
- robot:skip-on-failure
- robot:stop-on-failure
- robot:exclude
- robot:private
""",
),
"0605": Rule(
rule_id="0605",
name="could-be-test-tags",
msg="All tests in suite share these tags: '{{ tags }}'. "
"You can define them in 'Test Tags' in suite settings instead",
severity=RuleSeverity.INFO,
docs="""
Example::
*** Test Cases ***
Test
[Tag] featureX smoke
Step
Test 2
[Tag] featureX
Step
In this example all tests share one common tag `featureX`. It can be declared just once using ``Test Tags``
or ``Task Tags``.
""",
),
"0606": Rule(
rule_id="0606",
name="tag-already-set-in-test-tags",
msg="Tag '{{ tag }}' is already set by {{ test_force_tags }} in suite settings",
severity=RuleSeverity.INFO,
docs="""
Avoid repeating same tags in tests when the tag is already declared in ``Test Tags`` or ``Force Tags``.
Example of rule violation::
*** Setting ***
Test Tags common-tag
*** Test Cases ***
Test
[Tag] sanity common-tag
""",
),
"0607": Rule(
rule_id="0607",
name="unnecessary-default-tags",
msg="Tags defined in Default Tags are always overwritten",
severity=RuleSeverity.INFO,
docs="""
Example of rule violation::
*** Settings ***
Default Tags tag1 tag2
*** Test Cases ***
Test
[Tags] tag3
Step
Test 2
[Tags] tag4
Step
Since `Test` and `Test 2` have `[Tags]` section, `Default Tags` setting is never used.
""",
),
"0608": Rule(
rule_id="0608",
name="empty-tags",
msg="[Tags] setting without values{{ optional_warning }}",
severity=RuleSeverity.WARNING,
docs="""
If you want to use empty `[Tags]` (for example to overwrite `Default Tags`) then use `NONE` value
to be explicit.
""",
),
"0609": Rule(
rule_id="0609",
name="duplicated-tags",
msg="Multiple tags with name '{{ name }}' (first occurrence at line {{ line }} column {{ column }})",
severity=RuleSeverity.WARNING,
docs="""
Tags are free text, but they are normalized so that they are converted to lowercase and all spaces are removed.
Only first tag is used, other occurrences are ignored.
Example of duplicated tags::
Test
[Tags] Tag TAG tag t a g
""",
),
}
class TagNameChecker(VisitorChecker):
    """Checker for tag names. It scans for tags with spaces or Robot Framework reserved words."""

    reports = (
        "tag-with-space",
        "tag-with-or-and",
        "tag-with-reserved-word",
        "duplicated-tags",
    )
    # True while visiting a keyword body; enables parsing of the trailing
    # `Tags:` line inside keyword [Documentation]
    is_keyword = False
    # special `robot:` tags recognized by Robot Framework itself
    reserved_tags = {
        "robot:exit",
        "robot:no-dry-run",
        "robot:continue-on-failure",
        "robot:recursive-continue-on-failure",
        "robot:skip",
        "robot:skip-on-failure",
        "robot:stop-on-failure",
        "robot:exclude",
        "robot:private",
    }

    def visit_ForceTags(self, node):  # noqa
        self.check_tags(node)

    # every tag-bearing setting shares the same validation
    visit_DefaultTags = visit_Tags = visit_KeywordTags = visit_ForceTags

    def visit_Documentation(self, node):  # noqa
        """Validate tags declared on the last `Tags:` line of keyword documentation."""
        if self.is_keyword:
            *_, last_line = node.lines
            # keep only data tokens of the documentation line itself
            filtered_line = filter(
                lambda tag: tag.type not in Token.NON_DATA_TOKENS and tag.type != Token.DOCUMENTATION,
                last_line,
            )
            tags = defaultdict(list)
            for index, token in enumerate(filtered_line):
                # only lines starting with the literal "Tags:" marker carry tags
                if index == 0 and token.value.lower() != "tags:":
                    break
                token.value = token.value.rstrip(",")
                # RF normalization: case-insensitive, spaces ignored
                normalized_tag = token.value.lower().replace(" ", "")
                tags[normalized_tag].append(token)
                self.check_tag(token, node)
            self.check_duplicates(tags)

    def visit_Keyword(self, node):  # noqa
        self.is_keyword = True
        super().generic_visit(node)
        self.is_keyword = False

    def check_tags(self, node):
        """Validate each tag of the setting and collect normalized duplicates."""
        tags = defaultdict(list)
        for tag in node.data_tokens[1:]:
            normalized_tag = tag.value.lower().replace(" ", "")
            tags[normalized_tag].append(tag)
            self.check_tag(tag, node)
        self.check_duplicates(tags)

    def check_duplicates(self, tags):
        """Report every occurrence of a normalized tag after the first one."""
        for nodes in tags.values():
            for duplicate in nodes[1:]:
                self.report(
                    "duplicated-tags",
                    name=duplicate.value,
                    line=nodes[0].lineno,
                    column=nodes[0].col_offset + 1,
                    node=duplicate,
                    col=duplicate.col_offset + 1,
                    end_col=duplicate.end_col_offset + 1,
                )

    def check_tag(self, tag, node):
        """Run the single-tag checks: spaces, OR/AND words, reserved robot: prefix."""
        if " " in tag.value:
            self.report(
                "tag-with-space",
                tag=tag.value,
                node=node,
                lineno=tag.lineno,
                col=tag.col_offset + 1,
                end_col=tag.end_col_offset + 1,
            )
        if "OR" in tag.value or "AND" in tag.value:
            self.report("tag-with-or-and", tag=tag.value, node=node, lineno=tag.lineno, col=tag.col_offset + 1)
        normalized = tag.value.lower()
        if normalized.startswith("robot:") and normalized not in self.reserved_tags:
            self.report(
                "tag-with-reserved-word",
                tag=tag.value,
                node=node,
                lineno=tag.lineno,
                col=tag.col_offset + 1,
                # +1 for consistency with every other report in this checker
                # (columns are 1-based); previously the offset was reported as-is
                end_col=tag.end_col_offset + 1,
            )
class TagScopeChecker(VisitorChecker):
    """Checker for tag scopes."""

    reports = (
        "could-be-test-tags",
        "tag-already-set-in-test-tags",
        "unnecessary-default-tags",
        "empty-tags",
    )

    def __init__(self):
        # per-file state; fully re-initialized in visit_File
        self.tags = []  # one list of tag names per visited [Tags] setting
        self.test_tags = set()  # tags from Force Tags / Test Tags
        self.default_tags = set()  # tags from Default Tags
        self.test_tags_node = None
        self.default_tags_node = None
        self.test_cases_count = 0
        self.in_keywords = False  # True while inside *** Keywords *** section
        super().__init__()

    def visit_File(self, node):  # noqa
        self.tags = []
        self.test_tags = set()
        self.default_tags = set()
        self.test_cases_count = 0
        self.test_tags_node = None
        # reset the remaining per-file state as well; previously only a subset
        # was re-initialized, leaving values from the previously scanned file
        self.default_tags_node = None
        self.in_keywords = False
        super().visit_File(node)
        if not self.tags:
            return
        # NOTE(review): self.tags also collects keyword [Tags]; when keywords
        # have tags this count check suppresses could-be-test-tags — confirm
        # this matches the intended upstream behavior
        if len(self.tags) != self.test_cases_count:
            return
        if self.default_tags:
            report_node = node if self.default_tags_node is None else self.default_tags_node
            self.report(
                "unnecessary-default-tags",
                node=report_node,
                col=report_node.col_offset + 1,
                end_col=report_node.get_token(Token.DEFAULT_TAGS).end_col_offset + 1,
            )
        if self.test_cases_count < 2:
            return
        common_tags = set.intersection(*[set(tags) for tags in self.tags])
        # tags already promoted to Test Tags are not reported again
        common_tags = common_tags - self.test_tags
        if common_tags:
            report_node = node if self.test_tags_node is None else self.test_tags_node
            self.report(
                "could-be-test-tags",
                tags=", ".join(common_tags),
                node=report_node,
            )

    def visit_KeywordSection(self, node):  # noqa
        self.in_keywords = True
        self.generic_visit(node)
        self.in_keywords = False

    def visit_TestCase(self, node):  # noqa
        self.test_cases_count += 1
        self.generic_visit(node)

    def visit_ForceTags(self, node):  # noqa
        self.test_tags = {token.value for token in node.data_tokens[1:]}
        self.test_tags_node = node

    def visit_DefaultTags(self, node):  # noqa
        self.default_tags = {token.value for token in node.data_tokens[1:]}
        self.default_tags_node = node

    def visit_Tags(self, node):  # noqa
        if not node.values:
            # keywords have no Default Tags, so the NONE hint applies to tests only
            suffix = "" if self.in_keywords else ". Consider using NONE if you want to overwrite the Default Tags"
            self.report(
                "empty-tags",
                optional_warning=suffix,
                node=node,
                col=node.data_tokens[0].col_offset + 1,
                end_col=node.end_col_offset,
            )
        self.tags.append([tag.value for tag in node.data_tokens[1:]])
        for tag in node.data_tokens[1:]:
            if tag.value not in self.test_tags:
                continue
            test_force_tags = self.test_tags_node.data_tokens[0].value
            self.report(
                "tag-already-set-in-test-tags",
                tag=tag.value,
                test_force_tags=test_force_tags,
                node=node,
                lineno=tag.lineno,
                col=tag.col_offset + 1,
            )
from collections import defaultdict
from robot.api import Token
from robocop.checkers import VisitorChecker
from robocop.rules import Rule, RuleParam, RuleSeverity
from robocop.utils import get_errors, normalize_robot_name, normalize_robot_var_name
def configure_sections_order(value):
    """Convert a comma-separated list of section names into a {header_token: index} order map.

    Raises ValueError for unknown or repeated section names. Test Cases and
    Tasks share one position: configuring ``testcases`` mirrors the index to
    the task header as well.
    """
    section_map = {
        "settings": Token.SETTING_HEADER,
        "variables": Token.VARIABLE_HEADER,
        "testcase": Token.TESTCASE_HEADER,
        "testcases": Token.TESTCASE_HEADER,
        "task": "TASK HEADER",
        "tasks": "TASK HEADER",
        "keyword": Token.KEYWORD_HEADER,
        "keywords": Token.KEYWORD_HEADER,
    }
    sections_order = {}
    for index, name in enumerate(value.split(",")):
        header = section_map.get(name.lower())
        if header is None or header in sections_order:
            raise ValueError(f"Invalid section name: `{name}`")
        sections_order[header] = index
    if Token.TESTCASE_HEADER in sections_order:
        sections_order["TASK HEADER"] = sections_order[Token.TESTCASE_HEADER]
    return sections_order
rules = {
"0801": Rule(
rule_id="0801",
name="duplicated-test-case",
msg="Multiple test cases with name '{{ name }}' (first occurrence in line {{ first_occurrence_line }})",
severity=RuleSeverity.ERROR,
docs="""
It is not allowed to reuse the same name of the test case within the same suite in Robot Framework.
Name matching is case-insensitive and ignores spaces and underscore characters.
Duplicated test cases example::
*** Test Cases ***
Test with name
No Operation
test_with Name # it is a duplicate of 'Test with name'
No Operation
""",
),
"0802": Rule(
rule_id="0802",
name="duplicated-keyword",
msg="Multiple keywords with name '{{ name }}' (first occurrence in line {{ first_occurrence_line }})",
severity=RuleSeverity.ERROR,
docs="""
Do not define keywords with the same name inside the same file. Name matching is case-insensitive and
ignores spaces and underscore characters.
Duplicated keyword names example::
*** Keywords ***
Keyword
No Operation
keyword
No Operation
K_eywor d
No Operation
""",
),
"0803": Rule(
rule_id="0803",
name="duplicated-variable",
msg="Multiple variables with name '{{ name }}' in Variables section (first occurrence in line "
"{{ first_occurrence_line }}). "
"Note that Robot Framework is case-insensitive",
severity=RuleSeverity.ERROR,
docs="""
Variable names in Robot Framework are case-insensitive and ignore spaces and underscores. Following variables
are duplicates::
*** Variables ***
${variable} 1
${VARIAble} a
@{variable} a b
${v ariabl e} c
${v_ariable} d
""",
),
"0804": Rule(
rule_id="0804",
name="duplicated-resource",
msg="Multiple resource imports with path '{{ name }}' (first occurrence in line {{ first_occurrence_line }})",
severity=RuleSeverity.WARNING,
),
"0805": Rule(
rule_id="0805",
name="duplicated-library",
msg="Multiple library imports with name '{{ name }}' and identical arguments (first occurrence in line "
"{{ first_occurrence_line }})",
severity=RuleSeverity.WARNING,
docs="""
If you need to reimport library use alias::
*** Settings ***
Library RobotLibrary
Library RobotLibrary AS OtherRobotLibrary
""",
),
"0806": Rule(
rule_id="0806",
name="duplicated-metadata",
msg="Duplicated metadata '{{ name }}' (first occurrence in line {{ first_occurrence_line }})",
severity=RuleSeverity.WARNING,
),
"0807": Rule(
rule_id="0807",
name="duplicated-variables-import",
msg="Duplicated variables import with path '{{ name }}' (first occurrence in line {{ first_occurrence_line }})",
severity=RuleSeverity.WARNING,
),
"0808": Rule(
rule_id="0808",
name="section-already-defined",
msg="'{{ section_name }}' section header already defined in file (first occurrence in line "
"{{ first_occurrence_line }})",
severity=RuleSeverity.WARNING,
docs="""
Duplicated section in the file. Robot Framework will handle repeated sections but it is recommended to not
duplicate them.
Example::
*** Test Cases ***
My Test
Keyword
*** Keywords ***
Keyword
No Operation
*** Test Cases *** # duplicate
Other Test
Keyword
""",
),
"0809": Rule(
RuleParam(
name="sections_order",
default="settings,variables,testcases,keywords",
converter=configure_sections_order,
show_type="str",
desc="order of sections in comma-separated list",
),
rule_id="0809",
name="section-out-of-order",
msg="'{{ section_name }}' section header is defined in wrong order: {{ recommended_order }}",
severity=RuleSeverity.WARNING,
docs="""
Sections should be defined in order set by `sections_order`
parameter (default: `settings,variables,testcases,keywords`).
To change the default order use following option::
robocop --configure section-out-of-order:sections_order:comma,separated,list,of,sections
where section should be case-insensitive name from the list: comments, settings, variables, testcases, keywords.
Order of not configured sections is ignored.
Example::
*** Settings ***
*** Keywords ***
*** Test Cases *** # it will report issue because Test Cases should be defined before Keywords
""",
),
"0810": Rule(
rule_id="0810",
name="both-tests-and-tasks",
msg="Both Task(s) and Test Case(s) section headers defined in file",
severity=RuleSeverity.ERROR,
docs="""
The file contains both Test Case and Task sections. Use only one of them. ::
*** Test Cases ***
*** Tasks ***
""",
),
"0811": Rule(
rule_id="0811",
name="duplicated-argument-name",
msg="Argument name '{{ argument_name }}' is already used",
severity=RuleSeverity.ERROR,
docs="""
Variable names in Robot Framework are case-insensitive and ignores spaces and underscores. Following arguments
are duplicates::
*** Keywords ***
Keyword
[Arguments] ${var} ${VAR} ${v_ar} ${v ar}
Other Keyword
""",
),
"0812": Rule(
rule_id="0812",
name="duplicated-assigned-var-name",
msg="Assigned variable name '{{ variable_name }}' is already used",
severity=RuleSeverity.INFO,
docs="""
Variable names in Robot Framework are case-insensitive and ignores spaces and underscores. Following variables
are duplicates::
*** Test Cases ***
Test
${var} ${VAR} ${v_ar} ${v ar} Keyword
""",
),
"0813": Rule(
rule_id="0813",
name="duplicated-setting",
msg="{{ error_msg }}",
severity=RuleSeverity.WARNING,
docs="""
Some settings can be used only once in a file. Only the first value is used.
Example::
*** Settings ***
Force Tags F1
Force Tags F2 # this setting will be ignored
""",
),
}
class DuplicationsChecker(VisitorChecker):
    """Checker for duplicated names.

    Collects named entities (tests, keywords, variables, imports, metadata)
    per file into ``normalized name -> [nodes]`` maps and reports every
    occurrence after the first. Normalization follows Robot Framework rules:
    case-insensitive, spaces and underscores ignored.
    """

    reports = (
        "duplicated-test-case",
        "duplicated-keyword",
        "duplicated-variable",
        "duplicated-resource",
        "duplicated-library",
        "duplicated-metadata",
        "duplicated-variables-import",
        "duplicated-argument-name",
        "duplicated-assigned-var-name",
        "duplicated-setting",
    )

    def __init__(self):
        # normalized name -> list of AST nodes sharing it (per-file state)
        self.test_cases = defaultdict(list)
        self.keywords = defaultdict(list)
        self.variables = defaultdict(list)
        self.resources = defaultdict(list)
        self.libraries = defaultdict(list)
        self.metadata = defaultdict(list)
        self.variable_imports = defaultdict(list)
        super().__init__()

    def visit_File(self, node):  # noqa
        # reset per-file collections, visit the whole file, then report
        self.test_cases = defaultdict(list)
        self.keywords = defaultdict(list)
        self.variables = defaultdict(list)
        self.resources = defaultdict(list)
        self.libraries = defaultdict(list)
        self.metadata = defaultdict(list)
        self.variable_imports = defaultdict(list)
        super().visit_File(node)
        self.check_duplicates(self.test_cases, "duplicated-test-case")
        self.check_duplicates(self.keywords, "duplicated-keyword")
        self.check_duplicates(self.variables, "duplicated-variable")
        self.check_duplicates(self.resources, "duplicated-resource", True)
        self.check_duplicates(self.metadata, "duplicated-metadata", True)
        self.check_duplicates(self.variable_imports, "duplicated-variables-import", True)
        self.check_library_duplicates(self.libraries, "duplicated-library")

    def check_duplicates(self, container, rule, underline_whole_line=False):
        """Report each node after the first for every name in ``container``."""
        for nodes in container.values():
            for duplicate in nodes[1:]:
                if underline_whole_line:
                    end_col = duplicate.end_col_offset + 1
                else:
                    # underline just the name portion of the statement
                    end_col = duplicate.col_offset + len(duplicate.name) + 1
                self.report(
                    rule, name=duplicate.name, first_occurrence_line=nodes[0].lineno, node=duplicate, end_col=end_col
                )

    def check_library_duplicates(self, container, rule):
        """Like check_duplicates, but underlines the library NAME token."""
        for nodes in container.values():
            for duplicate in nodes[1:]:
                lib_token = duplicate.get_token(Token.NAME)
                self.report(
                    rule,
                    name=duplicate.name,
                    first_occurrence_line=nodes[0].lineno,
                    node=duplicate,
                    col=lib_token.col_offset + 1,
                    end_col=lib_token.end_col_offset + 1,
                )

    def visit_TestCase(self, node):  # noqa
        testcase_name = normalize_robot_name(node.name)
        self.test_cases[testcase_name].append(node)
        self.generic_visit(node)

    def visit_Keyword(self, node):  # noqa
        keyword_name = normalize_robot_name(node.name)
        self.keywords[keyword_name].append(node)
        self.generic_visit(node)

    def visit_KeywordCall(self, node):  # noqa
        # duplicated variable names within a single multi-assignment
        assign = node.get_tokens(Token.ASSIGN)
        seen = set()
        for var in assign:
            name = normalize_robot_var_name(var.value)
            if name in seen:
                self.report(
                    "duplicated-assigned-var-name",
                    variable_name=var.value,
                    node=node,
                    lineno=var.lineno,
                    col=var.col_offset + 1,
                    end_col=var.col_offset + len(var.value) + 1,
                )
            else:
                seen.add(name)

    def visit_VariableSection(self, node):  # noqa
        self.generic_visit(node)

    def visit_Variable(self, node):  # noqa
        # skip unnamed/invalid variable rows; they are reported elsewhere
        if not node.name or get_errors(node):
            return
        var_name = normalize_robot_name(self.replace_chars(node.name, "${}@&"))
        self.variables[var_name].append(node)

    @staticmethod
    def replace_chars(name, chars):
        """Strip every character of ``chars`` from ``name`` (removes ${}@& decorations)."""
        return "".join(c for c in name if c not in chars)

    def visit_ResourceImport(self, node):  # noqa
        if node.name:
            self.resources[node.name].append(node)

    def visit_LibraryImport(self, node):  # noqa
        if not node.name:
            return
        # imports are duplicates only when alias/name AND arguments match
        lib_name = node.alias if node.alias else node.name
        name_with_args = lib_name + "".join(token.value for token in node.get_tokens(Token.ARGUMENT))
        self.libraries[name_with_args].append(node)

    def visit_Metadata(self, node):  # noqa
        if node.name is not None:
            self.metadata[node.name + node.value].append(node)

    def visit_VariablesImport(self, node):  # noqa
        if not node.name:
            return
        # only python files can have arguments - covered in E0404 variables-import-with-args
        if not node.name.endswith(".py") and node.get_token(Token.ARGUMENT):
            return
        name_with_args = node.name + "".join(token.value for token in node.data_tokens[2:])
        self.variable_imports[name_with_args].append(node)

    def visit_Arguments(self, node):  # noqa
        args = set()
        for arg in node.get_tokens(Token.ARGUMENT):
            # compare only the argument name, not its default value
            orig, *_ = arg.value.split("=", maxsplit=1)
            name = normalize_robot_var_name(orig)
            if name in args:
                self.report(
                    "duplicated-argument-name",
                    argument_name=orig,
                    node=node,
                    lineno=arg.lineno,
                    col=arg.col_offset + 1,
                    end_col=arg.col_offset + len(orig) + 1,
                )
            else:
                args.add(name)

    def visit_Error(self, node):  # noqa
        # RF reports repeated single-use settings as parsing errors
        for error in get_errors(node):
            if "is allowed only once" in error:
                self.report(
                    "duplicated-setting", error_msg=error, node=node, end_col=node.data_tokens[0].end_col_offset
                )
class SectionHeadersChecker(VisitorChecker):
    """Checker for duplicated or out of order section headers."""

    reports = (
        "section-already-defined",
        "section-out-of-order",
        "both-tests-and-tasks",
    )

    def __init__(self):
        # per-file state, reset in visit_File
        self.sections_by_order = []  # order indices of headers seen so far
        self.sections_by_existence = dict()  # header token -> first lineno
        super().__init__()

    @staticmethod
    def section_order_to_str(order):
        """Render an order map as e.g. 'Settings > Variables > Test Cases / Tasks > Keywords'."""
        by_index = sorted([(key, value) for key, value in order.items()], key=lambda x: x[1])
        name_map = {
            Token.SETTING_HEADER: "Settings",
            Token.VARIABLE_HEADER: "Variables",
            Token.TESTCASE_HEADER: "Test Cases / Tasks",
            "TASK HEADER": "Test Cases / Tasks",
            Token.KEYWORD_HEADER: "Keywords",
        }
        order_str = []
        for name, _ in by_index:
            mapped_name = name_map[name]
            # tests and tasks share one display name; emit it only once
            if mapped_name not in order_str:
                order_str.append(mapped_name)
        return " > ".join(order_str)

    def visit_File(self, node):  # noqa
        self.sections_by_order = []
        self.sections_by_existence = dict()
        super().visit_File(node)

    def visit_SectionHeader(self, node):  # noqa
        section_name = node.type
        # headers not mentioned in the configured order are ignored entirely
        if section_name not in self.param("section-out-of-order", "sections_order"):
            return
        if section_name in (Token.TESTCASE_HEADER, "TASK HEADER"):
            # a bit awkward implementation because before RF 6.0 task header used TESTCASE_HEADER type
            if "task" in node.name.lower():
                section_name = "TASK HEADER"
                if Token.TESTCASE_HEADER in self.sections_by_existence:
                    self.report("both-tests-and-tasks", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
            else:
                if "TASK HEADER" in self.sections_by_existence:
                    self.report("both-tests-and-tasks", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
        order_id = self.param("section-out-of-order", "sections_order")[section_name]
        if section_name in self.sections_by_existence:
            self.report(
                "section-already-defined",
                section_name=node.data_tokens[0].value,
                first_occurrence_line=self.sections_by_existence[section_name],
                node=node,
                end_col=node.end_col_offset,
            )
        else:
            self.sections_by_existence[section_name] = node.lineno
        # out of order: any earlier header has a larger configured index
        if any(previous_id > order_id for previous_id in self.sections_by_order):
            token = node.data_tokens[0]
            self.report(
                "section-out-of-order",
                section_name=token.value,
                recommended_order=self.section_order_to_str(self.param("section-out-of-order", "sections_order")),
                node=node,
                end_col=token.end_col_offset + 1,
            )
        self.sections_by_order.append(order_id)
from pathlib import Path
from robot.parsing.model.blocks import SettingSection
from robot.parsing.model.statements import Documentation
from robocop.checkers import VisitorChecker
from robocop.rules import Rule, RuleParam, RuleSeverity
from robocop.utils.misc import str2bool
rules = {
"0201": Rule(
rule_id="0201",
name="missing-doc-keyword",
msg="Missing documentation in '{{ name }}' keyword",
severity=RuleSeverity.WARNING,
docs="""
You can add documentation to keyword using following syntax::
Keyword
[Documentation] Keyword documentation
Keyword Step
Other Step
""",
),
"0202": Rule(
RuleParam(
name="ignore_templated",
default="True",
converter=str2bool,
show_type="bool",
desc="whether templated tests should be documented or not",
),
rule_id="0202",
name="missing-doc-test-case",
msg="Missing documentation in '{{ name }}' test case",
severity=RuleSeverity.WARNING,
docs="""
You can add documentation to test case using following syntax::
Test
[Documentation] Test documentation
Keyword Step
Other Step
The rule by default ignores templated test cases but it can be configured with::
robocop --configure missing-doc-test-case:ignore_templated:False
Possible values are: Yes / 1 / True (default) or No / False / 0.
""",
),
"0203": Rule(
rule_id="0203",
name="missing-doc-suite",
msg="Missing documentation in suite",
severity=RuleSeverity.WARNING,
docs="""
You can add documentation to suite using following syntax::
*** Settings ***
Documentation Suite documentation
""",
),
"0204": Rule(
rule_id="0204",
name="missing-doc-resource-file",
msg="Missing documentation in resource file",
severity=RuleSeverity.WARNING,
docs="""
You can add documentation to resource file using following syntax::
*** Settings ***
Documentation Resource file documentation
""",
),
}
class MissingDocumentationChecker(VisitorChecker):
    """Checker for missing documentation."""

    reports = (
        "missing-doc-keyword",
        "missing-doc-test-case",
        "missing-doc-suite",
        "missing-doc-resource-file",
    )

    def visit_Keyword(self, node):  # noqa
        # commented-out keywords (name starting with '#') are skipped
        if node.name.lstrip().startswith("#"):
            return
        self.check_if_docs_are_present(node, "missing-doc-keyword")

    def visit_TestCase(self, node):  # noqa
        # templated tests are exempt unless ignore_templated is disabled
        if self.param("missing-doc-test-case", "ignore_templated") and self.templated_suite:
            return
        self.check_if_docs_are_present(node, "missing-doc-test-case")

    def visit_SettingSection(self, node):  # noqa
        self.check_if_docs_are_present(node, "missing-doc-suite")

    def visit_File(self, node):  # noqa
        # with no Settings section at all, there can be no Documentation setting;
        # report against the file, picking the rule by file extension
        for section in node.sections:
            if isinstance(section, SettingSection):
                break
        else:
            source = node.source if node.source else self.source
            if source:
                extension = Path(source).suffix
                if ".resource" in extension:
                    self.report("missing-doc-resource-file", node=node, lineno=1, col=1)
                else:
                    self.report("missing-doc-suite", node=node, lineno=1, col=1)
            else:
                # no source path available (e.g. stdin): assume suite
                self.report("missing-doc-suite", node=node, lineno=1, col=1)
        super().visit_File(node)

    def check_if_docs_are_present(self, node, msg):
        """Report rule ``msg`` unless ``node.body`` contains a Documentation statement."""
        for statement in node.body:
            if isinstance(statement, Documentation):
                break
        else:
            if hasattr(node, "name"):
                self.report(msg, name=node.name, node=node, end_col=node.col_offset + len(node.name) + 1)
            else:
                self.report(msg, node=node, end_col=node.end_col_offset)
import inspect
try:
from robot.api.parsing import ModelVisitor
except ImportError:
from robot.parsing.model.visitor import ModelVisitor
from robot.utils import FileReader
from robocop.exceptions import RuleNotFoundError, RuleParamNotFoundError, RuleReportsNotFoundError
from robocop.utils import modules_from_paths, modules_in_current_dir
class BaseChecker:
    """Common state and reporting API shared by all checker types."""

    # populated per-instance in robocop.checkers.init() from the module's rules dict
    rules = None

    def __init__(self):
        self.disabled = False
        self.source = None  # path of the file currently being scanned
        self.lines = None  # in-memory file content split into lines, if available
        self.issues = []  # messages produced by the current scan
        self.rules = {}  # rule name -> Rule instance this checker reports
        self.templated_suite = False

    def param(self, rule, param_name):
        """Return the configured value of ``param_name`` for ``rule``.

        Raises:
            RuleNotFoundError: if this checker does not define ``rule``.
            RuleParamNotFoundError: if ``rule`` has no such parameter.
        """
        try:
            return self.rules[rule].config[param_name].value
        except KeyError:
            if rule not in self.rules:
                raise RuleNotFoundError(rule, self) from None
            if param_name not in self.rules[rule].config:
                raise RuleParamNotFoundError(self.rules[rule], param_name, self) from None

    def report(
        self,
        rule,
        node=None,
        lineno=None,
        col=None,
        end_lineno=None,
        end_col=None,
        ext_disablers=None,
        sev_threshold_value=None,
        severity=None,
        **kwargs,
    ):
        """Prepare a message for ``rule`` and collect it unless the rule is disabled.

        Position arguments override positions derived from ``node``; remaining
        kwargs fill the rule's message template placeholders.
        """
        if rule not in self.rules:
            raise ValueError(f"Missing definition for message with name {rule}")
        message = self.rules[rule].prepare_message(
            source=self.source,
            node=node,
            lineno=lineno,
            col=col,
            end_lineno=end_lineno,
            end_col=end_col,
            ext_disablers=ext_disablers,
            sev_threshold_value=sev_threshold_value,
            severity=severity,
            **kwargs,
        )
        # rules disabled by configuration or inline disablers are dropped here
        if message.enabled:
            self.issues.append(message)
class VisitorChecker(BaseChecker, ModelVisitor):  # noqa
    """Checker that walks the parsed Robot Framework AST model."""

    type = "visitor_checker"

    def scan_file(self, ast_model, filename, in_memory_content, templated=False):
        """Visit ``ast_model`` and return the list of issues found."""
        self.issues = []
        self.source = filename
        self.templated_suite = templated
        self.lines = in_memory_content.splitlines(keepends=True) if in_memory_content is not None else None
        self.visit_File(ast_model)
        return self.issues

    def visit_File(self, node):  # noqa
        """Perform generic ast visit on file node."""
        self.generic_visit(node)
class RawFileChecker(BaseChecker):  # noqa
    """Checker that processes the raw source text line by line."""

    type = "rawfile_checker"

    def scan_file(self, ast_model, filename, in_memory_content, templated=False):
        """Run check_line over every line of the file and return issues."""
        self.issues = []
        self.source = filename
        self.templated_suite = templated
        self.lines = in_memory_content.splitlines(keepends=True) if in_memory_content is not None else None
        self.parse_file()
        return self.issues

    def parse_file(self):
        """Read file line by line and for each call check_line method."""
        if self.lines is None:
            # no in-memory content: read lines from disk
            with FileReader(self.source) as file_reader:
                lines = file_reader.readlines()
        else:
            lines = self.lines
        for lineno, line in enumerate(lines, start=1):
            self.check_line(line, lineno)

    def check_line(self, line, lineno):
        """Subclasses implement the per-line check here."""
        raise NotImplementedError
def init(linter):
    """For each module get `rules` dictionary and visitors. Instantiate each visitor and map it to the
    rule class instance using `reports` visitor attribute."""
    for module in get_modules(linter.config.ext_rules):
        rules_in_module = {rule.name: rule for rule in getattr(module, "rules", {}).values()}
        for _, klass in inspect.getmembers(module, inspect.isclass):
            # Only checker subclasses that declare the rules they report are registered.
            if not (issubclass(klass, BaseChecker) and getattr(klass, "reports", False)):
                continue
            checker_instance = klass()
            for reported_rule in checker_instance.reports:
                if reported_rule not in rules_in_module:
                    raise RuleReportsNotFoundError(reported_rule, checker_instance) from None
                checker_instance.rules[reported_rule] = rules_in_module[reported_rule]
            linter.register_checker(checker_instance)
def get_modules(ext_rules):
    """Yield built-in checker modules followed by the external rule modules from ``ext_rules`` paths."""
    for module in modules_in_current_dir(__file__, __name__):
        yield module
    for module in modules_from_paths(ext_rules):
        yield module
def get_rules():
    """Yield ``(module_name, rule)`` pairs for every rule defined by the built-in checker modules."""
    for module in modules_in_current_dir(__file__, __name__):
        short_name = module.__name__.rsplit(".", 1)[-1]
        for rule in getattr(module, "rules", {}).values():
            yield short_name, rule
import re
from robot.api import Token
from robot.parsing.model.blocks import CommentSection, TestCase
from robot.parsing.model.statements import (
Arguments,
Comment,
Documentation,
EmptyLine,
KeywordCall,
Template,
TemplateArguments,
)
# Break/Continue/ReturnStatement exist only in Robot Framework 5.0+.
# On older versions they fall back to None, which short-circuits the related
# checks (see LengthChecker.count_keyword_calls).
try:
    from robot.api.parsing import Break, Continue, ReturnStatement
except ImportError:
    ReturnStatement, Break, Continue = None, None, None
from robocop.checkers import RawFileChecker, VisitorChecker
from robocop.rules import Rule, RuleParam, RuleSeverity, SeverityThreshold
from robocop.utils import get_section_name, normalize_robot_name, pattern_type, str2bool
# Rule definitions for the "lengths" checkers (rule ids 05xx).
rules = {
    "0501": Rule(
        RuleParam(name="max_len", default=40, converter=int, desc="number of lines allowed in a keyword"),
        RuleParam(name="ignore_docs", default=False, converter=str2bool, show_type="bool", desc="Ignore documentation"),
        SeverityThreshold("max_len", compare_method="greater"),
        rule_id="0501",
        name="too-long-keyword",
        msg="Keyword '{{ keyword_name }}' is too long ({{ keyword_length }}/{{ allowed_length}})",
        severity=RuleSeverity.WARNING,
    ),
    "0502": Rule(
        RuleParam(name="min_calls", default=1, converter=int, desc="number of keyword calls required in a keyword"),
        SeverityThreshold("min_calls", compare_method="less"),
        rule_id="0502",
        name="too-few-calls-in-keyword",
        msg="Keyword '{{ keyword_name }}' has too few keywords inside ({{ keyword_count }}/{{ min_allowed_count }})",
        severity=RuleSeverity.WARNING,
    ),
    "0503": Rule(
        RuleParam(name="max_calls", default=10, converter=int, desc="number of keyword calls allowed in a keyword"),
        SeverityThreshold("max_calls", compare_method="greater"),
        rule_id="0503",
        name="too-many-calls-in-keyword",
        msg="Keyword '{{ keyword_name }}' has too many keywords inside ({{ keyword_count }}/{{ max_allowed_count }})",
        severity=RuleSeverity.WARNING,
    ),
    "0504": Rule(
        RuleParam(name="max_len", default=20, converter=int, desc="number of lines allowed in a test case"),
        RuleParam(name="ignore_docs", default=False, converter=str2bool, show_type="bool", desc="Ignore documentation"),
        SeverityThreshold("max_len", compare_method="greater"),
        rule_id="0504",
        name="too-long-test-case",
        msg="Test case '{{ test_name }}' is too long ({{ test_length }}/{{ allowed_length }})",
        severity=RuleSeverity.WARNING,
    ),
    "0505": Rule(
        RuleParam(name="max_calls", default=10, converter=int, desc="number of keyword calls allowed in a test case"),
        RuleParam(
            name="ignore_templated", default=False, converter=str2bool, show_type="bool", desc="Ignore templated tests"
        ),
        SeverityThreshold("max_calls", compare_method="greater"),
        rule_id="0505",
        name="too-many-calls-in-test-case",
        msg="Test case '{{ test_name }}' has too many keywords inside ({{ keyword_count }}/{{ max_allowed_count }})",
        docs="Redesign the test and move complex logic to separate keywords to increase readability.",
        severity=RuleSeverity.WARNING,
    ),
    "0506": Rule(
        RuleParam(name="max_lines", default=400, converter=int, desc="number of lines allowed in a file"),
        SeverityThreshold("max_lines", compare_method="greater"),
        rule_id="0506",
        name="file-too-long",
        msg="File has too many lines ({{ lines_count }}/{{max_allowed_count }})",
        severity=RuleSeverity.WARNING,
    ),
    "0507": Rule(
        RuleParam(name="max_args", default=5, converter=int, desc="number of lines allowed in a file"),
        SeverityThreshold("max_args", compare_method="greater"),
        rule_id="0507",
        name="too-many-arguments",
        msg="Keyword '{{ keyword_name }}' has too many arguments ({{ arguments_count }}/{{ max_allowed_count }})",
        severity=RuleSeverity.WARNING,
    ),
    "0508": Rule(
        RuleParam(name="line_length", default=120, converter=int, desc="number of characters allowed in line"),
        RuleParam(
            name="ignore_pattern",
            default=re.compile(r"https?://\S+"),
            converter=pattern_type,
            show_type="regex",
            desc="ignore lines that contain configured pattern",
        ),
        SeverityThreshold("line_length"),
        rule_id="0508",
        name="line-too-long",
        msg="Line is too long ({{ line_length }}/{{ allowed_length }})",
        severity=RuleSeverity.WARNING,
        # Raw string: the docs contain a regex with ``\S`` which would otherwise be an
        # invalid escape sequence (SyntaxWarning on Python 3.12+).
        docs=r"""
        It is possible to ignore lines that match regex pattern. Configure it using following option::
            robocop --configure line-too-long:ignore_pattern:pattern
        The default pattern is ``https?://\S+`` that ignores the lines that look like an URL.
        """,
    ),
    "0509": Rule(
        rule_id="0509", name="empty-section", msg="Section '{{ section_name }}' is empty", severity=RuleSeverity.WARNING
    ),
    "0510": Rule(
        RuleParam(
            name="max_returns", default=4, converter=int, desc="allowed number of returned values from a keyword"
        ),
        SeverityThreshold("max_returns", compare_method="greater"),
        rule_id="0510",
        name="number-of-returned-values",
        msg="Too many return values ({{ return_count }}/{{ max_allowed_count }})",
        severity=RuleSeverity.WARNING,
    ),
    "0511": Rule(
        rule_id="0511",
        name="empty-metadata",
        msg="Metadata settings does not have any value set",
        severity=RuleSeverity.WARNING,
    ),
    "0512": Rule(
        rule_id="0512",
        name="empty-documentation",
        msg="Documentation of {{ block_name }} is empty",
        severity=RuleSeverity.WARNING,
    ),
    "0513": Rule(rule_id="0513", name="empty-force-tags", msg="Force Tags are empty", severity=RuleSeverity.WARNING),
    "0514": Rule(
        rule_id="0514", name="empty-default-tags", msg="Default Tags are empty", severity=RuleSeverity.WARNING
    ),
    "0515": Rule(
        rule_id="0515", name="empty-variables-import", msg="Import variables path is empty", severity=RuleSeverity.ERROR
    ),
    "0516": Rule(
        rule_id="0516", name="empty-resource-import", msg="Import resource path is empty", severity=RuleSeverity.ERROR
    ),
    "0517": Rule(
        rule_id="0517", name="empty-library-import", msg="Import library path is empty", severity=RuleSeverity.ERROR
    ),
    "0518": Rule(
        rule_id="0518",
        name="empty-setup",
        msg="Setup of {{ block_name }} does not have any keywords",
        severity=RuleSeverity.ERROR,
    ),
    "0519": Rule(
        rule_id="0519",
        name="empty-suite-setup",
        msg="Suite Setup does not have any keywords",
        severity=RuleSeverity.ERROR,
    ),
    "0520": Rule(
        rule_id="0520",
        name="empty-test-setup",
        msg="Test Setup does not have any keywords",
        severity=RuleSeverity.ERROR,
    ),
    "0521": Rule(
        rule_id="0521",
        name="empty-teardown",
        msg="Teardown of {{ block_name }} does not have any keywords",
        severity=RuleSeverity.ERROR,
    ),
    "0522": Rule(
        rule_id="0522",
        name="empty-suite-teardown",
        msg="Suite Teardown does not have any keywords",
        severity=RuleSeverity.ERROR,
    ),
    "0523": Rule(
        rule_id="0523",
        name="empty-test-teardown",
        msg="Test Teardown does not have any keywords",
        severity=RuleSeverity.ERROR,
    ),
    "0524": Rule(
        rule_id="0524", name="empty-timeout", msg="Timeout of {{ block_name }} is empty", severity=RuleSeverity.WARNING
    ),
    "0525": Rule(rule_id="0525", name="empty-test-timeout", msg="Test Timeout is empty", severity=RuleSeverity.WARNING),
    "0526": Rule(
        rule_id="0526",
        name="empty-arguments",
        msg="Arguments of {{ block_name }} are empty",
        severity=RuleSeverity.ERROR,
    ),
    "0527": Rule(
        RuleParam(name="max_testcases", default=50, converter=int, desc="number of test cases allowed in a suite"),
        RuleParam(
            name="max_templated_testcases",
            default=100,
            converter=int,
            desc="number of test cases allowed in a templated suite",
        ),
        SeverityThreshold("max_testcases or max_templated_testcases"),
        rule_id="0527",
        name="too-many-test-cases",
        msg="Too many test cases ({{ test_count }}/{{ max_allowed_count }})",
        severity=RuleSeverity.WARNING,
    ),
    "0528": Rule(
        RuleParam(name="min_calls", default=1, converter=int, desc="number of keyword calls required in a test case"),
        RuleParam(
            name="ignore_templated", default=False, converter=str2bool, show_type="bool", desc="Ignore templated tests"
        ),
        rule_id="0528",
        name="too-few-calls-in-test-case",
        msg="Test case '{{ test_name }}' has too few keywords inside ({{ keyword_count }}/{{ min_allowed_count }})",
        docs="""
        Test without keywords will fail. Add more keywords or set results using Fail, Pass, Skip keywords::
            *** Test Cases ***
            Test case
                [Tags]    smoke
                Skip    Test case draft
        """,
        severity=RuleSeverity.ERROR,
    ),
    "0529": Rule(
        rule_id="0529",
        name="empty-test-template",
        msg="Test Template is empty",
        docs="""
        ``Test Template`` sets the template to all tests in a suite. Empty value is considered an error
        because it leads the users to wrong impression on how the suite operates.
        Without value, the setting is ignored and the tests are not templated.
        """,
        severity=RuleSeverity.ERROR,
    ),
    "0530": Rule(
        rule_id="0530",
        name="empty-template",
        msg="Template of {{ block_name }} is empty. "
        "To overwrite suite Test Template use more explicit [Template] NONE",
        docs="""
        The ``[Template]`` setting overrides the possible template set in the Setting section, and an empty value for
        ``[Template]`` means that the test has no template even when Test Template is used.
        If it is intended behaviour, use more explicit ``NONE`` value to indicate that you want to overwrite suite
        Test Template::
            *** Settings ***
            Test Template    Template Keyword
            *** Test Cases ***
            Templated test
                argument
            Not templated test
                [Template]    NONE
        """,
        severity=RuleSeverity.WARNING,
    ),
}
def is_data_statement(node):
    """Return True for statements that carry data (not a blank line or a comment)."""
    non_data = (Comment, EmptyLine)
    return not isinstance(node, non_data)
def is_not_standalone_comment(node):
    """Return True for a comment statement whose first token is a separator (inline comment)."""
    if not isinstance(node, Comment):
        return False
    return node.tokens[0].type == Token.SEPARATOR
def check_node_length(node, ignore_docs):
    """Return ``(line count, last line number)`` of the node, optionally excluding documentation lines."""
    last_node = node
    # Search backwards for the last statement that is real data (or an inline comment).
    for child in reversed(node.body):
        if is_data_statement(child) or is_not_standalone_comment(child):
            last_node = child
            break
    length = last_node.end_lineno - node.lineno
    if ignore_docs:
        length -= get_documentation_length(node)
    return length, last_node.end_lineno
def get_documentation_length(node):
    """Return the total number of lines occupied by documentation statements in the node body."""
    return sum(
        child.end_lineno - child.lineno + 1 for child in node.body if isinstance(child, Documentation)
    )
class LengthChecker(VisitorChecker):
    """Checker for max and min length of keyword or test case. It analyses number of lines and also number of
    keyword calls (as you can have just few keywords but very long ones or vice versa).
    """
    reports = (
        "too-few-calls-in-keyword",
        "too-few-calls-in-test-case",
        "too-many-calls-in-keyword",
        "too-many-calls-in-test-case",
        "too-long-keyword",
        "too-long-test-case",
        "file-too-long",
        "too-many-arguments",
    )
    def visit_File(self, node):
        # File length is measured as the end line of the whole parsed model.
        if node.end_lineno > self.param("file-too-long", "max_lines"):
            self.report(
                "file-too-long",
                lines_count=node.end_lineno,
                max_allowed_count=self.param("file-too-long", "max_lines"),
                node=node,
                lineno=node.end_lineno,
                end_col=node.end_col_offset,
                sev_threshold_value=node.end_lineno,
            )
        super().visit_File(node)
    def visit_Keyword(self, node):  # noqa
        # Keyword names starting with '#' look like commented-out definitions - skip them.
        if node.name.lstrip().startswith("#"):
            return
        # Only the first [Arguments] statement is checked.
        for child in node.body:
            if isinstance(child, Arguments):
                args_number = len(child.values)
                if args_number > self.param("too-many-arguments", "max_args"):
                    self.report(
                        "too-many-arguments",
                        keyword_name=node.name,
                        arguments_count=args_number,
                        max_allowed_count=self.param("too-many-arguments", "max_args"),
                        node=node,
                        end_col=node.col_offset + len(node.name) + 1,
                        sev_threshold_value=args_number,
                    )
                break
        length, node_end_line = check_node_length(node, ignore_docs=self.param("too-long-keyword", "ignore_docs"))
        if length > self.param("too-long-keyword", "max_len"):
            self.report(
                "too-long-keyword",
                keyword_name=node.name,
                keyword_length=length,
                allowed_length=self.param("too-long-keyword", "max_len"),
                node=node,
                end_col=node.col_offset + len(node.name) + 1,
                ext_disablers=(node.lineno, node_end_line),
                sev_threshold_value=length,
            )
            # Keywords that are already too long are not also checked for call counts.
            return
        key_calls = LengthChecker.count_keyword_calls(node)
        if key_calls < self.param("too-few-calls-in-keyword", "min_calls"):
            self.report(
                "too-few-calls-in-keyword",
                keyword_name=node.name,
                keyword_count=key_calls,
                min_allowed_count=self.param("too-few-calls-in-keyword", "min_calls"),
                node=node,
                end_col=node.col_offset + len(node.name) + 1,
                sev_threshold_value=key_calls,
            )
        elif key_calls > self.param("too-many-calls-in-keyword", "max_calls"):
            self.report(
                "too-many-calls-in-keyword",
                keyword_name=node.name,
                keyword_count=key_calls,
                max_allowed_count=self.param("too-many-calls-in-keyword", "max_calls"),
                node=node,
                end_col=node.col_offset + len(node.name) + 1,
                sev_threshold_value=key_calls,
            )
    def test_is_templated(self, node):
        # Either the whole suite uses Test Template...
        if self.templated_suite:
            return True
        if not node.body:
            return False
        # ...or the test itself has a [Template] setting.
        for statement in node.body:
            if isinstance(statement, Template):
                return True
        return False
    def visit_TestCase(self, node):  # noqa
        length, _ = check_node_length(node, ignore_docs=self.param("too-long-test-case", "ignore_docs"))
        if length > self.param("too-long-test-case", "max_len"):
            self.report(
                "too-long-test-case",
                test_name=node.name,
                test_length=length,
                allowed_length=self.param("too-long-test-case", "max_len"),
                node=node,
                end_col=node.col_offset + len(node.name) + 1,
                sev_threshold_value=length,
            )
        # Call-count rules can be individually silenced for templated tests.
        test_is_templated = self.test_is_templated(node)
        skip_too_many = test_is_templated and self.param("too-many-calls-in-test-case", "ignore_templated")
        skip_too_few = test_is_templated and self.param("too-few-calls-in-test-case", "ignore_templated")
        if skip_too_few and skip_too_many:
            return
        key_calls = LengthChecker.count_keyword_calls(node)
        if not skip_too_many and (key_calls > self.param("too-many-calls-in-test-case", "max_calls")):
            self.report(
                "too-many-calls-in-test-case",
                test_name=node.name,
                keyword_count=key_calls,
                max_allowed_count=self.param("too-many-calls-in-test-case", "max_calls"),
                node=node,
                sev_threshold_value=key_calls,
                end_col=node.col_offset + len(node.name) + 1,
            )
        elif not skip_too_few and (key_calls < self.param("too-few-calls-in-test-case", "min_calls")):
            self.report(
                "too-few-calls-in-test-case",
                test_name=node.name,
                keyword_count=key_calls,
                min_allowed_count=self.param("too-few-calls-in-test-case", "min_calls"),
                node=node,
                sev_threshold_value=key_calls,
                end_col=node.col_offset + len(node.name) + 1,
            )
    @staticmethod
    def count_keyword_calls(node):
        """Recursively count keyword calls (and template arguments / RF5 control statements) in a node."""
        # ReturnStatement is imported and evaluates to true in RF 5.0+, we don't need to also check Break/Continue
        if (
            isinstance(node, (KeywordCall, TemplateArguments))
            or ReturnStatement
            and isinstance(node, (Break, Continue, ReturnStatement))
        ):
            return 1
        if not hasattr(node, "body"):
            return 0
        calls = sum(LengthChecker.count_keyword_calls(child) for child in node.body)
        # Follow branch chains: blocks linked via ``orelse`` / ``next`` attributes
        # (e.g. ELSE branches) contribute to the same count.
        while node and getattr(node, "orelse", None):
            node = node.orelse
            calls += sum(LengthChecker.count_keyword_calls(child) for child in node.body)
        while node and getattr(node, "next", None):
            node = node.next
            calls += sum(LengthChecker.count_keyword_calls(child) for child in node.body)
        return calls
class LineLengthChecker(RawFileChecker):
    """Checker for maximum length of a line."""
    reports = ("line-too-long",)
    # Matches `# noqa` or `# robocop`, `# robocop: enable`, `# robocop: disable=optional,rule,names`
    # so that disabler comments do not count towards the line length.
    disabler_pattern = re.compile(r"(# )+(noqa|robocop: ?(?P<disabler>disable|enable)=?(?P<rules>[\w\-,]*))")
    def check_line(self, line, lineno):
        ignore_pattern = self.param("line-too-long", "ignore_pattern")
        if ignore_pattern and ignore_pattern.search(line):
            return
        # Strip disabler comments and trailing whitespace, expand tabs to 4 spaces before measuring.
        measured = self.disabler_pattern.sub("", line).rstrip().expandtabs(4)
        allowed = self.param("line-too-long", "line_length")
        if len(measured) > allowed:
            self.report(
                "line-too-long",
                line_length=len(measured),
                allowed_length=allowed,
                lineno=lineno,
                end_col=len(measured) + 1,
                sev_threshold_value=len(measured),
            )
class EmptySectionChecker(VisitorChecker):
    """Checker for detecting empty sections."""
    reports = ("empty-section",)
    def check_if_empty(self, node):
        if not node.header:
            return
        # In a comment section only empty lines make it "empty"; elsewhere comments don't count as content.
        if isinstance(node, CommentSection):
            ignorable = EmptyLine
        else:
            ignorable = (Comment, EmptyLine)
        for child in node.body:
            if not isinstance(child, ignorable):
                return
        self.report(
            "empty-section",
            section_name=get_section_name(node),
            node=node,
            col=node.col_offset + 1,
            end_col=node.header.end_col_offset,
        )
    def visit_Section(self, node):  # noqa
        self.check_if_empty(node)
class NumberOfReturnedArgsChecker(VisitorChecker):
    """Checker for number of returned values from a keyword."""
    reports = ("number-of-returned-values",)
    def visit_Return(self, node):  # noqa
        self.check_node_returns(len(node.values), node)
    visit_ReturnStatement = visit_Return
    def visit_KeywordCall(self, node):  # noqa
        if not node.keyword:
            return
        normalized_name = normalize_robot_name(node.keyword, remove_prefix="builtin.")
        # Return From Keyword If takes a condition as its first argument, so it is not counted.
        if normalized_name == "returnfromkeyword":
            self.check_node_returns(len(node.args), node)
        elif normalized_name == "returnfromkeywordif":
            self.check_node_returns(len(node.args) - 1, node)
    def check_node_returns(self, return_count, node):
        max_returns = self.param("number-of-returned-values", "max_returns")
        if return_count <= max_returns:
            return
        self.report(
            "number-of-returned-values",
            return_count=return_count,
            max_allowed_count=max_returns,
            node=node,
            col=node.data_tokens[0].col_offset + 1,
            end_col=node.data_tokens[0].end_col_offset + 1,
            sev_threshold_value=return_count,
        )
class EmptySettingsChecker(VisitorChecker):
    """Checker for detecting empty settings."""
    reports = (
        "empty-metadata",
        "empty-documentation",
        "empty-force-tags",
        "empty-default-tags",
        "empty-variables-import",
        "empty-resource-import",
        "empty-library-import",
        "empty-setup",
        "empty-suite-setup",
        "empty-test-setup",
        "empty-teardown",
        "empty-suite-teardown",
        "empty-test-teardown",
        "empty-timeout",
        "empty-test-timeout",
        "empty-template",
        "empty-test-template",
        "empty-arguments",
    )
    def __init__(self):
        # Human-readable name of the enclosing block ("'X' Keyword", "'Y' Test Case",
        # "Test Suite") used in the {{ block_name }} message placeholder.
        self.parent_node_name = ""
        super().__init__()
    def visit_SettingSection(self, node):  # noqa
        self.parent_node_name = "Test Suite"
        self.generic_visit(node)
    def visit_TestCaseName(self, node):  # noqa
        if node.name:
            self.parent_node_name = f"'{node.name}' Test Case"
        else:
            self.parent_node_name = ""
        self.generic_visit(node)
    def visit_Keyword(self, node):  # noqa
        if node.name:
            self.parent_node_name = f"'{node.name}' Keyword"
        else:
            self.parent_node_name = ""
        self.generic_visit(node)
    def visit_Metadata(self, node):  # noqa
        # NOTE(review): unlike the other visitors this one passes no end_col - confirm upstream intent.
        if node.name is None:
            self.report("empty-metadata", node=node, col=node.col_offset + 1)
    def visit_Documentation(self, node):  # noqa
        if not node.value:
            self.report(
                "empty-documentation",
                block_name=self.parent_node_name,
                node=node,
                col=node.data_tokens[0].col_offset + 1,
                end_col=node.end_col_offset,
            )
    def visit_ForceTags(self, node):  # noqa
        if not node.values:
            self.report("empty-force-tags", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
    def visit_DefaultTags(self, node):  # noqa
        if not node.values:
            self.report("empty-default-tags", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
    def visit_VariablesImport(self, node):  # noqa
        if not node.name:
            self.report("empty-variables-import", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
    def visit_ResourceImport(self, node):  # noqa
        if not node.name:
            self.report("empty-resource-import", node=node, col=node.col_offset + 1)
    def visit_LibraryImport(self, node):  # noqa
        if not node.name:
            self.report("empty-library-import", node=node, col=node.col_offset + 1)
    def visit_Setup(self, node):  # noqa
        if not node.name:
            self.report(
                "empty-setup",
                block_name=self.parent_node_name,
                node=node,
                col=node.data_tokens[0].col_offset + 1,
                end_col=node.end_col_offset,
            )
    def visit_SuiteSetup(self, node):  # noqa
        if not node.name:
            self.report("empty-suite-setup", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
    def visit_TestSetup(self, node):  # noqa
        if not node.name:
            self.report("empty-test-setup", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
    def visit_Teardown(self, node):  # noqa
        if not node.name:
            self.report(
                "empty-teardown",
                block_name=self.parent_node_name,
                node=node,
                col=node.data_tokens[0].col_offset + 1,
                end_col=node.end_col_offset,
            )
    def visit_SuiteTeardown(self, node):  # noqa
        if not node.name:
            self.report("empty-suite-teardown", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
    def visit_TestTeardown(self, node):  # noqa
        if not node.name:
            self.report("empty-test-teardown", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
    def visit_TestTemplate(self, node):  # noqa
        if not node.value:
            self.report("empty-test-template", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
    def visit_Template(self, node):  # noqa
        # Fewer than 2 data tokens means only the [Template] marker itself is present (no value).
        if len(node.data_tokens) < 2:
            self.report(
                "empty-template",
                block_name=self.parent_node_name,
                node=node,
                col=node.data_tokens[0].col_offset + 1,
                end_col=node.end_col_offset,
            )
    def visit_Timeout(self, node):  # noqa
        if not node.value:
            self.report(
                "empty-timeout",
                block_name=self.parent_node_name,
                node=node,
                col=node.data_tokens[0].col_offset + 1,
                end_col=node.end_col_offset,
            )
    def visit_TestTimeout(self, node):  # noqa
        if not node.value:
            self.report("empty-test-timeout", node=node, col=node.col_offset + 1, end_col=node.end_col_offset)
    def visit_Arguments(self, node):  # noqa
        if not node.values:
            # NOTE(review): this is the only visitor using end_col_offset + 1 - confirm upstream intent.
            self.report(
                "empty-arguments",
                block_name=self.parent_node_name,
                node=node,
                col=node.data_tokens[0].col_offset + 1,
                end_col=node.end_col_offset + 1,
            )
class TestCaseNumberChecker(VisitorChecker):
    """Checker for counting number of test cases depending on suite type"""
    reports = ("too-many-test-cases",)
    def visit_TestCaseSection(self, node):  # noqa
        """Report when the section contains more test cases than the configured maximum."""
        # Templated (data-driven) suites get a separate, typically higher limit.
        max_testcases = (
            self.param("too-many-test-cases", "max_templated_testcases")
            if self.templated_suite
            else self.param("too-many-test-cases", "max_testcases")
        )
        # Generator expression: counts matches without building a throwaway list.
        discovered_testcases = sum(isinstance(child, TestCase) for child in node.body)
        if discovered_testcases > max_testcases:
            self.report(
                "too-many-test-cases",
                test_count=discovered_testcases,
                max_allowed_count=max_testcases,
                node=node,
                end_col=node.header.end_col_offset,
                sev_threshold_value=discovered_testcases,
            )
from robocop.utils.misc import normalize_robot_name
class RunKeywordVariant:
    """Describes how a BuiltIn "run keyword" variant embeds other keyword names in its arguments."""
    def __init__(self, name, resolve=1, branches=None, split_on_and=False):
        # Keyword name normalized via normalize_robot_name for case/format-insensitive lookup.
        self.name = normalize_robot_name(name)
        # Number of leading tokens consumed by the run keyword itself (e.g. 2 for
        # "Run Keyword If" which takes a condition) before a wrapped keyword name appears.
        self.resolve = resolve
        # Branch markers (e.g. ["ELSE IF", "ELSE"]) that introduce further keyword names.
        self.branches = branches
        # True when arguments hold multiple keyword calls separated by AND (Run Keywords).
        self.split_on_and = split_on_and
class RunKeywords(dict):
    """Mapping of run-keyword variants, queryable by name with or without the ``builtin.`` prefix."""
    def __init__(self, keywords):
        mapping = {}
        for variant in keywords:
            key = normalize_robot_name(variant.name)
            # Register each variant under both the bare and the library-prefixed name.
            mapping[key] = variant
            mapping[f"builtin.{key}"] = variant
        super().__init__(mapping)
    def __setitem__(self, keyword_name, kw_variant):
        key = normalize_robot_name(keyword_name)
        super().__setitem__(key, kw_variant)
        super().__setitem__(f"builtin.{key}", kw_variant)
    def __getitem__(self, keyword_name):
        return super().__getitem__(normalize_robot_name(keyword_name))
    def __missing__(self, keyword_name):
        # Unknown keywords resolve to None instead of raising KeyError.
        return None
# BuiltIn keywords that execute other keywords passed in their arguments.
# ``resolve``/``branches``/``split_on_and`` describe where the wrapped keyword names appear.
RUN_KEYWORDS = RunKeywords(
    [
        RunKeywordVariant("Run Keyword"),
        RunKeywordVariant("Run Keyword And Continue On Failure"),
        RunKeywordVariant("Run Keyword And Expect Error", resolve=2),
        RunKeywordVariant("Run Keyword And Ignore Error"),
        RunKeywordVariant("Run Keyword And Return"),
        RunKeywordVariant("Run Keyword And Return If", resolve=2),
        RunKeywordVariant("Run Keyword And Return Status"),
        RunKeywordVariant("Run Keyword And Warn On Failure"),
        RunKeywordVariant("Run Keyword If", resolve=2, branches=["ELSE IF", "ELSE"]),
        RunKeywordVariant("Run Keyword If All Tests Passed"),
        RunKeywordVariant("Run Keyword If Any Tests Failed"),
        RunKeywordVariant("Run Keyword If Test Failed"),
        RunKeywordVariant("Run Keyword If Test Passed"),
        RunKeywordVariant("Run Keyword If Timeout Occurred"),
        RunKeywordVariant("Run Keyword Unless", resolve=2),
        RunKeywordVariant("Run Keywords", split_on_and=True),
        RunKeywordVariant("Repeat Keyword", resolve=2),
        RunKeywordVariant("Wait Until Keyword Succeeds", resolve=3),
    ]
)
def skip_leading_tokens(tokens, break_token):
    """Return ``tokens`` starting from the first token whose type equals ``break_token``.

    Returns an empty list when no such token exists. (Previously this case fell through
    and returned ``None``; an empty list keeps the return type consistent and is equally
    falsy for the ``if not tokens`` check in ``parse_run_keyword``.)
    """
    for index, token in enumerate(tokens):
        if token.type == break_token:
            return tokens[index:]
    return []
def is_token_value_in_tokens(value, tokens):
    """Return True if any token in ``tokens`` has exactly the given ``value``."""
    for token in tokens:
        if token.value == value:
            return True
    return False
def split_on_token_value(tokens, value, resolve: int):
    """
    Split list of tokens into three lists based on token value.
    Returns tokens before found token, found token + `resolve` number of tokens, remaining tokens.
    When the value is not found, returns two empty lists and the original tokens.
    """
    for index, token in enumerate(tokens):
        if token.value == value:
            return (
                tokens[:index],
                tokens[index : index + resolve],
                tokens[index + resolve :],
            )
    return [], [], tokens
def iterate_keyword_names(keyword_node, name_token_type):
    """Yield each keyword-name token in the statement, including names nested in run keywords."""
    relevant_tokens = skip_leading_tokens(keyword_node.data_tokens, name_token_type)
    yield from parse_run_keyword(relevant_tokens)
def parse_run_keyword(tokens):
    """Yield keyword-name tokens, recursing into BuiltIn run-keyword variants."""
    if not tokens:
        return
    # The first token is always a keyword name.
    yield tokens[0]
    run_keyword = RUN_KEYWORDS[tokens[0].value]
    if not run_keyword:
        # Plain keyword - nothing nested to parse.
        return
    # Skip the tokens consumed by the run keyword itself (e.g. the condition of Run Keyword If).
    tokens = tokens[run_keyword.resolve :]
    if run_keyword.branches:
        if "ELSE IF" in run_keyword.branches:
            # Each ELSE IF starts a new condition + keyword; parse the part before it first.
            while is_token_value_in_tokens("ELSE IF", tokens):
                prefix, branch, tokens = split_on_token_value(tokens, "ELSE IF", 2)
                yield from parse_run_keyword(prefix)
        if "ELSE" in run_keyword.branches and is_token_value_in_tokens("ELSE", tokens):
            prefix, branch, tokens = split_on_token_value(tokens, "ELSE", 1)
            yield from parse_run_keyword(prefix)
            yield from parse_run_keyword(tokens)
            return
    elif run_keyword.split_on_and:
        # Run Keywords: arguments are whole keyword calls separated by AND.
        yield from split_on_and(tokens)
        return
    yield from parse_run_keyword(tokens)
def split_on_and(tokens):
    """Yield keyword tokens from a ``Run Keywords`` argument list, splitting on AND separators."""
    if not is_token_value_in_tokens("AND", tokens):
        # Without AND separators every token is treated as a keyword name on its own.
        yield from tokens
        return
    while is_token_value_in_tokens("AND", tokens):
        prefix, branch, tokens = split_on_token_value(tokens, "AND", 1)
        yield from parse_run_keyword(prefix)
    yield from parse_run_keyword(tokens)
import sys
import typing as t
from types import CodeType
from types import TracebackType
from .exceptions import TemplateSyntaxError
from .utils import internal_code
from .utils import missing
if t.TYPE_CHECKING:
from .runtime import Context
def rewrite_traceback_stack(source: t.Optional[str] = None) -> BaseException:
    """Rewrite the current exception to replace any tracebacks from
    within compiled template code with tracebacks that look like they
    came from the template source.

    This must be called within an ``except`` block.

    :param source: For ``TemplateSyntaxError``, the original source if
        known.
    :return: The original exception with the rewritten traceback.
    """
    _, exc_value, tb = sys.exc_info()
    # Both are guaranteed non-None inside an except block; casts keep the type checker happy.
    exc_value = t.cast(BaseException, exc_value)
    tb = t.cast(TracebackType, tb)
    if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
        # Translate a syntax error only once; record the template source for rendering.
        exc_value.translated = True
        exc_value.source = source
        # Remove the old traceback, otherwise the frames from the
        # compiler still show up.
        exc_value.with_traceback(None)
        # Outside of runtime, so the frame isn't executing template
        # code, but it still needs to point at the template.
        tb = fake_traceback(
            exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
        )
    else:
        # Skip the frame for the render function.
        tb = tb.tb_next
    stack = []
    # Build the stack of traceback object, replacing any in template
    # code with the source file and line information.
    while tb is not None:
        # Skip frames decorated with @internalcode. These are internal
        # calls that aren't useful in template debugging output.
        if tb.tb_frame.f_code in internal_code:
            tb = tb.tb_next
            continue
        # Compiled template modules expose the Template object via this global.
        template = tb.tb_frame.f_globals.get("__jinja_template__")
        if template is not None:
            lineno = template.get_corresponding_lineno(tb.tb_lineno)
            fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
            stack.append(fake_tb)
        else:
            stack.append(tb)
        tb = tb.tb_next
    tb_next = None
    # Assign tb_next in reverse to avoid circular references.
    for tb in reversed(stack):
        tb.tb_next = tb_next
        tb_next = tb
    return exc_value.with_traceback(tb_next)
def fake_traceback(  # type: ignore
    exc_value: BaseException, tb: t.Optional[TracebackType], filename: str, lineno: int
) -> TracebackType:
    """Produce a new traceback object that looks like it came from the
    template source instead of the compiled code. The filename, line
    number, and location name will point to the template, and the local
    variables will be the current template context.

    :param exc_value: The original exception to be re-raised to create
        the new traceback.
    :param tb: The original traceback to get the local variables and
        code info from.
    :param filename: The template filename.
    :param lineno: The line number in the template source.
    """
    if tb is not None:
        # Replace the real locals with the context that would be
        # available at that point in the template.
        locals = get_template_locals(tb.tb_frame.f_locals)
        locals.pop("__jinja_exception__", None)
    else:
        locals = {}
    globals = {
        "__name__": filename,
        "__file__": filename,
        "__jinja_exception__": exc_value,
    }
    # Raise an exception at the correct line number.
    # Padding with newlines makes the single raise statement land on `lineno`.
    code: CodeType = compile(
        "\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec"
    )
    # Build a new code object that points to the template file and
    # replaces the location with a block name.
    location = "template"
    if tb is not None:
        function = tb.tb_frame.f_code.co_name
        if function == "root":
            location = "top-level template code"
        elif function.startswith("block_"):
            location = f"block {function[6:]!r}"
    if sys.version_info >= (3, 8):
        # CodeType.replace() is only available on 3.8+.
        code = code.replace(co_name=location)
    else:
        # Older Pythons require rebuilding the code object positionally
        # (this argument list matches the pre-3.8 CodeType constructor).
        code = CodeType(
            code.co_argcount,
            code.co_kwonlyargcount,
            code.co_nlocals,
            code.co_stacksize,
            code.co_flags,
            code.co_code,
            code.co_consts,
            code.co_names,
            code.co_varnames,
            code.co_filename,
            location,
            code.co_firstlineno,
            code.co_lnotab,
            code.co_freevars,
            code.co_cellvars,
        )
    # Execute the new code, which is guaranteed to raise, and return
    # the new traceback without this frame.
    try:
        exec(code, globals, locals)
    except BaseException:
        return sys.exc_info()[2].tb_next  # type: ignore
def get_template_locals(real_locals: t.Mapping[str, t.Any]) -> t.Dict[str, t.Any]:
    """Based on the runtime locals, get the context that would be
    available at that point in the template.
    """
    # Start with everything the current template context exposes, if one is present.
    ctx: "t.Optional[Context]" = real_locals.get("context")
    data: t.Dict[str, t.Any] = ctx.get_all().copy() if ctx is not None else {}
    # Derived contexts may only set local variables named ``l_<depth>_<name>``
    # instead of pushing a context. For each name keep the deepest-scoped value.
    deepest: t.Dict[str, t.Tuple[int, t.Any]] = {}
    for key, value in real_locals.items():
        if not key.startswith("l_") or value is missing:
            # Not a template variable, or no longer relevant.
            continue
        try:
            _, depth_str, name = key.split("_", 2)
            depth = int(depth_str)
        except ValueError:
            continue
        if deepest.get(name, (-1,))[0] < depth:
            deepest[name] = (depth, value)
    # Apply the overrides on top of the context data; ``missing`` removes a name.
    for name, (_, value) in deepest.items():
        if value is missing:
            data.pop(name, None)
        else:
            data[name] = value
    return data
import typing as t
from . import nodes
from .visitor import NodeVisitor
#: How a symbol is materialized at runtime: as a macro/loop parameter,
#: resolved from the context, aliased to an outer frame's variable, or
#: initialized to undefined.
VAR_LOAD_PARAMETER = "param"
VAR_LOAD_RESOLVE = "resolve"
VAR_LOAD_ALIAS = "alias"
VAR_LOAD_UNDEFINED = "undefined"


def find_symbols(
    nodes: t.Iterable[nodes.Node], parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
    """Collect the symbols used by a sequence of nodes into a new
    :class:`Symbols` table, optionally nested under ``parent_symbols``.
    """
    sym = Symbols(parent=parent_symbols)
    visitor = FrameSymbolVisitor(sym)

    for node in nodes:
        visitor.visit(node)

    return sym


def symbols_for_node(
    node: nodes.Node, parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
    """Analyze a single node (including scope-introducing constructs) into
    a new :class:`Symbols` table, optionally nested under ``parent_symbols``.
    """
    sym = Symbols(parent=parent_symbols)
    sym.analyze_node(node)
    return sym


class Symbols:
    """Tracks the variables of one template frame.

    ``refs`` maps template variable names to their mangled runtime
    identifiers (``l_<level>_<name>``), ``loads`` maps those identifiers to
    an ``(instruction, argument)`` pair describing how to initialize them,
    and ``stores`` records the names assigned in this frame.
    """

    def __init__(
        self, parent: t.Optional["Symbols"] = None, level: t.Optional[int] = None
    ) -> None:
        # The nesting level defaults to one deeper than the parent so that
        # mangled identifiers are unique per frame.
        if level is None:
            level = 0 if parent is None else parent.level + 1

        self.level: int = level
        self.parent = parent
        self.refs: t.Dict[str, str] = {}
        self.loads: t.Dict[str, t.Any] = {}
        self.stores: t.Set[str] = set()

    def analyze_node(self, node: nodes.Node, **kwargs: t.Any) -> None:
        """Populate this table from ``node`` using :class:`RootVisitor`."""
        visitor = RootVisitor(self)
        visitor.visit(node, **kwargs)

    def _define_ref(
        self, name: str, load: t.Optional[t.Tuple[str, t.Optional[str]]] = None
    ) -> str:
        """Create the mangled identifier for ``name`` in this frame,
        optionally recording how it is loaded. Returns the identifier.
        """
        ident = f"l_{self.level}_{name}"
        self.refs[name] = ident

        if load is not None:
            self.loads[ident] = load

        return ident

    def find_load(self, target: str) -> t.Optional[t.Any]:
        """Return the load instruction for a mangled identifier, searching
        this frame and then enclosing frames. ``None`` if unknown.
        """
        if target in self.loads:
            return self.loads[target]

        if self.parent is not None:
            return self.parent.find_load(target)

        return None

    def find_ref(self, name: str) -> t.Optional[str]:
        """Return the mangled identifier for ``name``, searching this frame
        and then enclosing frames. ``None`` if unknown.
        """
        if name in self.refs:
            return self.refs[name]

        if self.parent is not None:
            return self.parent.find_ref(name)

        return None

    def ref(self, name: str) -> str:
        """Like :meth:`find_ref` but raise ``AssertionError`` if unknown."""
        rv = self.find_ref(name)

        if rv is None:
            raise AssertionError(
                "Tried to resolve a name to a reference that was"
                f" unknown to the frame ({name!r})"
            )

        return rv

    def copy(self) -> "Symbols":
        """Return a shallow copy with independent ref/load/store maps, used
        to analyze conditional branches separately.
        """
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.refs = self.refs.copy()
        rv.loads = self.loads.copy()
        rv.stores = self.stores.copy()
        return rv

    def store(self, name: str) -> None:
        """Record an assignment to ``name`` in this frame."""
        self.stores.add(name)

        # If we have not seen the name referenced yet, we need to figure
        # out what to set it to.
        if name not in self.refs:
            # If there is a parent scope we check if the name has a
            # reference there. If it does it means we might have to alias
            # to a variable there.
            if self.parent is not None:
                outer_ref = self.parent.find_ref(name)

                if outer_ref is not None:
                    self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
                    return

            # Otherwise we can just set it to undefined.
            self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))

    def declare_parameter(self, name: str) -> str:
        """Record ``name`` as a parameter of this frame and return its
        mangled identifier.
        """
        self.stores.add(name)
        return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))

    def load(self, name: str) -> None:
        """Record a read of ``name``; names unknown to every enclosing
        frame are resolved from the context at runtime.
        """
        if self.find_ref(name) is None:
            self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))

    def branch_update(self, branch_symbols: t.Sequence["Symbols"]) -> None:
        """Merge the symbol tables of conditional branches back into this
        frame. A name not stored by *every* branch may be undefined at
        runtime, so its load is downgraded to an alias or a resolve.
        """
        stores: t.Dict[str, int] = {}

        # Count in how many branches each newly stored name appears.
        for branch in branch_symbols:
            for target in branch.stores:
                if target in self.stores:
                    continue

                stores[target] = stores.get(target, 0) + 1

        for sym in branch_symbols:
            self.refs.update(sym.refs)
            self.loads.update(sym.loads)
            self.stores.update(sym.stores)

        for name, branch_count in stores.items():
            if branch_count == len(branch_symbols):
                # Stored unconditionally; the recorded load stays valid.
                continue

            target = self.find_ref(name)  # type: ignore
            assert target is not None, "should not happen"

            if self.parent is not None:
                outer_target = self.parent.find_ref(name)

                if outer_target is not None:
                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
                    continue

            self.loads[target] = (VAR_LOAD_RESOLVE, name)

    def dump_stores(self) -> t.Dict[str, str]:
        """Map every name stored in this frame or an enclosing frame to its
        mangled identifier, preferring the innermost definition.
        """
        rv: t.Dict[str, str] = {}
        node: t.Optional["Symbols"] = self

        while node is not None:
            for name in sorted(node.stores):
                if name not in rv:
                    rv[name] = self.find_ref(name)  # type: ignore

            node = node.parent

        return rv

    def dump_param_targets(self) -> t.Set[str]:
        """Return the mangled identifiers of every parameter declared in
        this frame or any enclosing frame.
        """
        rv = set()
        node: t.Optional["Symbols"] = self

        while node is not None:
            # Fixed: inspect each visited frame's loads (``node.loads``),
            # not ``self.loads`` — otherwise parameters declared in outer
            # frames were never reported and the parent walk was a no-op.
            for target, (instr, _) in node.loads.items():
                if instr == VAR_LOAD_PARAMETER:
                    rv.add(target)

            node = node.parent

        return rv
class RootVisitor(NodeVisitor):
    """Dispatches the children of scope-introducing nodes to a
    :class:`FrameSymbolVisitor` without descending into nested scopes.
    """

    def __init__(self, symbols: "Symbols") -> None:
        self.sym_visitor = FrameSymbolVisitor(symbols)

    def _simple_visit(self, node: nodes.Node, **kwargs: t.Any) -> None:
        # Visit every direct child with the frame-level symbol visitor.
        for child in node.iter_child_nodes():
            self.sym_visitor.visit(child)

    visit_Template = _simple_visit
    visit_Block = _simple_visit
    visit_Macro = _simple_visit
    visit_FilterBlock = _simple_visit
    visit_Scope = _simple_visit
    visit_If = _simple_visit
    visit_ScopedEvalContextModifier = _simple_visit

    def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
        # Only the body belongs to this frame.
        for stmt in node.body:
            self.sym_visitor.visit(stmt)

    def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
        # The ``call`` expression is excluded from this frame.
        for child in node.iter_child_nodes(exclude=("call",)):
            self.sym_visitor.visit(child)

    def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
        for stmt in node.body:
            self.sym_visitor.visit(stmt)

    def visit_For(
        self, node: nodes.For, for_branch: str = "body", **kwargs: t.Any
    ) -> None:
        # A ``for`` node is analyzed one branch at a time; the caller
        # selects which branch via ``for_branch``.
        if for_branch == "body":
            self.sym_visitor.visit(node.target, store_as_param=True)
            branch = node.body
        elif for_branch == "else":
            branch = node.else_
        elif for_branch == "test":
            self.sym_visitor.visit(node.target, store_as_param=True)

            if node.test is not None:
                self.sym_visitor.visit(node.test)

            return
        else:
            raise RuntimeError("Unknown for branch")

        if branch:
            for stmt in branch:
                self.sym_visitor.visit(stmt)

    def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
        for target in node.targets:
            self.sym_visitor.visit(target)

        for stmt in node.body:
            self.sym_visitor.visit(stmt)

    def generic_visit(self, node: nodes.Node, *args: t.Any, **kwargs: t.Any) -> None:
        raise NotImplementedError(f"Cannot find symbols for {type(node).__name__!r}")
class FrameSymbolVisitor(NodeVisitor):
    """Collects the symbols of a single frame for ``Frame.inspect``,
    stopping at constructs that introduce their own scope.
    """

    def __init__(self, symbols: "Symbols") -> None:
        self.symbols = symbols

    def visit_Name(
        self, node: nodes.Name, store_as_param: bool = False, **kwargs: t.Any
    ) -> None:
        """All assignments to names go through this function."""
        if store_as_param or node.ctx == "param":
            self.symbols.declare_parameter(node.name)
            return

        if node.ctx == "store":
            self.symbols.store(node.name)
        elif node.ctx == "load":
            self.symbols.load(node.name)

    def visit_NSRef(self, node: nodes.NSRef, **kwargs: t.Any) -> None:
        # Assigning an attribute on a namespace reads the namespace name.
        self.symbols.load(node.name)

    def visit_If(self, node: nodes.If, **kwargs: t.Any) -> None:
        # The test always runs; each branch is analyzed on its own copy of
        # the table, then the copies are merged so that names stored in
        # only some branches are handled correctly.
        self.visit(node.test, **kwargs)
        original_symbols = self.symbols

        def analyze_branch(branch: t.Iterable[nodes.Node]) -> "Symbols":
            self.symbols = copied = original_symbols.copy()

            for child in branch:
                self.visit(child, **kwargs)

            self.symbols = original_symbols
            return copied

        body_symbols = analyze_branch(node.body)
        elif_symbols = analyze_branch(node.elif_)
        else_symbols = analyze_branch(node.else_ or ())
        self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])

    def visit_Macro(self, node: nodes.Macro, **kwargs: t.Any) -> None:
        # Only the macro's name is stored in the enclosing frame.
        self.symbols.store(node.name)

    def visit_Import(self, node: nodes.Import, **kwargs: t.Any) -> None:
        self.generic_visit(node, **kwargs)
        self.symbols.store(node.target)

    def visit_FromImport(self, node: nodes.FromImport, **kwargs: t.Any) -> None:
        self.generic_visit(node, **kwargs)

        for name in node.names:
            # ``(orig, alias)`` tuples store under the alias.
            self.symbols.store(name[1] if isinstance(name, tuple) else name)

    def visit_Assign(self, node: nodes.Assign, **kwargs: t.Any) -> None:
        """Visit assignments in the correct order."""
        # The right-hand side is evaluated before the target is bound.
        self.visit(node.node, **kwargs)
        self.visit(node.target, **kwargs)

    def visit_For(self, node: nodes.For, **kwargs: t.Any) -> None:
        """Visiting stops at for blocks. However the block sequence
        is visited as part of the outer scope.
        """
        self.visit(node.iter, **kwargs)

    def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
        self.visit(node.call, **kwargs)

    def visit_FilterBlock(self, node: nodes.FilterBlock, **kwargs: t.Any) -> None:
        self.visit(node.filter, **kwargs)

    def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
        # Only the bound values are evaluated in the outer frame.
        for value in node.values:
            self.visit(value)

    def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
        """Stop visiting at block assigns."""
        self.visit(node.target, **kwargs)

    def visit_Scope(self, node: nodes.Scope, **kwargs: t.Any) -> None:
        """Stop visiting at scopes."""

    def visit_Block(self, node: nodes.Block, **kwargs: t.Any) -> None:
        """Stop visiting at blocks."""

    def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
        """Do not visit into overlay scopes."""
import inspect
import typing as t
from functools import WRAPPER_ASSIGNMENTS
from functools import wraps
from .utils import _PassArg
from .utils import pass_eval_context
#: Type variable tying the item type of the async helpers' inputs to
#: their outputs.
V = t.TypeVar("V")
def async_variant(normal_func):  # type: ignore
    """Decorator factory that registers the decorated coroutine function
    as the async variant of ``normal_func``.

    The returned ``wrapper`` inspects its first argument at call time to
    decide whether the environment is in async mode and delegates to the
    async or the sync implementation accordingly.
    """
    def decorator(async_func):  # type: ignore
        pass_arg = _PassArg.from_obj(normal_func)
        # When the sync function declares no pass-arg, the wrapper itself
        # is given the eval context as its first argument (via
        # ``pass_eval_context`` below) and strips it again before
        # delegating.
        need_eval_context = pass_arg is None
        if pass_arg is _PassArg.environment:
            def is_async(args: t.Any) -> bool:
                # First argument is the environment itself.
                return t.cast(bool, args[0].is_async)
        else:
            def is_async(args: t.Any) -> bool:
                # First argument carries the environment (context or eval
                # context).
                return t.cast(bool, args[0].environment.is_async)
        # Take the doc and annotations from the sync function, but the
        # name from the async function. Pallets-Sphinx-Themes
        # build_function_directive expects __wrapped__ to point to the
        # sync function.
        async_func_attrs = ("__module__", "__name__", "__qualname__")
        normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs))
        @wraps(normal_func, assigned=normal_func_attrs)
        @wraps(async_func, assigned=async_func_attrs, updated=())
        def wrapper(*args, **kwargs):  # type: ignore
            # Decide async-ness before possibly dropping the eval context.
            b = is_async(args)
            if need_eval_context:
                args = args[1:]
            if b:
                return async_func(*args, **kwargs)
            return normal_func(*args, **kwargs)
        if need_eval_context:
            wrapper = pass_eval_context(wrapper)
        # Tag the wrapper so it can be recognized as an async variant.
        wrapper.jinja_async_variant = True
        return wrapper
    return decorator
#: Types that can never be awaitable; checked first so the common case
#: skips the more expensive ``inspect.isawaitable`` call.
_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)}


async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V":
    """Await ``value`` if it is awaitable, otherwise return it unchanged."""
    if type(value) not in _common_primitives and inspect.isawaitable(value):
        return await t.cast("t.Awaitable[V]", value)

    return t.cast("V", value)
async def auto_aiter(
    iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> "t.AsyncIterator[V]":
    """Adapt a sync or async iterable into an async iterator."""
    if hasattr(iterable, "__aiter__"):
        async for item in t.cast("t.AsyncIterable[V]", iterable):
            yield item

        return

    for item in t.cast("t.Iterable[V]", iterable):
        yield item


async def auto_to_list(
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> t.List["V"]:
    """Drain a sync or async iterable into a list."""
    rv: t.List["V"] = []

    async for item in auto_aiter(value):
        rv.append(item)

    return rv
import operator
import types
import typing as t
from _string import formatter_field_name_split # type: ignore
from collections import abc
from collections import deque
from string import Formatter
from markupsafe import EscapeFormatter
from markupsafe import Markup
from .environment import Environment
from .exceptions import SecurityError
from .runtime import Context
from .runtime import Undefined
#: Type variable for decorators such as :func:`unsafe`.
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: Unsafe function attributes.
#: NOTE(review): empty in this version — no function attribute is blocked
#: through this set here.
UNSAFE_FUNCTION_ATTRIBUTES: t.Set[str] = set()
#: Unsafe method attributes. Function attributes are unsafe for methods too.
#: NOTE(review): also empty in this version.
UNSAFE_METHOD_ATTRIBUTES: t.Set[str] = set()
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
#: ``(abstract type, frozenset of mutating method names)`` pairs consulted
#: by :func:`modifies_known_mutable` to decide whether calling ``obj.attr``
#: would mutate a builtin container in place.
_mutable_spec: t.Tuple[t.Tuple[t.Type, t.FrozenSet[str]], ...] = (
    (
        abc.MutableSet,
        frozenset(
            [
                "add",
                "clear",
                "difference_update",
                "discard",
                "pop",
                "remove",
                "symmetric_difference_update",
                "update",
            ]
        ),
    ),
    (
        abc.MutableMapping,
        frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
    ),
    (
        abc.MutableSequence,
        frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
    ),
    (
        # deque is listed separately because it has mutators beyond the
        # MutableSequence ABC (appendleft, popleft, rotate, ...).
        deque,
        frozenset(
            [
                "append",
                "appendleft",
                "clear",
                "extend",
                "extendleft",
                "pop",
                "popleft",
                "remove",
                "rotate",
            ]
        ),
    ),
)
def inspect_format_method(callable: t.Callable) -> t.Optional[str]:
    """Return the format string when ``callable`` is the ``format`` or
    ``format_map`` method bound to a ``str`` instance, else ``None``.
    """
    if not isinstance(callable, (types.MethodType, types.BuiltinMethodType)):
        return None

    if callable.__name__ not in ("format", "format_map"):
        return None

    owner = callable.__self__
    return owner if isinstance(owner, str) else None
def safe_range(*args: int) -> range:
    """Like :func:`range`, but refuse to build a range of more than
    ``MAX_RANGE`` items so templates cannot demand unbounded iteration.
    """
    rng = range(*args)

    if len(rng) <= MAX_RANGE:
        return rng

    raise OverflowError(
        "Range too big. The sandbox blocks ranges larger than"
        f" MAX_RANGE ({MAX_RANGE})."
    )
def unsafe(f: F) -> F:
    """Mark a function or method as unsafe to call from sandboxed code.

    .. code-block: python

        @unsafe
        def delete(self):
            pass
    """
    # The sandbox checks this attribute in ``is_safe_callable``.
    setattr(f, "unsafe_callable", True)
    return f
def is_internal_attribute(obj: t.Any, attr: str) -> bool:
    """Test if the attribute given is an internal python attribute. For
    example this function returns `True` for the `func_code` attribute of
    python objects. This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # Introspection objects expose nothing safe at all.
    if isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        return True

    if isinstance(obj, type):
        if attr == "mro":
            return True
    elif isinstance(obj, types.MethodType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, types.FunctionType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, types.GeneratorType):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    else:
        # These type objects may not exist on every implementation, so
        # look them up defensively.
        coroutine_type = getattr(types, "CoroutineType", None)
        async_gen_type = getattr(types, "AsyncGeneratorType", None)

        if coroutine_type is not None and isinstance(obj, coroutine_type):
            if attr in UNSAFE_COROUTINE_ATTRIBUTES:
                return True
        elif async_gen_type is not None and isinstance(obj, async_gen_type):
            if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
                return True

    # Dunder attributes are always considered internal.
    return attr.startswith("__")
def modifies_known_mutable(obj: t.Any, attr: str) -> bool:
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) or the corresponding ABCs would modify it
    if called.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object, ``False`` is returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # The first matching container type decides the answer.
    for mutable_type, mutating_methods in _mutable_spec:
        if isinstance(obj, mutable_type):
            return attr in mutating_methods

    return False
class SandboxedEnvironment(Environment):
    """The sandboxed environment. It works like the regular environment but
    tells the compiler to generate sandboxed code. Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.
    If the template tries to access insecure code a :exc:`SecurityError` is
    raised. However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """
    # Flag that marks this environment as sandboxed; consumers of this
    # flag live outside this module.
    sandboxed = True
    #: default callback table for the binary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
        "//": operator.floordiv,
        "**": operator.pow,
        "%": operator.mod,
    }
    #: default callback table for the unary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
        "+": operator.pos,
        "-": operator.neg,
    }
    #: a set of binary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops: t.FrozenSet[str] = frozenset()
    #: a set of unary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops: t.FrozenSet[str] = frozenset()
    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)
        # Expose the length-guarded range() to templates.
        self.globals["range"] = safe_range
        # Per-instance copies so subclasses/instances can customize them.
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()
    def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access. Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith("_") or is_internal_attribute(obj, attr))
    def is_safe_callable(self, obj: t.Any) -> bool:
        """Check if an object is safely callable. By default callables
        are considered safe unless decorated with :func:`unsafe`.
        This also recognizes the Django convention of setting
        ``func.alters_data = True``.
        """
        return not (
            getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
        )
    def call_binop(
        self, context: Context, operator: str, left: t.Any, right: t.Any
    ) -> t.Any:
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.
        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)
    def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any:
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.
        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)
    def getitem(
        self, obj: t.Any, argument: t.Union[str, t.Any]
    ) -> t.Union[t.Any, Undefined]:
        """Subscribe an object from sandboxed code. Prefers item lookup
        and falls back to attribute access for string keys; attributes
        that fail the safety check resolve to an undefined object rather
        than raising.
        """
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Item lookup failed; for string keys, try attribute access.
            if isinstance(argument, str):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        # Attribute exists but is unsafe: special undefined.
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)
    def getattr(self, obj: t.Any, attribute: str) -> t.Union[t.Any, Undefined]:
        """Subscribe an object from sandboxed code and prefer the
        attribute. Falls back to item lookup when the attribute does not
        exist; unsafe attributes resolve to an undefined object rather
        than raising.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # No such attribute; fall back to item lookup.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            # Attribute exists but is unsafe: special undefined.
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)
    def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined:
        """Return an undefined object for unsafe attributes."""
        return self.undefined(
            f"access to attribute {attribute!r} of"
            f" {type(obj).__name__!r} object is unsafe.",
            name=attribute,
            obj=obj,
            exc=SecurityError,
        )
    def format_string(
        self,
        s: str,
        args: t.Tuple[t.Any, ...],
        kwargs: t.Dict[str, t.Any],
        format_func: t.Optional[t.Callable] = None,
    ) -> str:
        """If a format call is detected, then this is routed through this
        method so that our safety sandbox can be used for it.
        """
        formatter: SandboxedFormatter
        # Markup strings must keep escaping their interpolated fields.
        if isinstance(s, Markup):
            formatter = SandboxedEscapeFormatter(self, escape=s.escape)
        else:
            formatter = SandboxedFormatter(self)
        # ``format_map`` takes a single mapping positionally; rewrite it
        # into the (args, kwargs) shape ``vformat`` expects.
        if format_func is not None and format_func.__name__ == "format_map":
            if len(args) != 1 or kwargs:
                # NOTE(review): ``kwargs is not None`` is always true here,
                # so the reported count is ``len(args) + 1``; kept as-is to
                # preserve the upstream error message.
                raise TypeError(
                    "format_map() takes exactly one argument"
                    f" {len(args) + (kwargs is not None)} given"
                )
            kwargs = args[0]
            args = ()
        rv = formatter.vformat(s, args, kwargs)
        # Preserve the input's type (e.g. return Markup for Markup input).
        return type(s)(rv)
    def call(
        __self,  # noqa: B902
        __context: Context,
        __obj: t.Any,
        *args: t.Any,
        **kwargs: t.Any,
    ) -> t.Any:
        """Call an object from sandboxed code."""
        # str.format / str.format_map calls are routed through the
        # sandboxed formatter instead of being invoked directly.
        fmt = inspect_format_method(__obj)
        if fmt is not None:
            return __self.format_string(fmt, args, kwargs, __obj)
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError(f"{__obj!r} is not safely callable")
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
        # Safe only if the base sandbox allows it *and* the attribute is
        # not a known mutating method of a builtin container.
        return super().is_safe_attribute(obj, attr, value) and not (
            modifies_known_mutable(obj, attr)
        )
class SandboxedFormatter(Formatter):
    """A :class:`string.Formatter` that routes attribute and item lookups
    through the sandbox's safety checks.
    """

    def __init__(self, env: Environment, **kwargs: t.Any) -> None:
        self._env = env
        super().__init__(**kwargs)

    def get_field(
        self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
    ) -> t.Tuple[t.Any, str]:
        head, tail = formatter_field_name_split(field_name)
        value = self.get_value(head, args, kwargs)

        # Walk the dotted/indexed tail through the sandboxed accessors so
        # unsafe lookups are intercepted instead of executed directly.
        for is_attr, part in tail:
            value = (
                self._env.getattr(value, part)
                if is_attr
                else self._env.getitem(value, part)
            )

        return value, head
class SandboxedEscapeFormatter(SandboxedFormatter, EscapeFormatter):
    # Combines the sandboxed field lookup with markupsafe's escaping
    # formatter, used for formatting Markup strings.
    pass
import typing as t
from .nodes import Node
if t.TYPE_CHECKING:
    import typing_extensions as te
    class VisitCallable(te.Protocol):
        """Structural type of a ``visit_*`` method: takes the node plus
        arbitrary extra arguments and may return anything."""
        def __call__(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
            ...
class NodeVisitor:
    """Walks the abstract syntax tree and call visitor functions for every
    node found. The visitor functions may return values which will be
    forwarded by the `visit` method.

    Per default the visitor functions for the nodes are ``'visit_'`` +
    class name of the node. So a `TryFinally` node visit function would
    be `visit_TryFinally`. This behavior can be changed by overriding
    the `get_visitor` function. If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.
    """

    def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
        """Return the ``visit_<ClassName>`` method for ``node``, or
        ``None`` if this visitor does not define one (in which case the
        generic visit function is used instead).
        """
        return getattr(self, f"visit_{type(node).__name__}", None)

    def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Visit a node, dispatching to its specific visitor method or
        falling back to :meth:`generic_visit`.
        """
        handler = self.get_visitor(node)

        if handler is None:
            return self.generic_visit(node, *args, **kwargs)

        return handler(node, *args, **kwargs)

    def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Called if no explicit visitor function exists for a node."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node. If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value. The return value may be the original node in which case no
    replacement takes place.
    """

    def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                kept: t.List[t.Any] = []

                for child in old_value:
                    if not isinstance(child, Node):
                        # Non-node entries pass through untouched.
                        kept.append(child)
                        continue

                    result = self.visit(child, *args, **kwargs)

                    if result is None:
                        # ``None`` removes the node from the list.
                        continue

                    if isinstance(result, Node):
                        kept.append(result)
                    else:
                        # A non-node result is spliced in as a sequence.
                        kept.extend(result)

                # Mutate the list in place so the node keeps its identity.
                old_value[:] = kept
            elif isinstance(old_value, Node):
                replacement = self.visit(old_value, *args, **kwargs)

                if replacement is None:
                    delattr(node, field)
                else:
                    setattr(node, field, replacement)

        return node

    def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        result = self.visit(node, *args, **kwargs)
        return result if isinstance(result, list) else [result]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.