| docstring (str, length 52–499) | function (str, length 67–35.2k) | __index_level_0__ (int64, 52.6k–1.16M) |
|---|---|---|
def usergroups_create(self, *, name: str, **kwargs) -> SlackResponse:
    """Create a User Group via the Slack `usergroups.create` Web API method.

    Requires a workspace token (checked by ``self._validate_xoxp_token()``).

    Args:
        name (str): A name for the User Group. Must be unique among User Groups.
            e.g. 'My Test Team'
    """
    self._validate_xoxp_token()
    kwargs["name"] = name
    return self.api_call("usergroups.create", json=kwargs)
def usergroups_disable(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """Disable an existing User Group via `usergroups.disable`.

    Requires a workspace token (checked by ``self._validate_xoxp_token()``).

    Args:
        usergroup (str): The encoded ID of the User Group to disable.
            e.g. 'S0604QSJC'
    """
    self._validate_xoxp_token()
    kwargs["usergroup"] = usergroup
    return self.api_call("usergroups.disable", json=kwargs)
def usergroups_users_list(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """List all users in a User Group via `usergroups.users.list` (HTTP GET).

    Requires a workspace token (checked by ``self._validate_xoxp_token()``).

    Args:
        usergroup (str): The encoded ID of the User Group whose users are listed.
            e.g. 'S0604QSJC'
    """
    self._validate_xoxp_token()
    kwargs["usergroup"] = usergroup
    return self.api_call("usergroups.users.list", http_verb="GET", params=kwargs)
def usergroups_users_update(
    self, *, usergroup: str, users: List[str], **kwargs
) -> SlackResponse:
    """Replace the full user list of a User Group via `usergroups.users.update`.

    Requires a workspace token (checked by ``self._validate_xoxp_token()``).

    Args:
        usergroup (str): The encoded ID of the User Group to update.
            e.g. 'S0604QSJC'
        users (list): A list of user IDs that represent the entire list of
            users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
    """
    self._validate_xoxp_token()
    kwargs["usergroup"] = usergroup
    kwargs["users"] = users
    return self.api_call("usergroups.users.update", json=kwargs)
def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:
    """Get user presence information via `users.getPresence` (HTTP GET).

    Args:
        user (str): User to get presence info on. Defaults to the authed user.
            e.g. 'W1234567890'
    """
    kwargs["user"] = user
    return self.api_call("users.getPresence", http_verb="GET", params=kwargs)
def users_lookupByEmail(self, *, email: str, **kwargs) -> SlackResponse:
    """Find a user by email address via `users.lookupByEmail` (HTTP GET).

    Args:
        email (str): An email address belonging to a user in the workspace.
            e.g. 'spengler@ghostbusters.example.com'
    """
    kwargs["email"] = email
    return self.api_call("users.lookupByEmail", http_verb="GET", params=kwargs)
def users_setPhoto(self, *, image: Union[str, IOBase], **kwargs) -> SlackResponse:
    """Set the user profile photo via the Slack `users.setPhoto` Web API method.

    Requires a workspace token (checked by ``self._validate_xoxp_token()``).

    Args:
        image (str or IOBase): Path of the image to upload, or an open file
            object. e.g. 'myimage.png'
    """
    self._validate_xoxp_token()
    # Uploaded as multipart form data: image goes in `files`, extras in `data`.
    return self.api_call("users.setPhoto", files={"image": image}, data=kwargs)
def users_setPresence(self, *, presence: str, **kwargs) -> SlackResponse:
    """Manually set user presence via `users.setPresence`.

    Args:
        presence (str): Either 'auto' or 'away'.
    """
    kwargs["presence"] = presence
    return self.api_call("users.setPresence", json=kwargs)
Sets the location of the output from the static analysis tool.
Parameters:
location: str Filesystem location for the results. | def analysis_output(self, location: Optional[str] = None) -> None:
try:
if not location:
location = self.prompt(
"Analysis results: ",
history_key="analysis_results",
completer=PathCompleter(),
)
... | 145,496 |
Select an issue.
Parameters:
issue_instance_id: int id of the issue instance to select
Note: We are selecting issue instances, even though the command is called
issue. | def issue(self, issue_instance_id):
with self.db.make_session() as session:
selected_issue = (
session.query(IssueInstance)
.filter(IssueInstance.id == issue_instance_id)
.scalar()
)
if selected_issue is None:
... | 145,498 |
Jump to a specific trace frame in a trace.
Parameters:
selected_number: int the trace frame number from trace output | def jump(self, selected_number: int) -> None:
self._verify_entrypoint_selected()
if selected_number < 1 or selected_number > len(self.trace_tuples):
raise UserError(
"Trace frame number out of bounds "
f"(expected 1-{len(self.trace_tuples)} but got {s... | 145,508 |
Show source code around the current trace frame location.
Parameters:
context: int number of lines to show above and below trace location
(default: 5) | def list_source_code(self, context: int = 5) -> None:
self._verify_entrypoint_selected()
current_trace_frame = self.trace_tuples[
self.current_trace_frame_index
].trace_frame
filename = os.path.join(self.repository_directory, current_trace_frame.filename)
f... | 145,511 |
Download a trained weights, config and preprocessor.
Args:
url (str): target url. | def download(url):
filepath = get_file(fname='tmp.zip', origin=url, extract=True)
base_dir = os.path.dirname(filepath)
weights_file = os.path.join(base_dir, 'weights.h5')
params_file = os.path.join(base_dir, 'params.json')
preprocessor_file = os.path.join(base_dir, 'preprocessor.pickle')
r... | 145,768 |
def filter_embeddings(embeddings, vocab, dim):
    """Build an embedding matrix restricted to the given vocabulary.

    Words missing from `embeddings` keep all-zero rows.

    Args:
        embeddings (dict): mapping from word to numpy vector.
        vocab (dict): word -> row index lookup table.
        dim (int): embedding dimensionality.

    Returns:
        numpy array of shape (len(vocab), dim), or None when `embeddings`
        is not a dict.
    """
    if not isinstance(embeddings, dict):
        return
    matrix = np.zeros([len(vocab), dim])
    for word, row in vocab.items():
        if word in embeddings:
            matrix[row] = embeddings[word]
    return matrix
def load_glove(file):
    """Load GloVe vectors from a text file.

    Each line is expected to be: word followed by space-separated floats.

    Args:
        file (str): path to a GloVe file.

    Returns:
        dict: word -> numpy array of floats.
    """
    vectors = {}
    with open(file, encoding="utf8", errors='ignore') as f:
        for raw in f:
            parts = raw.split(' ')
            vectors[parts[0]] = np.array([float(v) for v in parts[1:]])
    return vectors
Create a Vocabulary object.
Args:
max_size: The maximum size of the vocabulary, or None for no
maximum. Default: None.
lower: boolean. Whether to convert the texts to lowercase.
unk_token: boolean. Whether to add unknown token.
specials: The list ... | def __init__(self, max_size=None, lower=True, unk_token=True, specials=('<pad>',)):
self._max_size = max_size
self._lower = lower
self._unk = unk_token
self._token2id = {token: i for i, token in enumerate(specials)}
self._id2token = list(specials)
self._token_cou... | 145,774 |
def add_token(self, token):
    """Add a single token to the vocabulary counts.

    Args:
        token (str): token to add; normalized by ``self.process_token`` first.
    """
    normalized = self.process_token(token)
    self._token_count.update([normalized])
def add_documents(self, docs):
    """Update token counts from a collection of documents.

    Each document is a list of tokens; every token is normalized with
    ``self.process_token`` before counting.

    Args:
        docs (list): documents to add.
    """
    for sentence in docs:
        self._token_count.update(self.process_token(tok) for tok in sentence)
def doc2id(self, doc):
    """Map a document (list of tokens) to a list of token ids.

    Args:
        doc (list): document tokens.

    Returns:
        list: int id of each token in doc.
    """
    return [self.token_to_id(self.process_token(tok)) for tok in doc]
def token_to_id(self, token):
    """Look up the id of a token.

    Unknown tokens map to the last id in the table (the <unk> slot).

    Args:
        token (str): token from vocabulary.

    Returns:
        int: id of the token.
    """
    normalized = self.process_token(token)
    unk_id = len(self._token2id) - 1
    return self._token2id.get(normalized, unk_id)
def predict_proba(self, text):
    """Return per-word class probability estimates for an input text.

    Args:
        text (str): the input text.

    Returns:
        y: array-like, shape = [num_words, num_classes] — probability of
        each word for each class, ordered by class label.
    """
    assert isinstance(text, str)
    tokens = self.tokenizer(text)
    features = self.preprocessor.transform([tokens])
    predictions = self.model.predict(features)
    # The model predicts a batch of one; unwrap it.
    return predictions[0]
def predict(self, text):
    """Predict tags for an input text.

    Args:
        text (str): the input text.

    Returns:
        tags: list, shape = (num_words,) — predicted tag per word.
    """
    probabilities = self.predict_proba(text)
    return self._get_tags(probabilities)
Pads nested sequences to the same length.
This function transforms a list of list sequences
into a 3D Numpy array of shape `(num_samples, max_sent_len, max_word_len)`.
Args:
sequences: List of lists of lists.
dtype: Type of the output sequences.
# Returns
x: Numpy array. | def pad_nested_sequences(sequences, dtype='int32'):
max_sent_len = 0
max_word_len = 0
for sent in sequences:
max_sent_len = max(len(sent), max_sent_len)
for word in sent:
max_word_len = max(len(word), max_word_len)
x = np.zeros((len(sequences), max_sent_len, max_word_le... | 145,789 |
Create a preprocessor object.
Args:
lower: boolean. Whether to convert the texts to lowercase.
use_char: boolean. Whether to use char feature.
num_norm: boolean. Whether to normalize text.
initial_vocab: Iterable. Initial vocabulary for expanding word_vocab. | def __init__(self, lower=True, num_norm=True,
use_char=True, initial_vocab=None):
self._num_norm = num_norm
self._use_char = use_char
self._word_vocab = Vocabulary(lower=lower)
self._char_vocab = Vocabulary(lower=False)
self._label_vocab = Vocabulary(low... | 145,790 |
Learn vocabulary from training set.
Args:
X : iterable. An iterable which yields either str, unicode or file objects.
Returns:
self : IndexTransformer. | def fit(self, X, y):
self._word_vocab.add_documents(X)
self._label_vocab.add_documents(y)
if self._use_char:
for doc in X:
self._char_vocab.add_documents(doc)
self._word_vocab.build()
self._char_vocab.build()
self._label_vocab.build()... | 145,791 |
Transform documents to document ids.
Uses the vocabulary learned by fit.
Args:
X : iterable
an iterable which yields either str, unicode or file objects.
y : iterabl, label strings.
Returns:
features: document id matrix.
y: label id ... | def transform(self, X, y=None):
word_ids = [self._word_vocab.doc2id(doc) for doc in X]
word_ids = pad_sequences(word_ids, padding='post')
if self._use_char:
char_ids = [[self._char_vocab.doc2id(w) for w in doc] for doc in X]
char_ids = pad_nested_sequences(char_... | 145,792 |
def fit_transform(self, X, y=None, **params):
    """Learn vocabulary and return the document id matrix.

    Equivalent to ``fit`` followed by ``transform``.

    Args:
        X: iterable of documents.
        y: iterable of label sequences (optional).

    Returns:
        document id matrix (and label id matrix when y is given).
    """
    fitted = self.fit(X, y)
    return fitted.transform(X, y)
def inverse_transform(self, y, lengths=None):
    """Convert label probability/id matrices back to label strings.

    Args:
        y: label id matrix (argmax is taken over the last axis).
        lengths: optional per-sentence lengths used to trim padding.

    Returns:
        list: list of lists of label strings.
    """
    label_ids = np.argmax(y, -1)
    docs = [self._label_vocab.id2doc(ids) for ids in label_ids]
    if lengths is not None:
        docs = [doc[:size] for doc, size in zip(docs, lengths)]
    return docs
Transform documents to document ids.
Uses the vocabulary learned by fit.
Args:
X : iterable
an iterable which yields either str, unicode or file objects.
y : iterabl, label strings.
Returns:
features: document id matrix.
y: label id ... | def transform(self, X, y=None):
word_ids = [self._word_vocab.doc2id(doc) for doc in X]
word_ids = pad_sequences(word_ids, padding='post')
char_ids = [[self._char_vocab.doc2id(w) for w in doc] for doc in X]
char_ids = pad_nested_sequences(char_ids)
character_ids = batch... | 145,796 |
Returns the prediction of the model on the given test data.
Args:
x_test : array-like, shape = (n_samples, sent_length)
Test samples.
Returns:
y_pred : array-like, shape = (n_samples, sent_length)
Prediction labels for x. | def predict(self, x_test):
if self.model:
lengths = map(len, x_test)
x_test = self.p.transform(x_test)
y_pred = self.model.predict(x_test)
y_pred = self.p.inverse_transform(y_pred, lengths)
return y_pred
else:
raise OSErro... | 145,801 |
def analyze(self, text, tokenizer=str.split):
    """Analyze text and return a pretty-formatted result.

    Lazily builds the tagger on first use.

    Args:
        text (str): the input text.
        tokenizer: tokenizes the input sentence; defaults to ``str.split``.

    Returns:
        res: dict.
    """
    tagger = self.tagger
    if not tagger:
        tagger = Tagger(self.model,
                        preprocessor=self.p,
                        tokenizer=tokenizer)
        self.tagger = tagger
    return tagger.analyze(text)
Returns a notebook object with papermill metadata loaded from the specified path.
Args:
notebook_path (str): Path to the notebook file.
Returns:
nbformat.NotebookNode | def load_notebook_node(notebook_path):
nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)
if not hasattr(nb.metadata, 'papermill'):
nb.metadata['papermill'] = {
'parameters': dict(),
'environment_variables': dict(),
'version': __version__,
... | 146,190 |
Download Malmo from github and optionally build the Minecraft Mod.
Args:
branch: optional branch to clone. Default is release version.
buildMod: don't build the Mod unless build arg is given as True.
Returns:
The path for the Malmo Minecraft mod. | def download(branch=None, buildMod=False):
gradlew = "./gradlew"
if os.name == 'nt':
gradlew = "gradlew.bat"
if branch is None:
branch = malmo_version
subprocess.check_call(["git", "clone", "-b", branch, "https://github.com/Microsoft/malmo.git" , malmo_install_dir])
os.chdir(... | 147,360 |
Launch Malmo Minecraft Mod in one or more clients from
the Minecraft directory on the (optionally) given ports.
Args:
ports: an optional list of ports to start minecraft clients on.
Defaults to a single Minecraft client on port 10000.
wait_timeout: optional time in secon... | def launch_minecraft(ports = [], wait_timeout = 360):
if "MALMO_XSD_PATH" not in os.environ:
print("Please set the MALMO_XSD_PATH environment variable.")
return
cwd = os.getcwd()
try:
os.chdir(malmo_install_dir + "/Minecraft")
launch_minecraft_in_background(os.getcwd(), ... | 147,361 |
Sets a mission running.
Parameters:
mission_spec : MissionSpec instance, specifying the mission.
mission_record_spec : MissionRecordSpec instance, specifying what should be recorded.
role : int, the index of the role this human agent is to play. Zero based. | def runMission( self, mission_spec, mission_record_spec, role = 0 ):
self.world_state = None
total_reward = 0
# decide on the action space
command_handlers = mission_spec.getListOfCommandHandlers(role)
if 'ContinuousMovement' in command_handlers and 'Di... | 147,407 |
def download(branch=None, build=True, installdir="MalmoPlatform"):
    """Download Malmo from github and build (by default) the Minecraft Mod.

    Example usage: import malmoenv.bootstrap; malmoenv.bootstrap.download()

    Args:
        branch: optional branch to clone. Defaults to the module-level
            malmo_version (presumably the release version — confirm).
        build: build the Mod unless given as False.
        installdir: directory to clone into. Defaults to "MalmoPlatform".

    Returns:
        the return value of setup() (per the original docs, the path of the
        Malmo Minecraft mod — confirm against setup()).
    """
    if branch is None:
        branch = malmo_version
    # Shallow of the chosen branch is not used; full clone into installdir.
    subprocess.check_call(["git", "clone", "-b", branch, "https://github.com/Microsoft/malmo.git", installdir])
    return setup(build=build, installdir=installdir)
Launch Minecraft listening for malmoenv connections.
Args:
port: the TCP port to listen on.
installdir: the install dir name. Defaults to MalmoPlatform.
Must be same as given (or defaulted) in download call if used.
replaceable: whether or not to automatically restart Minecraft (def... | def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False):
launch_script = './launchClient.sh'
if os.name == 'nt':
launch_script = 'launchClient.bat'
cwd = os.getcwd()
os.chdir(installdir)
os.chdir("Minecraft")
try:
cmd = [launch_script, '-port', str(port), '... | 147,482 |
Load a theme from the specified configuration file.
Parameters:
filename: The name of the filename to load.
source: A description of where the theme was loaded from. | def from_file(cls, filename, source):
_logger.info('Loading theme %s', filename)
try:
config = configparser.ConfigParser()
config.optionxform = six.text_type # Preserve case
with codecs.open(filename, encoding='utf-8') as fp:
config.readfp(f... | 147,722 |
Converts hex RGB to the 6x6x6 xterm color space
Args:
color (str): RGB color string in the format "#RRGGBB"
Returns:
str: ansi color string in the format "ansi_n", where n
is between 16 and 230
Reference:
https://github.com/chadj2/bash-ui/bl... | def rgb_to_ansi(color):
if color[0] != '#' or len(color) != 7:
return None
try:
r = round(int(color[1:3], 16) / 51.0) # Normalize between 0-5
g = round(int(color[3:5], 16) / 51.0)
b = round(int(color[5:7], 16) / 51.0)
n = int(36 * r... | 147,725 |
Overlay a message box on the center of the screen and wait for input.
Params:
message (list or string): List of strings, one per line.
timeout (float): Optional, maximum length of time that the message
will be shown before disappearing.
style (str): The theme... | def show_notification(self, message, timeout=None, style='Info'):
assert style in ('Info', 'Warning', 'Error', 'Success')
if isinstance(message, six.string_types):
message = message.splitlines()
n_rows, n_cols = self.stdscr.getmaxyx()
v_offset, h_offset = self.std... | 147,811 |
Search through the mime handlers list and attempt to find the
appropriate command to open the provided url with.
Will raise a MailcapEntryNotFound exception if no valid command exists.
Params:
url (text): URL that will be checked
Returns:
command (text): The st... | def get_mailcap_entry(self, url):
for parser in mime_parsers.parsers:
if parser.pattern.match(url):
# modified_url may be the same as the original url, but it
# could also be updated to point to a different page, or it
# could refer to the lo... | 147,816 |
Display a text prompt at the bottom of the screen.
Params:
prompt (string): Text prompt that will be displayed
key (bool): If true, grab a single keystroke instead of a full
string. This can be faster than pressing enter for
single key pro... | def prompt_input(self, prompt, key=False):
n_rows, n_cols = self.stdscr.getmaxyx()
v_offset, h_offset = self.stdscr.getbegyx()
ch, attr = str(' '), self.attr('Prompt')
prompt = self.clean(prompt, n_cols - 1)
# Create a new window to draw the text at the bottom of the s... | 147,822 |
Sample this motion track into discretized motion events.
Args:
contact_id: contact point id
accuracy: motion minimum difference in space
dt: sample time difference | def discretize(self, contact_id=0, accuracy=0.004, dt=0.001):
if not self.event_points:
return []
events = []
action_dt = accuracy / self.speed
dt = dt or action_dt
ep0 = self.event_points[0]
for _ in range(int(ep0[0] / dt)):
events.app... | 148,195 |
Similar to swipe action, but the end point is provide by a UI proxy or by fixed coordinates.
Args:
target (:py:class:`UIObjectProxy <poco.proxy.UIObjectProxy>`): a UI proxy or 2-list/2-tuple coordinates
(x, y) in NormalizedCoordinate system
duration (:py:obj:`float`): time ... | def drag_to(self, target, duration=2.0):
try:
duration = float(duration)
except ValueError:
raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))
if type(target) in (list, tuple):
target_pos = target
else:
... | 148,302 |
def focus(self, f):
    """Return a new UI proxy copy with the given focus.

    The proxy is immutable, so a shallow copy is returned instead of
    mutating ``self``.

    Args:
        f: the focus point — 2-list/2-tuple coordinates (x, y) in
            NormalizedCoordinate system, or 'center'/'anchor'.

    Returns:
        a copy of this proxy with ``_focus`` set to ``f``.
    """
    focused = copy.copy(self)
    focused._focus = f
    return focused
Get the position of the UI elements.
Args:
focus: focus point of UI proxy, see :py:meth:`.focus() <poco.proxy.UIObjectProxy.focus>` for more details
Returns:
2-list/2-tuple: coordinates (x, y) in NormalizedCoordinate system
Raises:
TypeError: raised when u... | def get_position(self, focus=None):
focus = focus or self._focus or 'anchor'
if focus == 'anchor':
pos = self.attr('pos')
elif focus == 'center':
x, y = self.attr('pos')
w, h = self.get_size()
ap_x, ap_y = self.attr("anchorPoint")
... | 148,306 |
def wait(self, timeout=3):
    """Block until the UI element appears, or the timeout elapses.

    Polls ``self.exists()`` with the poco polling interval between checks.
    Never raises on timeout — always returns self so calls can be chained.

    Args:
        timeout: maximum waiting time in seconds.

    Returns:
        self (UIObjectProxy).
    """
    # Use a monotonic clock: time.time() is wall-clock and can jump
    # (NTP adjustment, manual clock change), breaking the timeout.
    deadline = time.monotonic() + timeout
    while not self.exists():
        self.poco.sleep_for_polling_interval()
        if time.monotonic() > deadline:
            break
    return self
def wait_for_disappearance(self, timeout=120):
    """Block until the UI element **disappears**, within the given timeout.

    Polls ``self.exists()`` with the poco polling interval between checks.

    Args:
        timeout: maximum waiting time in seconds.

    Raises:
        PocoTargetTimeout: when the element is still present after timeout.
    """
    # Monotonic clock instead of time.time(): wall-clock jumps (NTP, manual
    # changes) must not shorten or extend the timeout window.
    deadline = time.monotonic() + timeout
    while self.exists():
        self.poco.sleep_for_polling_interval()
        if time.monotonic() > deadline:
            raise PocoTargetTimeout('disappearance', self)
def setattr(self, name, val):
    """Change an attribute value of the UI element.

    Not every attribute is mutable; attempting to change an immutable or
    nonexistent attribute raises InvalidOperationException.

    Args:
        name: attribute name.
        val: new attribute value to set.

    Raises:
        InvalidOperationException: when the remote hierarchy refuses the set.
    """
    node = self._do_query(multiple=False)
    try:
        return self.poco.agent.hierarchy.setAttr(node, name, val)
    except UnableToSetAttributeException as e:
        raise InvalidOperationException('"{}" of "{}"'.format(str(e), self))
Similar to click but press the screen for the given time interval and then release
Args:
pos (:obj:`2-list/2-tuple`): coordinates (x, y) in range from 0 to 1
duration: duration of press the screen | def long_click(self, pos, duration=2.0):
try:
duration = float(duration)
except ValueError:
raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))
if not (0 <= pos[0] <= 1) or not (0 <= pos[1] <= 1):
raise InvalidOp... | 148,344 |
Scroll from the lower part to the upper part of the entire screen.
Args:
direction (:py:obj:`str`): scrolling direction. "vertical" or "horizontal"
percent (:py:obj:`float`): scrolling distance percentage of the entire screen height or width according to
direction
... | def scroll(self, direction='vertical', percent=0.6, duration=2.0):
if direction not in ('vertical', 'horizontal'):
raise ValueError('Argument `direction` should be one of "vertical" or "horizontal". Got {}'
.format(repr(direction)))
start = [0.5, 0.5]
... | 148,345 |
Squeezing or expanding 2 fingers on the entire screen.
Args:
direction (:py:obj:`str`): pinching direction, only "in" or "out". "in" for squeezing, "out" for expanding
percent (:py:obj:`float`): squeezing range from or expanding range to of the entire screen
duration (:py:ob... | def pinch(self, direction='in', percent=0.6, duration=2.0, dead_zone=0.1):
if direction not in ('in', 'out'):
raise ValueError('Argument `direction` should be one of "in" or "out". Got {}'.format(repr(direction)))
if dead_zone >= percent:
raise ValueError('Argument `dea... | 148,346 |
def apply_motion_tracks(self, tracks, accuracy=0.004):
    """Discretize and apply a batch of motion tracks as input events.

    Args:
        tracks (list): list of ``poco.utils.track.MotionTrack`` objects.
        accuracy (float): motion accuracy for each motion step in
            normalized coordinate metrics.

    Raises:
        ValueError: when no tracks are provided.
    """
    if not tracks:
        raise ValueError('Please provide at least one track. Got {}'.format(repr(tracks)))
    batch = MotionTrackBatch(tracks)
    events = batch.discretize(accuracy)
    return self.agent.input.applyMotionEvents(events)
def get_instance(cls, device):
    """Return a cached AndroidUiautomationPoco for the given device.

    Acts as a memo slot: an instance is created only when the device has
    no cached (non-None) poco instance yet.

    Args:
        device: the device instance (as provided by airtest).

    Returns:
        the cached or newly created AndroidUiautomationPoco.
    """
    instance = cls._nuis.get(device)
    if instance is None:
        instance = AndroidUiautomationPoco(device)
        cls._nuis[device] = instance
    return instance
Determine if a number of Suggested Actions are supported by a Channel.
Args:
channel_id (str): The Channel to check the if Suggested Actions are supported in.
button_cnt (int, optional): Defaults to 100. The number of Suggested Actions to check for the Channel.
Returns:
... | def supports_suggested_actions(channel_id: str, button_cnt: int = 100) -> bool:
max_actions = {
# https://developers.facebook.com/docs/messenger-platform/send-messages/quick-replies
Channels.facebook: 10,
Channels.skype: 10,
# https://developers.line.biz... | 149,655 |
Determine if a number of Card Actions are supported by a Channel.
Args:
channel_id (str): The Channel to check if the Card Actions are supported in.
button_cnt (int, optional): Defaults to 100. The number of Card Actions to check for the Channel.
Returns:
bool: True... | def supports_card_actions(channel_id: str, button_cnt: int = 100) -> bool:
max_actions = {
Channels.facebook: 3,
Channels.skype: 3,
Channels.ms_teams: 3,
Channels.line: 99,
Channels.slack: 100,
Channels.emulator: 100,
... | 149,656 |
def get_channel_id(turn_context: TurnContext) -> str:
    """Get the Channel Id from the current Activity on the Turn Context.

    Args:
        turn_context (TurnContext): the Turn Context whose Activity holds
            the Channel Id.

    Returns:
        str: the Channel Id, or "" when the Activity has none.
    """
    channel_id = turn_context.activity.channel_id
    return channel_id if channel_id is not None else ""
run testcase or testsuite.
Args:
config (dict): testcase/testsuite config dict
{
"name": "ABC",
"variables": {},
"setup_hooks", [],
"teardown_hooks", []
}
http_client_sessio... | def __init__(self, config, http_client_session=None):
self.verify = config.get("verify", True)
self.output = config.get("output", [])
self.validation_results = []
config_variables = config.get("variables", {})
# testcase setup hooks
testcase_setup_hooks = config... | 150,336 |
handle skip feature for test
- skip: skip current test unconditionally
- skipIf: skip current test if condition is true
- skipUnless: skip current test unless condition is true
Args:
test_dict (dict): test info
Raises:
SkipTest: skip test | def _handle_skip_feature(self, test_dict):
# TODO: move skip to initialize
skip_reason = None
if "skip" in test_dict:
skip_reason = test_dict["skip"]
elif "skipIf" in test_dict:
skip_if_condition = test_dict["skipIf"]
if self.session_context... | 150,339 |
call hook actions.
Args:
actions (list): each action in actions list maybe in two format.
format1 (dict): assignment, the value returned by hook function will be assigned to variable.
{"var": "${func()}"}
format2 (str): only call hook functions.
... | def do_hook_actions(self, actions, hook_type):
logger.log_debug("call {} hook actions.".format(hook_type))
for action in actions:
if isinstance(action, dict) and len(action) == 1:
# format 1
# {"var": "${func()}"}
var_name, hook_conte... | 150,340 |
prepare locust testcases
Args:
path (str): testcase file path.
Returns:
list: locust tests data
[
testcase1_dict,
testcase2_dict
] | def prepare_locust_tests(path):
tests_mapping = loader.load_tests(path)
testcases = parser.parse_tests(tests_mapping)
locust_tests = []
for testcase in testcases:
testcase_weight = testcase.get("config", {}).pop("weight", 1)
for _ in range(testcase_weight):
locust_test... | 150,345 |
initialize HttpRunner.
Args:
failfast (bool): stop the test run on the first error or failure.
save_tests (bool): save loaded/parsed tests to JSON file.
report_template (str): report template file path, template should be in Jinja2 format.
report_dir (str): html ... | def __init__(self, failfast=False, save_tests=False, report_template=None, report_dir=None,
log_level="INFO", log_file=None):
self.exception_stage = "initialize HttpRunner()"
kwargs = {
"failfast": failfast,
"resultclass": report.HtmlTestResult
}
... | 150,346 |
initialize testcase with Runner() and add to test suite.
Args:
testcases (list): testcases list.
Returns:
unittest.TestSuite() | def _add_tests(self, testcases):
def _add_test(test_runner, test_dict):
def test(self):
try:
test_runner.run_test(test_dict)
except exceptions.MyBaseFailure as ex:
self.fail(str(ex))
finally... | 150,347 |
run tests in test_suite
Args:
test_suite: unittest.TestSuite()
Returns:
list: tests_results | def _run_suite(self, test_suite):
tests_results = []
for testcase in test_suite:
testcase_name = testcase.config.get("name")
logger.log_info("Start to run testcase: {}".format(testcase_name))
result = self.unittest_runner.run(testcase)
tests_res... | 150,348 |
aggregate results
Args:
tests_results (list): list of (testcase, result) | def _aggregate(self, tests_results):
summary = {
"success": True,
"stat": {
"testcases": {
"total": len(tests_results),
"success": 0,
"fail": 0
},
"teststeps": {}
... | 150,349 |
run testcase/testsuite file or folder.
Args:
path (str): testcase/testsuite file/folder path.
dot_env_path (str): specified .env file path.
mapping (dict): if mapping is specified, it will override variables in config block.
Returns:
instance: HttpRunner(... | def run_path(self, path, dot_env_path=None, mapping=None):
# load tests
self.exception_stage = "load tests"
tests_mapping = loader.load_tests(path, dot_env_path)
tests_mapping["project_mapping"]["test_path"] = path
if mapping:
tests_mapping["project_mapping"... | 150,351 |
main interface.
Args:
path_or_tests:
str: testcase/testsuite file/folder path
dict: valid testcase/testsuite data | def run(self, path_or_tests, dot_env_path=None, mapping=None):
if validator.is_testcase_path(path_or_tests):
return self.run_path(path_or_tests, dot_env_path, mapping)
elif validator.is_testcases(path_or_tests):
return self.run_tests(path_or_tests)
else:
... | 150,352 |
def convert_dict_to_params(src_dict):
    """Convert a dict to a query-params-style string.

    Note: values are NOT url-encoded; this mirrors the raw mapping.

    Args:
        src_dict (dict): source mapping data structure.

    Returns:
        str: "key=value" pairs joined with "&", e.g. "a=1&b=2".
    """
    pairs = ["{}={}".format(key, value) for key, value in src_dict.items()]
    return "&".join(pairs)
deepcopy dict data, ignore file object (_io.BufferedReader)
Args:
data (dict): dict data structure
{
'a': 1,
'b': [2, 4],
'c': lambda x: x+1,
'd': open('LICENSE'),
'f': {
'f1': {'a1': 2},
... | def deepcopy_dict(data):
try:
return copy.deepcopy(data)
except TypeError:
copied_data = {}
for key, value in data.items():
if isinstance(value, dict):
copied_data[key] = deepcopy_dict(value)
else:
try:
copi... | 150,361 |
ensure variables are in mapping format.
Args:
variables (list/dict): original variables
Returns:
dict: ensured variables in dict format
Examples:
>>> variables = [
{"a": 1},
{"b": 2}
]
>>> print(ensure_mapping_format(variables))
... | def ensure_mapping_format(variables):
if isinstance(variables, list):
variables_dict = {}
for map_dict in variables:
variables_dict.update(map_dict)
return variables_dict
elif isinstance(variables, dict):
return variables
else:
raise exceptions.Par... | 150,362 |
def get_testcase_io(testcase):
    """Collect a testcase's input variables and extracted output.

    Args:
        testcase (unittest.suite.TestSuite): one YAML/JSON file's testcase,
            expected to carry two attributes: ``config`` (parsed config
            block) and ``runner`` (initialized runner.Runner()).

    Returns:
        dict: {"in": input variables, "out": extracted output mapping}.
    """
    config = testcase.config
    variables = config.get("variables", {})
    output_mapping = testcase.runner.extract_output(config.get("output", []))
    return {
        "in": variables,
        "out": output_mapping
    }
def dump_logs(json_data, project_mapping, tag_name):
    """Dump tests data to a json file.

    The dumped file is located in the PWD/logs folder (path is resolved by
    _prepare_dump_info).

    Args:
        json_data (list/dict): json data to dump.
        project_mapping (dict): project info used to derive the dump path.
        tag_name (str): tag name — loaded/parsed/summary.
    """
    # Resolve target directory and file name from the project info and tag.
    pwd_dir_path, dump_file_name = _prepare_dump_info(project_mapping, tag_name)
    dump_json_file(json_data, pwd_dir_path, dump_file_name)
load folder path, return all files endswith yml/yaml/json in list.
Args:
folder_path (str): specified folder path to load
recursive (bool): load files recursively if True
Returns:
list: files endswith yml/yaml/json | def load_folder_files(folder_path, recursive=True):
if isinstance(folder_path, (list, set)):
files = []
for path in set(folder_path):
files.extend(load_folder_files(path, recursive))
return files
if not os.path.exists(folder_path):
return []
file_list = []... | 150,379 |
load .env file.
Args:
dot_env_path (str): .env file path
Returns:
dict: environment variables mapping
{
"UserName": "debugtalk",
"Password": "123456",
"PROJECT_KEY": "ABCDEFGH"
}
Raises:
exceptions.FileFormat... | def load_dot_env_file(dot_env_path):
if not os.path.isfile(dot_env_path):
return {}
logger.log_info("Loading environment variables from {}".format(dot_env_path))
env_variables_mapping = {}
with io.open(dot_env_path, 'r', encoding='utf-8') as fp:
for line in fp:
# maxsp... | 150,380 |
locate filename and return absolute file path.
searching will be recursive upward until current working directory.
Args:
start_path (str): start locating path, maybe file path or directory path
Returns:
str: located file path. None if file not found.
Raises:
exceptions.Fil... | def locate_file(start_path, file_name):
if os.path.isfile(start_path):
start_dir_path = os.path.dirname(start_path)
elif os.path.isdir(start_path):
start_dir_path = start_path
else:
raise exceptions.FileNotFound("invalid path: {}".format(start_path))
file_path = os.path.joi... | 150,381 |
def load_module_functions(module):
    """Collect the functions defined in a python module.

    Args:
        module: python module.

    Returns:
        dict: name -> function mapping for the module, e.g.
            {"func1_name": func1, "func2_name": func2}.
    """
    return {
        name: item
        for name, item in vars(module).items()
        if validator.is_function(item)
    }
def load_folder_content(folder_path):
    """Load api/testcases/testsuites definitions from a folder.

    Args:
        folder_path (str): folder containing api/testcases/testsuites files.

    Returns:
        dict: file path -> loaded content mapping, e.g.
            {"tests/api/basic.yml": [{"api": {...}}, ...]}.
    """
    return {
        file_path: load_file(file_path)
        for file_path in load_folder_files(folder_path)
    }
load api, testcases, .env, debugtalk.py functions.
api/testcases folder is relative to project_working_directory
Args:
test_path (str): test file/folder path, locate pwd from this path.
dot_env_path (str): specified .env file path
Returns:
dict: project loaded api/testcases def... | def load_project_tests(test_path, dot_env_path=None):
# locate debugtalk.py file
debugtalk_path = locate_debugtalk_py(test_path)
if debugtalk_path:
# The folder contains debugtalk.py will be treated as PWD.
project_working_directory = os.path.dirname(debugtalk_path)
else:
#... | 150,390 |
def regex_findall_variables(content):
    """Extract all $variable names from a string.

    Args:
        content (str): string content, e.g. "/blog/$postid".

    Returns:
        list: extracted variable names; [] when content is not a string
            (the compiled regex raises TypeError on non-str input).
    """
    try:
        matches = variable_regex_compile.findall(content)
        # The pattern has two alternative capture groups; exactly one of
        # them matches per hit.
        return [match[0] or match[1] for match in matches]
    except TypeError:
        return []
return [] | 150,395 |
def get_mapping_variable(variable_name, variables_mapping):
    """Look up a variable in the variables mapping.

    Args:
        variable_name (str): variable name.
        variables_mapping (dict): variables mapping.

    Returns:
        the mapped variable value.

    Raises:
        exceptions.VariableNotFound: when the variable is not found.
    """
    if variable_name not in variables_mapping:
        raise exceptions.VariableNotFound("{} is not found.".format(variable_name))
    return variables_mapping[variable_name]
get function from functions_mapping,
if not found, then try to check if builtin function.
Args:
variable_name (str): variable name
variables_mapping (dict): variables mapping
Returns:
mapping function object.
Raises:
exceptions.FunctionNotFound: function is neither... | def get_mapping_function(function_name, functions_mapping):
if function_name in functions_mapping:
return functions_mapping[function_name]
elif function_name in ["parameterize", "P"]:
from httprunner import loader
return loader.load_csv_file
elif function_name in ["environ", "... | 150,398 |
extend test with testcase definition
test will merge and override testcase config definition.
Args:
test_dict (dict): test block
testcase_def_dict (dict): testcase definition
Returns:
dict: extended test dict. | def _extend_with_testcase(test_dict, testcase_def_dict):
# override testcase config variables
testcase_def_dict["config"].setdefault("variables", {})
testcase_def_variables = utils.ensure_mapping_format(testcase_def_dict["config"].get("variables", {}))
testcase_def_variables.update(test_dict.pop("v... | 150,406 |
parse testcase
Args:
testcase (dict):
{
"config": {},
"teststeps": []
} | def _parse_testcase(testcase, project_mapping, session_variables_set=None):
testcase.setdefault("config", {})
prepared_config = __prepare_config(
testcase["config"],
project_mapping,
session_variables_set
)
prepared_testcase_tests = __prepare_testcase_tests(
testcase... | 150,409 |
def __init__(self, function_meta, functions_mapping=None, check_variables_set=None):
    """Initialize a LazyFunction from its function_meta description.

    Args:
        function_meta (dict): function name, args and kwargs, e.g.
            {
                "func_name": "func",
                "args": [1, 2],
                "kwargs": {"a": 3, "b": 4}
            }
        functions_mapping (dict, optional): mapping of function names to callables.
        check_variables_set (set, optional): variable names available when
            checking variable references.
    """
    self.cache_key = None
    self.functions_mapping = functions_mapping if functions_mapping else {}
    self.check_variables_set = check_variables_set if check_variables_set else set()
    self.__parse(function_meta)
init func as lazy function instance
Args:
function_meta (dict): function meta including name, args and kwargs | def __parse(self, function_meta):
self._func = get_mapping_function(
function_meta["func_name"],
self.functions_mapping
)
self.func_name = self._func.__name__
self._args = prepare_lazy_data(
function_meta.get("args", []),
self.func... | 150,414 |
parse raw string, replace function and variable with {}
Args:
raw_string(str): string with functions or variables
e.g. "ABC${func2($a, $b)}DE$c"
Returns:
string: "ABC{}DE{}"
args: ["${func2($a, $b)}", "$c"] | def __parse(self, raw_string):
self._args = []
def escape_braces(origin_string):
return origin_string.replace("{", "{{").replace("}", "}}")
try:
match_start_position = raw_string.index("$", 0)
begin_string = raw_string[0:match_start_position]
... | 150,419 |
check if path is testcase path or path list.
Args:
path (str/list): file path or file path list.
Returns:
bool: True if path is valid file path or path list, otherwise False. | def is_testcase_path(path):
if not isinstance(path, (str, list)):
return False
if isinstance(path, list):
for p in path:
if not is_testcase_path(p):
return False
if isinstance(path, str):
if not os.path.exists(path):
return False
re... | 150,428 |
init test variables, called when each test(api) starts.
variables_mapping will be evaluated first.
Args:
variables_mapping (dict)
{
"random": "${gen_random_string(5)}",
"authorization": "${gen_md5($TOKEN, $data, $random)}",
... | def init_test_variables(self, variables_mapping=None):
variables_mapping = variables_mapping or {}
variables_mapping = utils.ensure_mapping_format(variables_mapping)
variables_mapping.update(self.session_variables_mapping)
parsed_variables_mapping = parser.parse_variables_mappin... | 150,446 |
get summary from test result
Args:
result (instance): HtmlTestResult() instance
Returns:
dict: summary extracted from result.
{
"success": True,
"stat": {},
"time": {},
"records": []
} | def get_summary(result):
summary = {
"success": result.wasSuccessful(),
"stat": {
'total': result.testsRun,
'failures': len(result.failures),
'errors': len(result.errors),
'skipped': len(result.skipped),
'expectedFailures': len(result.... | 150,450 |
aggregate new_stat to origin_stat.
Args:
origin_stat (dict): origin stat dict, will be updated with new_stat dict.
new_stat (dict): new stat dict. | def aggregate_stat(origin_stat, new_stat):
for key in new_stat:
if key not in origin_stat:
origin_stat[key] = new_stat[key]
elif key == "start_at":
# start datetime
origin_stat[key] = min(origin_stat[key], new_stat[key])
else:
origin_stat[... | 150,451 |
def __expand_meta_datas(meta_datas, meta_datas_expanded):
    """Flatten arbitrarily nested meta_datas into a single-level list.

    Dicts are appended to the accumulator in depth-first, left-to-right
    order; lists are descended into recursively.

    Args:
        meta_datas (dict/list): meta data, possibly in nested list format.
        meta_datas_expanded (list): accumulator that receives each dict.

    Examples:
        >>> expanded = []
        >>> __expand_meta_datas([[dict1, dict2], dict3], expanded)
        >>> expanded
        [dict1, dict2, dict3]
    """
    if isinstance(meta_datas, dict):
        meta_datas_expanded.append(meta_datas)
        return

    if isinstance(meta_datas, list):
        for nested_item in meta_datas:
            __expand_meta_datas(nested_item, meta_datas_expanded)
render html report with specified report name and template
Args:
report_template (str): specify html report template path
report_dir (str): specify html report save directory | def render_html_report(summary, report_template=None, report_dir=None):
if not report_template:
report_template = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"templates",
"report_template.html"
)
logger.log_debug("No html report template... | 150,458 |
response content could be json or html text.
Args:
field (str): string joined by delimiter.
e.g.
"status_code"
"headers"
"cookies"
"content"
"headers.content-type"
"content.person.name.first_... | def _extract_field_with_delimiter(self, field):
# string.split(sep=None, maxsplit=-1) -> list of strings
# e.g. "content.person.name" => ["content", "person.name"]
try:
top_query, sub_query = field.split('.', 1)
except ValueError:
top_query = field
... | 150,468 |
extract value from requests.Response and store in OrderedDict.
Args:
extractors (list):
[
{"resp_status_code": "status_code"},
{"resp_headers_content_type": "headers.content-type"},
{"resp_content": "content"},
... | def extract_response(self, extractors):
if not extractors:
return {}
logger.log_debug("start to extract from response object.")
extracted_variables_mapping = OrderedDict()
extract_binds_order_dict = utils.ensure_mapping_format(extractors)
for key, field in ... | 150,470 |
def set_user_profile_photo(
    self,
    photo: str
) -> bool:
    """Set a new profile photo for the current user.

    This method only works for Users; bot profile photos must be set
    using BotFather instead.

    Args:
        photo (``str``):
            Path of a local file to upload as the new profile photo.

    Returns:
        bool: True on success.
    """
    uploaded_file = self.save_file(photo)
    response = self.send(
        functions.photos.UploadProfilePhoto(
            file=uploaded_file
        )
    )
    return bool(response)
Use this decorator to automatically register a function for handling user status updates.
This does the same thing as :meth:`add_handler` using the :class:`UserStatusHandler`.
Args:
filters (:obj:`Filters <pyrogram.Filters>`):
Pass one or more filters to allow only a subset ... | def on_user_status(
self=None,
filters=None,
group: int = 0
) -> callable:
def decorator(func: callable) -> Tuple[Handler, int]:
if isinstance(func, tuple):
func = func[0].callback
handler = pyrogram.UserStatusHandler(func, filters)
... | 150,525 |
Use this method to get the number of members in a chat.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
Returns:
On success, an integer is returned.
Raises:
:class:`RPCError <pyrogram.RPCError... | def get_chat_members_count(
self,
chat_id: Union[int, str]
) -> int:
peer = self.resolve_peer(chat_id)
if isinstance(peer, types.InputPeerChat):
return self.send(
functions.messages.GetChats(
id=[peer.chat_id]
... | 150,531 |
def idle(self, stop_signals: tuple = (SIGINT, SIGTERM, SIGABRT)):
    """Block execution until one of stop_signals arrives, then stop the Client.

    Installs a handler for each signal that clears the idle flag, then
    polls the flag once per second; when cleared, the Client is stopped
    by closing the underlying connection.

    Args:
        stop_signals (``tuple``, *optional*):
            Iterable of signals the handler listens to.
            Defaults to (SIGINT, SIGTERM, SIGABRT).
    """
    def on_stop_signal(*args):
        # any of the registered signals ends the wait loop below
        self.is_idle = False

    for stop_signal in stop_signals:
        signal(stop_signal, on_stop_signal)

    self.is_idle = True
    while self.is_idle:
        time.sleep(1)

    self.stop()
def remove_handler(self, handler: Handler, group: int = 0):
    """Remove a previously-added update handler.

    Provide the same group the handler was added in. The return value of
    :meth:`add_handler` — a (handler, group) tuple — can be unpacked and
    passed here directly.

    Args:
        handler (``Handler``):
            The handler to remove.
        group (``int``, *optional*):
            The group the handler was registered in; defaults to 0.
    """
    if not isinstance(handler, DisconnectHandler):
        self.dispatcher.remove_handler(handler, group)
    else:
        # disconnect handlers live outside the dispatcher groups
        self.disconnect_handler = None
Use this decorator to automatically register a function for handling raw updates.
This does the same thing as :meth:`add_handler` using the :class:`RawUpdateHandler`.
Args:
group (``int``, *optional*):
The group identifier, defaults to 0. | def on_raw_update(
self=None,
group: int = 0
) -> callable:
def decorator(func: callable) -> Tuple[Handler, int]:
if isinstance(func, tuple):
func = func[0].callback
handler = pyrogram.RawUpdateHandler(func)
if isinstance(self, ... | 150,616 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.