Returns the function signature string for memberdef nodes.
|
def get_function_signature(self, node):
"""Returns the function signature string for memberdef nodes."""
name = self.extract_text(self.get_specific_subnodes(node, 'name'))
if self.with_type_info:
argsstring = self.extract_text(self.get_specific_subnodes(node, 'argsstring'))
else:
argsstring = []
param_id = 1
for n_param in self.get_specific_subnodes(node, 'param'):
declname = self.extract_text(self.get_specific_subnodes(n_param, 'declname'))
if not declname:
declname = 'arg' + str(param_id)
defval = self.extract_text(self.get_specific_subnodes(n_param, 'defval'))
if defval:
defval = '=' + defval
argsstring.append(declname + defval)
param_id = param_id + 1
argsstring = '(' + ', '.join(argsstring) + ')'
type = self.extract_text(self.get_specific_subnodes(node, 'type'))
function_definition = name + argsstring
if type != '' and type != 'void':
function_definition = function_definition + ' -> ' + type
return '`' + function_definition + '` '
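To see the output end to end, here is a self-contained sketch that applies the same signature-building logic to a hand-written Doxygen-style <memberdef> fragment via xml.dom.minidom; the sample XML and the text_of helper are illustrative, not part of the original class.
from xml.dom import minidom

xml = """<memberdef>
  <type>int</type>
  <name>add</name>
  <param><declname>a</declname></param>
  <param><declname>b</declname><defval>0</defval></param>
</memberdef>"""

def text_of(node):
    # Concatenate the direct text children of a DOM node.
    return ''.join(c.data for c in node.childNodes if c.nodeType == c.TEXT_NODE)

member = minidom.parseString(xml).documentElement
name = text_of(member.getElementsByTagName('name')[0])
args = []
for i, p in enumerate(member.getElementsByTagName('param'), start=1):
    declname = p.getElementsByTagName('declname')
    arg = text_of(declname[0]) if declname else 'arg%d' % i  # fall back to argN
    defval = p.getElementsByTagName('defval')
    if defval:
        arg += '=' + text_of(defval[0])
    args.append(arg)
ret = text_of(member.getElementsByTagName('type')[0])
sig = '%s(%s)' % (name, ', '.join(args))
if ret and ret != 'void':
    sig += ' -> ' + ret
print('`%s` ' % sig)  # `add(a, b=0) -> int`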
|
Produces the "Constructors" section and the constructor signatures
(since swig does not do so for classes) for class docstrings.
|
def make_constructor_list(self, constructor_nodes, classname):
"""Produces the "Constructors" section and the constructor signatures
(since swig does not do so for classes) for class docstrings."""
if constructor_nodes == []:
return
self.add_text(['\n', 'Constructors',
'\n', '------------'])
for n in constructor_nodes:
self.add_text('\n')
self.add_line_with_subsequent_indent('* ' + self.get_function_signature(n))
self.subnode_parse(n, pieces = [], indent=4, ignore=['definition', 'name'])
|
Produces the "Attributes" section in class docstrings for public
member variables (attributes).
|
def make_attribute_list(self, node):
"""Produces the "Attributes" section in class docstrings for public
member variables (attributes).
"""
atr_nodes = []
for n in self.get_specific_subnodes(node, 'memberdef', recursive=2):
if n.attributes['kind'].value == 'variable' and n.attributes['prot'].value == 'public':
atr_nodes.append(n)
if not atr_nodes:
return
self.add_text(['\n', 'Attributes',
'\n', '----------'])
for n in atr_nodes:
name = self.extract_text(self.get_specific_subnodes(n, 'name'))
self.add_text(['\n* ', '`', name, '`', ' : '])
self.add_text(['`', self.extract_text(self.get_specific_subnodes(n, 'type')), '`'])
self.add_text('  \n')
restrict = ['briefdescription', 'detaileddescription']
self.subnode_parse(n, pieces=[''], indent=4, restrict=restrict)
|
Collects the memberdef nodes and corresponding signatures that
correspond to public function entries that are at most depth 2 deeper
than the current (compounddef) node. Returns a dictionary with
function signatures (what swig expects after the %feature directive)
as keys, and a list of corresponding memberdef nodes as values.
|
def get_memberdef_nodes_and_signatures(self, node, kind):
"""Collects the memberdef nodes and corresponding signatures that
correspond to public function entries that are at most depth 2 deeper
than the current (compounddef) node. Returns a dictionary with
function signatures (what swig expects after the %feature directive)
as keys, and a list of corresponding memberdef nodes as values."""
sig_dict = {}
sig_prefix = ''
if kind in ('file', 'namespace'):
ns_node = node.getElementsByTagName('innernamespace')
if not ns_node and kind == 'namespace':
ns_node = node.getElementsByTagName('compoundname')
if ns_node:
sig_prefix = self.extract_text(ns_node[0]) + '::'
elif kind in ('class', 'struct'):
# Get the full function name.
cn_node = node.getElementsByTagName('compoundname')
sig_prefix = self.extract_text(cn_node[0]) + '::'
md_nodes = self.get_specific_subnodes(node, 'memberdef', recursive=2)
for n in md_nodes:
if n.attributes['prot'].value != 'public':
continue
if n.attributes['kind'].value in ['variable', 'typedef']:
continue
if not self.get_specific_subnodes(n, 'definition'):
continue
name = self.extract_text(self.get_specific_subnodes(n, 'name'))
if name[:8] == 'operator':
continue
sig = sig_prefix + name
if sig in sig_dict:
sig_dict[sig].append(n)
else:
sig_dict[sig] = [n]
return sig_dict
|
Produce standard documentation for memberdef_nodes.
|
def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes):
"""Produce standard documentation for memberdef_nodes."""
for n in memberdef_nodes:
self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n'])
if self.with_function_signature:
self.add_line_with_subsequent_indent(self.get_function_signature(n))
self.subnode_parse(n, pieces=[], ignore=['definition', 'name'])
self.add_text(['";', '\n'])
|
Produces docstring entries containing an "Overloaded function"
section with the documentation for each overload, if the function is
overloaded and self.with_overloaded_functions is set. Else, produce
normal documentation.
|
def handle_typical_memberdefs(self, signature, memberdef_nodes):
"""Produces docstring entries containing an "Overloaded function"
section with the documentation for each overload, if the function is
overloaded and self.with_overloaded_functions is set. Else, produce
normal documentation.
"""
if len(memberdef_nodes) == 1 or not self.with_overloaded_functions:
self.handle_typical_memberdefs_no_overload(signature, memberdef_nodes)
return
self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n'])
if self.with_function_signature:
for n in memberdef_nodes:
self.add_line_with_subsequent_indent(self.get_function_signature(n))
self.add_text('\n')
self.add_text(['Overloaded function', '\n',
'-------------------'])
for n in memberdef_nodes:
self.add_text('\n')
self.add_line_with_subsequent_indent('* ' + self.get_function_signature(n))
self.subnode_parse(n, pieces=[], indent=4, ignore=['definition', 'name'])
self.add_text(['";', '\n'])
|
This is the only place where text wrapping is automatically performed.
Generally, this function parses the node (locally), wraps the text, and
then adds the result to self.pieces. However, it may be convenient to
allow the previous content of self.pieces to be included in the text
wrapping. For this, use the following *convention*:
If self.pieces ends with '', treat the _previous_ entry as part of the
current paragraph. Else, insert new-line and start a new paragraph
and "wrapping context".
Paragraphs always end with '  \n', but if the parsed content ends with
the special symbol '', this is passed on.
|
def do_para(self, node):
"""This is the only place where text wrapping is automatically performed.
Generally, this function parses the node (locally), wraps the text, and
then adds the result to self.pieces. However, it may be convenient to
allow the previous content of self.pieces to be included in the text
wrapping. For this, use the following *convention*:
If self.pieces ends with '', treat the _previous_ entry as part of the
current paragraph. Else, insert new-line and start a new paragraph
and "wrapping context".
Paragraphs always end with '  \n', but if the parsed content ends with
the special symbol '', this is passed on.
"""
if self.pieces[-1:] == ['']:
pieces, self.pieces = self.pieces[:-2], self.pieces[-2:-1]
else:
self.add_text('\n')
pieces, self.pieces = self.pieces, ['']
self.subnode_parse(node)
dont_end_paragraph = self.pieces[-1:] == ['']
# Now do the text wrapping:
width = self.textwidth - self.indent
wrapped_para = []
for line in ''.join(self.pieces).splitlines():
keep_markdown_newline = line[-2:] == '  '
w_line = textwrap.wrap(line, width=width, break_long_words=False)
if w_line == []:
w_line = ['']
if keep_markdown_newline:
w_line[-1] = w_line[-1] + '  '
for wl in w_line:
wrapped_para.append(wl + '\n')
if wrapped_para:
if wrapped_para[-1][-3:] != '  \n':
    wrapped_para[-1] = wrapped_para[-1][:-1] + '  \n'
if dont_end_paragraph:
wrapped_para.append('')
pieces.extend(wrapped_para)
self.pieces = pieces
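A compact sketch of the wrapping convention described above, assuming the markdown hard break is two trailing spaces; wrap_paragraph, the width, and the sample text are made up for illustration.
import textwrap

def wrap_paragraph(text, width=30):
    out = []
    for line in text.splitlines():
        keep_markdown_newline = line.endswith('  ')  # markdown hard break
        wrapped = textwrap.wrap(line, width=width, break_long_words=False) or ['']
        if keep_markdown_newline:
            wrapped[-1] += '  '  # re-attach the hard break to the last piece
        out.extend(w + '\n' for w in wrapped)
    return ''.join(out)

print(wrap_paragraph('A fairly long line that will be wrapped.  \nNext line.'))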
|
This produces %feature("docstring") entries for classes, and handles
class, namespace and file memberdef entries specially to allow for
overloaded functions. For other cases, passes parsing on to standard
handlers (which may produce unexpected results).
|
def do_compounddef(self, node):
"""This produces %feature("docstring") entries for classes, and handles
class, namespace and file memberdef entries specially to allow for
overloaded functions. For other cases, passes parsing on to standard
handlers (which may produce unexpected results).
"""
kind = node.attributes['kind'].value
if kind in ('class', 'struct'):
prot = node.attributes['prot'].value
if prot != 'public':
return
self.add_text('\n\n')
classdefn = self.extract_text(self.get_specific_subnodes(node, 'compoundname'))
classname = classdefn.split('::')[-1]
self.add_text('%%feature("docstring") %s "\n' % classdefn)
if self.with_constructor_list:
constructor_nodes = []
for n in self.get_specific_subnodes(node, 'memberdef', recursive=2):
if n.attributes['prot'].value == 'public':
if self.extract_text(self.get_specific_subnodes(n, 'definition')) == classdefn + '::' + classname:
constructor_nodes.append(n)
for n in constructor_nodes:
self.add_line_with_subsequent_indent(self.get_function_signature(n))
names = ('briefdescription','detaileddescription')
sub_dict = self.get_specific_nodes(node, names)
for n in ('briefdescription','detaileddescription'):
if n in sub_dict:
self.parse(sub_dict[n])
if self.with_constructor_list:
self.make_constructor_list(constructor_nodes, classname)
if self.with_attribute_list:
self.make_attribute_list(node)
sub_list = self.get_specific_subnodes(node, 'includes')
if sub_list:
self.parse(sub_list[0])
self.add_text(['";', '\n'])
names = ['compoundname', 'briefdescription','detaileddescription', 'includes']
self.subnode_parse(node, ignore = names)
elif kind in ('file', 'namespace'):
nodes = node.getElementsByTagName('sectiondef')
for n in nodes:
self.parse(n)
# now explicitly handle possibly overloaded member functions.
if kind in ['class', 'struct','file', 'namespace']:
md_nodes = self.get_memberdef_nodes_and_signatures(node, kind)
for sig in md_nodes:
self.handle_typical_memberdefs(sig, md_nodes[sig])
|
Handle cases outside of class, struct, file or namespace. These are
now dealt with by `handle_typical_memberdefs`.
Do these even exist???
|
def do_memberdef(self, node):
"""Handle cases outside of class, struct, file or namespace. These are
now dealt with by `handle_typical_memberdefs`.
Do these even exist???
"""
prot = node.attributes['prot'].value
id = node.attributes['id'].value
kind = node.attributes['kind'].value
tmp = node.parentNode.parentNode.parentNode
compdef = tmp.getElementsByTagName('compounddef')[0]
cdef_kind = compdef.attributes['kind'].value
if cdef_kind in ('file', 'namespace', 'class', 'struct'):
# These cases are now handled by `handle_typical_memberdefs`
return
if prot != 'public':
return
first = self.get_specific_nodes(node, ('definition', 'name'))
name = self.extract_text(first['name'])
if name[:8] == 'operator': # Don't handle operators yet.
return
if not 'definition' in first or kind in ['variable', 'typedef']:
return
data = self.extract_text(first['definition'])
self.add_text('\n')
self.add_text(['/* where did this entry come from??? */', '\n'])
self.add_text('%feature("docstring") %s "\n%s' % (data, data))
for n in node.childNodes:
if n not in first.values():
self.parse(n)
self.add_text(['";', '\n'])
|
For a user defined section def a header field is present
which should not be printed as such, so we comment it in the
output.
|
def do_header(self, node):
"""For a user defined section def a header field is present
which should not be printed as such, so we comment it in the
output."""
data = self.extract_text(node)
self.add_text('\n/*\n %s \n*/\n' % data)
# If our immediate sibling is a 'description' node then we
# should comment that out also and remove it from the parent
# node's children.
parent = node.parentNode
idx = parent.childNodes.index(node)
if len(parent.childNodes) > idx + 2:
nd = parent.childNodes[idx + 2]
if nd.nodeName == 'description':
nd = parent.removeChild(nd)
self.add_text('\n/*')
self.subnode_parse(nd)
self.add_text('\n*/\n')
|
Decide whether to show documentation on a variable.
|
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant or internal.
# XXX Remove __initializing__?
if name in {'__author__', '__builtins__', '__cached__', '__credits__',
'__date__', '__doc__', '__file__', '__spec__',
'__loader__', '__module__', '__name__', '__package__',
'__path__', '__qualname__', '__slots__', '__version__'}:
return 0
if name.endswith("_swigregister"):
return 0
if name.startswith("__swig"):
return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
|
Download the bulk API result file for a single batch
|
@contextmanager  # from contextlib; this generator is consumed via ``with`` in _store_inserted_ids
def _download_file(uri, bulk_api):
"""Download the bulk API result file for a single batch"""
resp = requests.get(uri, headers=bulk_api.headers(), stream=True)
with tempfile.TemporaryFile("w+b") as f:
for chunk in resp.iter_content(chunk_size=None):
f.write(chunk)
f.seek(0)
yield f
|
Yield successive batch_size-sized chunks from data.
|
def _split_batches(self, data, batch_size):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(data), batch_size):
yield data[i : i + batch_size]
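The chunking pattern in isolation, with made-up data:
def split_batches(data, batch_size):
    # Slicing past the end of a sequence is safe, so the last chunk
    # is simply whatever remains.
    for i in range(0, len(data), batch_size):
        yield data[i:i + batch_size]

print(list(split_batches(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]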
|
Load data for a single step.
|
def _load_mapping(self, mapping):
"""Load data for a single step."""
mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
job_id, local_ids_for_batch = self._create_job(mapping)
result = self._wait_for_job(job_id)
# We store inserted ids even if some batches failed
self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
return result
|
Initiate a bulk insert and upload batches to run in parallel.
|
def _create_job(self, mapping):
"""Initiate a bulk insert and upload batches to run in parallel."""
job_id = self.bulk.create_insert_job(mapping["sf_object"], contentType="CSV")
self.logger.info(" Created bulk job {}".format(job_id))
# Upload batches
local_ids_for_batch = {}
for batch_file, local_ids in self._get_batches(mapping):
batch_id = self.bulk.post_batch(job_id, batch_file)
local_ids_for_batch[batch_id] = local_ids
self.logger.info(" Uploaded batch {}".format(batch_id))
self.bulk.close_job(job_id)
return job_id, local_ids_for_batch
|
Get data from the local db
|
def _get_batches(self, mapping, batch_size=10000):
"""Get data from the local db"""
action = mapping.get("action", "insert")
fields = mapping.get("fields", {}).copy()
static = mapping.get("static", {})
lookups = mapping.get("lookups", {})
record_type = mapping.get("record_type")
# Skip Id field on insert
if action == "insert" and "Id" in fields:
del fields["Id"]
# Build the list of fields to import
columns = []
columns.extend(fields.keys())
columns.extend(lookups.keys())
columns.extend(static.keys())
if record_type:
columns.append("RecordTypeId")
# default to the profile assigned recordtype if we can't find any
# query for the RT by developer name
query = (
    "SELECT Id FROM RecordType WHERE SObjectType='{0}' "
    "AND DeveloperName = '{1}' LIMIT 1"
)
record_type_id = self.sf.query(
query.format(mapping.get("sf_object"), record_type)
)["records"][0]["Id"]
query = self._query_db(mapping)
total_rows = 0
batch_num = 1
def start_batch():
batch_file = io.BytesIO()
writer = unicodecsv.writer(batch_file)
writer.writerow(columns)
batch_ids = []
return batch_file, writer, batch_ids
batch_file, writer, batch_ids = start_batch()
for row in query.yield_per(batch_size):
total_rows += 1
# Add static values to row
pkey = row[0]
row = list(row[1:]) + list(static.values())
if record_type:
row.append(record_type_id)
writer.writerow([self._convert(value) for value in row])
batch_ids.append(pkey)
# Yield and start a new file every [batch_size] rows
if not total_rows % batch_size:
batch_file.seek(0)
self.logger.info(" Processing batch {}".format(batch_num))
yield batch_file, batch_ids
batch_file, writer, batch_ids = start_batch()
batch_num += 1
# Yield result file for final batch
if batch_ids:
batch_file.seek(0)
yield batch_file, batch_ids
self.logger.info(
" Prepared {} rows for import to {}".format(
total_rows, mapping["sf_object"]
)
)
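A condensed, runnable sketch of the batch/yield pattern above, substituting the stdlib csv module and io.StringIO for unicodecsv and io.BytesIO; the rows, columns, and tiny batch size are illustrative.
import csv
import io

def get_batches(rows, columns, batch_size=2):
    def start_batch():
        buf = io.StringIO()
        writer = csv.writer(buf)
        writer.writerow(columns)  # every batch file gets its own header
        return buf, writer, []
    buf, writer, ids = start_batch()
    total = 0
    for pkey, *values in rows:
        total += 1
        writer.writerow(values)
        ids.append(pkey)
        if not total % batch_size:  # yield a full batch, then start fresh
            buf.seek(0)
            yield buf, ids
            buf, writer, ids = start_batch()
    if ids:  # final partial batch
        buf.seek(0)
        yield buf, ids

for buf, ids in get_batches([(1, 'Ada'), (2, 'Bob'), (3, 'Cyd')], ['Name']):
    print(ids, buf.read().splitlines())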
|
Build a query to retrieve data from the local db.
Includes columns from the mapping
as well as joining to the id tables to get real SF ids
for lookups.
|
def _query_db(self, mapping):
"""Build a query to retrieve data from the local db.
Includes columns from the mapping
as well as joining to the id tables to get real SF ids
for lookups.
"""
model = self.models[mapping.get("table")]
# Use primary key instead of the field mapped to SF Id
fields = mapping.get("fields", {}).copy()
if mapping["oid_as_pk"]:
del fields["Id"]
id_column = model.__table__.primary_key.columns.keys()[0]
columns = [getattr(model, id_column)]
for f in fields.values():
columns.append(model.__table__.columns[f])
lookups = mapping.get("lookups", {}).copy()
for lookup in lookups.values():
lookup["aliased_table"] = aliased(
self.metadata.tables["{}_sf_ids".format(lookup["table"])]
)
columns.append(lookup["aliased_table"].columns.sf_id)
query = self.session.query(*columns)
if "record_type" in mapping and hasattr(model, "record_type"):
query = query.filter(model.record_type == mapping["record_type"])
if "filters" in mapping:
filter_args = []
for f in mapping["filters"]:
filter_args.append(text(f))
query = query.filter(*filter_args)
for sf_field, lookup in lookups.items():
# Outer join with lookup ids table:
# returns main obj even if lookup is null
key_field = get_lookup_key_field(lookup, sf_field)
value_column = getattr(model, key_field)
query = query.outerjoin(
lookup["aliased_table"],
lookup["aliased_table"].columns.id == value_column,
)
# Order by foreign key to minimize lock contention
# by trying to keep lookup targets in the same batch
lookup_column = getattr(model, key_field)
query = query.order_by(lookup_column)
self.logger.info(str(query))
return query
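A self-contained sketch of the aliased-table outer-join idea against an in-memory SQLite database (assuming SQLAlchemy 1.4+); the Contact model and the accounts_sf_ids table are hypothetical stand-ins for the mapped model and the *_sf_ids lookup table.
from sqlalchemy import Column, Integer, MetaData, Table, Unicode, create_engine
from sqlalchemy.orm import Session, aliased, declarative_base

Base = declarative_base()

class Contact(Base):
    __tablename__ = 'contacts'
    id = Column(Integer, primary_key=True)
    account_id = Column(Integer)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
metadata = MetaData()
account_ids = Table(
    'accounts_sf_ids', metadata,
    Column('id', Integer, primary_key=True),
    Column('sf_id', Unicode(18)),
)
metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Contact(id=1, account_id=10), Contact(id=2, account_id=None)])
    session.execute(account_ids.insert().values(id=10, sf_id='001xx0000000001AAA'))
    lookup = aliased(account_ids)
    # Outer join keeps rows whose lookup target has no stored sf_id,
    # and ordering by the foreign key mirrors the lock-contention note above.
    query = (
        session.query(Contact.id, lookup.columns.sf_id)
        .outerjoin(lookup, lookup.columns.id == Contact.account_id)
        .order_by(Contact.account_id)
    )
    print(query.all())  # [(2, None), (1, '001xx0000000001AAA')]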
|
Get the job results and store inserted SF Ids in a new table
|
def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch):
"""Get the job results and store inserted SF Ids in a new table"""
id_table_name = self._reset_id_table(mapping)
conn = self.session.connection()
for batch_id, local_ids in local_ids_for_batch.items():
try:
results_url = "{}/job/{}/batch/{}/result".format(
self.bulk.endpoint, job_id, batch_id
)
# Download entire result file to a temporary file first
# to avoid the server dropping connections
with _download_file(results_url, self.bulk) as f:
self.logger.info(
" Downloaded results for batch {}".format(batch_id)
)
self._store_inserted_ids_for_batch(
f, local_ids, id_table_name, conn
)
self.logger.info(
" Updated {} for batch {}".format(id_table_name, batch_id)
)
except Exception: # pragma: nocover
# If we can't download one result file,
# don't let that stop us from downloading the others
self.logger.error(
"Could not download batch results: {}".format(batch_id)
)
continue
self.session.commit()
|
Create an empty table to hold the inserted SF Ids
|
def _reset_id_table(self, mapping):
"""Create an empty table to hold the inserted SF Ids"""
if not hasattr(self, "_initialized_id_tables"):
self._initialized_id_tables = set()
id_table_name = "{}_sf_ids".format(mapping["table"])
if id_table_name not in self._initialized_id_tables:
if id_table_name in self.metadata.tables:
self.metadata.remove(self.metadata.tables[id_table_name])
id_table = Table(
id_table_name,
self.metadata,
Column("id", Unicode(255), primary_key=True),
Column("sf_id", Unicode(18)),
)
if id_table.exists():
id_table.drop()
id_table.create()
self._initialized_id_tables.add(id_table_name)
return id_table_name
|
Returns the first mapping for a table name
|
def _get_mapping_for_table(self, table):
""" Returns the first mapping for a table name """
for mapping in self.mappings.values():
if mapping["table"] == table:
return mapping
|
Loads the configuration from YAML, if no override config was passed in initially.
|
def _load_config(self):
""" Loads the configuration from YAML, if no override config was passed in initially. """
if (
self.config
): # any config being pre-set at init will short circuit out, but not a plain {}
return
# Verify that we're in a project
repo_root = self.repo_root
if not repo_root:
raise NotInProject(
"No git repository was found in the current path. You must be in a git repository to set up and use CCI for a project."
)
# Verify that the project's root has a config file
if not self.config_project_path:
raise ProjectConfigNotFound(
"The file {} was not found in the repo root: {}. Are you in a CumulusCI Project directory?".format(
self.config_filename, repo_root
)
)
# Load the project's yaml config file
with open(self.config_project_path, "r") as f_config:
project_config = ordered_yaml_load(f_config)
if project_config:
self.config_project.update(project_config)
# Load the local project yaml config file if it exists
if self.config_project_local_path:
with open(self.config_project_local_path, "r") as f_local_config:
local_config = ordered_yaml_load(f_local_config)
if local_config:
self.config_project_local.update(local_config)
# merge in any additional yaml that was passed along
if self.additional_yaml:
additional_yaml_config = ordered_yaml_load(self.additional_yaml)
if additional_yaml_config:
self.config_additional_yaml.update(additional_yaml_config)
self.config = merge_config(
OrderedDict(
[
("global_config", self.config_global),
("global_local", self.config_global_local),
("project_config", self.config_project),
("project_local_config", self.config_project_local),
("additional_yaml", self.config_additional_yaml),
]
)
)
|
Initializes sentry.io error logging for this session
|
def init_sentry(self,):
""" Initializes sentry.io error logging for this session """
if not self.use_sentry:
return
sentry_config = self.keychain.get_service("sentry")
tags = {
"repo": self.repo_name,
"branch": self.repo_branch,
"commit": self.repo_commit,
"cci version": cumulusci.__version__,
}
tags.update(self.config.get("sentry_tags", {}))
env = self.config.get("sentry_environment", "CumulusCI CLI")
self.sentry = raven.Client(
dsn=sentry_config.dsn,
environment=env,
tags=tags,
processors=("raven.processors.SanitizePasswordsProcessor",),
)
|
Query GitHub releases to find the previous production release
|
def get_previous_version(self):
"""Query GitHub releases to find the previous production release"""
gh = self.get_github_api()
repo = gh.repository(self.repo_owner, self.repo_name)
most_recent = None
for release in repo.releases():
# Return the second release that matches the release prefix
if release.tag_name.startswith(self.project__git__prefix_release):
if most_recent is None:
most_recent = release
else:
return LooseVersion(self.get_version_for_tag(release.tag_name))
|
location of the user local directory for the project
e.g., ~/.cumulusci/NPSP-Extension-Test/
|
def project_local_dir(self):
""" location of the user local directory for the project
e.g., ~/.cumulusci/NPSP-Extension-Test/ """
# depending on where we are in bootstrapping the BaseGlobalConfig
# the canonical projectname could be located in one of two places
if self.project__name:
name = self.project__name
else:
name = self.config_project.get("project", {}).get("name", "")
if name is None:
name = (
""
) # not entirely sure why this was happening in tests but this is the goal...
path = os.path.join(
os.path.expanduser("~"), self.global_config_obj.config_local_dir, name
)
if not os.path.isdir(path):
os.makedirs(path)
return path
|
Resolves the project -> dependencies section of cumulusci.yml
to convert dynamic github dependencies into static dependencies
by inspecting the referenced repositories
Keyword arguments:
:param dependencies: a list of dependencies to resolve
:param include_beta: when true, return the latest github release,
even if pre-release; else return the latest stable release
|
def get_static_dependencies(self, dependencies=None, include_beta=None):
"""Resolves the project -> dependencies section of cumulusci.yml
to convert dynamic github dependencies into static dependencies
by inspecting the referenced repositories
Keyword arguments:
:param dependencies: a list of dependencies to resolve
:param include_beta: when true, return the latest github release,
even if pre-release; else return the latest stable release
"""
if not dependencies:
dependencies = self.project__dependencies
if not dependencies:
return []
static_dependencies = []
for dependency in dependencies:
if "github" not in dependency:
static_dependencies.append(dependency)
else:
static = self.process_github_dependency(
dependency, include_beta=include_beta
)
static_dependencies.extend(static)
return static_dependencies
|
Initializes self.logger
|
def _init_logger(self):
""" Initializes self.logger """
if self.flow:
self.logger = self.flow.logger.getChild(self.__class__.__name__)
else:
self.logger = logging.getLogger(__name__)
|
Initializes self.options
|
def _init_options(self, kwargs):
""" Initializes self.options """
self.options = self.task_config.options
if self.options is None:
self.options = {}
if kwargs:
self.options.update(kwargs)
# Handle dynamic lookup of project_config values via $project_config.attr
for option, value in list(self.options.items()):
try:
if value.startswith("$project_config."):
attr = value.replace("$project_config.", "", 1)
self.options[option] = getattr(self.project_config, attr, None)
except AttributeError:
pass
|
Log the beginning of the task execution
|
def _log_begin(self):
""" Log the beginning of the task execution """
self.logger.info("Beginning task: %s", self.__class__.__name__)
if self.salesforce_task and not self.flow:
self.logger.info("%15s %s", "As user:", self.org_config.username)
self.logger.info("%15s %s", "In org:", self.org_config.org_id)
self.logger.info("")
|
poll for a result in a loop
|
def _poll(self):
""" poll for a result in a loop """
while True:
self.poll_count += 1
self._poll_action()
if self.poll_complete:
break
time.sleep(self.poll_interval_s)
self._poll_update_interval()
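The poll loop and the stepped back-off used with it (see _poll_update_interval below), folded into one standalone function; the fake check and the tiny starting interval are illustrative.
import time

def poll(check, interval_s=0.01, max_polls=100):
    count = level = 0
    while count < max_polls:
        count += 1
        if check():
            return count
        time.sleep(interval_s)
        if count // 3 > level:  # every three polls, widen the interval
            level += 1
            interval_s += 1

done_at = iter([False, False, True])
print(poll(lambda: next(done_at)))  # 3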
|
update the polling interval to be used next iteration
|
def _poll_update_interval(self):
""" update the polling interval to be used next iteration """
# Increase by 1 second every 3 polls
if old_div(self.poll_count, 3) > self.poll_interval_level:
self.poll_interval_level += 1
self.poll_interval_s += 1
self.logger.info(
"Increased polling interval to %d seconds", self.poll_interval_s
)
|
Returns a ProjectConfig for the given project
|
def get_project_config(self, *args, **kwargs):
""" Returns a ProjectConfig for the given project """
warnings.warn(
"BaseGlobalConfig.get_project_config is pending deprecation",
DeprecationWarning,
)
return self.project_config_class(self, *args, **kwargs)
|
Loads the local configuration
|
def _load_config(self):
""" Loads the local configuration """
# load the global config
with open(self.config_global_path, "r") as f_config:
config = ordered_yaml_load(f_config)
self.config_global = config
# Load the local config
if self.config_global_local_path:
    with open(self.config_global_local_path, "r") as f_local_config:
        config = ordered_yaml_load(f_local_config)
    self.config_global_local = config
self.config = merge_config(
OrderedDict(
[
("global_config", self.config_global),
("global_local", self.config_global_local),
]
)
)
|
Username for the org connection.
|
def username(self):
""" Username for the org connection. """
username = self.config.get("username")
if not username:
username = self.userinfo__preferred_username
return username
|
Opens a file for tracking the time of the last version check
|
def timestamp_file():
"""Opens a file for tracking the time of the last version check"""
config_dir = os.path.join(
os.path.expanduser("~"), BaseGlobalConfig.config_local_dir
)
if not os.path.exists(config_dir):
os.mkdir(config_dir)
timestamp_file = os.path.join(config_dir, "cumulus_timestamp")
try:
with open(timestamp_file, "r+") as f:
yield f
except IOError: # file does not exist
with open(timestamp_file, "w+") as f:
yield f
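timestamp_file is written as a generator that yields an open file, which suggests it is wrapped with contextlib.contextmanager where it is really defined; a minimal sketch of that pattern with a throwaway path:
import contextlib
import os
import tempfile

@contextlib.contextmanager
def timestamp_file(path):
    try:
        with open(path, 'r+') as f:   # reuse the existing file
            yield f
    except IOError:                   # file does not exist yet
        with open(path, 'w+') as f:
            yield f

path = os.path.join(tempfile.gettempdir(), 'cumulus_timestamp_demo')
with timestamp_file(path) as f:
    f.write('1553450000')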
|
Decorator which passes the CCI config object as the first arg to a click command.
|
def pass_config(func=None, **config_kw):
"""Decorator which passes the CCI config object as the first arg to a click command."""
def decorate(func):
def new_func(*args, **kw):
config = load_config(**config_kw)
func(config, *args, **kw)
return functools.update_wrapper(new_func, func)
if func is None:
return decorate
else:
return decorate(func)
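The decorate-with-or-without-arguments pattern used by pass_config, shown standalone; load_config here is a stand-in for the real loader.
import functools

def load_config(**kw):
    return {'config': kw}  # stand-in for the real config loader

def pass_config(func=None, **config_kw):
    def decorate(func):
        def new_func(*args, **kw):
            config = load_config(**config_kw)
            return func(config, *args, **kw)
        return functools.update_wrapper(new_func, func)
    # Bare @pass_config supplies func directly; @pass_config(...) does not.
    return decorate if func is None else decorate(func)

@pass_config
def plain(config):
    return config

@pass_config(allow_global=True)
def with_args(config):
    return config

print(plain(), with_args())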
|
list the services that can be configured
|
def list_commands(self, ctx):
""" list the services that can be configured """
config = load_config(**self.load_config_kwargs)
services = self._get_services_config(config)
return sorted(services.keys())
|
parse a datetime returned from the salesforce API.
in python 3 we should just use a strptime %z, but until then we're just going
to assert that it's a fixed offset of +0000 since that's the observed behavior. getting
python 2 to support fixed offset parsing is too complicated for what we need imo.
|
def parse_api_datetime(value):
""" parse a datetime returned from the salesforce API.
in python 3 we should just use a strptime %z, but until then we're just going
to assert that its a fixed offset of +0000 since thats the observed behavior. getting
python 2 to support fixed offset parsing is too complicated for what we need imo."""
dt = datetime.strptime(value[0:DATETIME_LEN], API_DATE_FORMAT)
offset_str = value[DATETIME_LEN:]
assert offset_str in ["+0000", "Z"], "The Salesforce API returned a weird timezone."
return dt
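On Python 3 the fixed-offset parsing the docstring alludes to really is a strptime %z one-liner; the sample value mimics the API format assumed above.
from datetime import datetime

value = "2019-01-02T03:04:05.000+0000"
dt = datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f%z")
print(dt.isoformat())  # 2019-01-02T03:04:05+00:00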
|
Recursively walk a directory and remove XML elements
|
def removeXmlElement(name, directory, file_pattern, logger=None):
""" Recursively walk a directory and remove XML elements """
for path, dirs, files in os.walk(os.path.abspath(directory)):
for filename in fnmatch.filter(files, file_pattern):
filepath = os.path.join(path, filename)
remove_xml_element_file(name, filepath)
|
Remove XML elements from a single file
|
def remove_xml_element_file(name, path):
""" Remove XML elements from a single file """
ET.register_namespace("", "http://soap.sforce.com/2006/04/metadata")
tree = elementtree_parse_file(path)
tree = remove_xml_element(name, tree)
return tree.write(path, encoding=UTF8, xml_declaration=True)
|
Remove XML elements from a string
|
def remove_xml_element_string(name, content):
""" Remove XML elements from a string """
ET.register_namespace("", "http://soap.sforce.com/2006/04/metadata")
tree = ET.fromstring(content)
tree = remove_xml_element(name, tree)
clean_content = ET.tostring(tree, encoding=UTF8)
return clean_content
|
Removes XML elements from an ElementTree content tree
|
def remove_xml_element(name, tree):
""" Removes XML elements from an ElementTree content tree """
# root = tree.getroot()
remove = tree.findall(
".//{{http://soap.sforce.com/2006/04/metadata}}{}".format(name)
)
if not remove:
return tree
parent_map = {c: p for p in tree.iter() for c in p}
for elem in remove:
parent = parent_map[elem]
parent.remove(elem)
return tree
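Because ElementTree elements keep no reference to their parents, the parent-map comprehension is the crucial trick; a self-contained demonstration on a made-up metadata snippet:
import xml.etree.ElementTree as ET

root = ET.fromstring(
    "<Package><types><name>ApexClass</name></types>"
    "<packageVersions><majorNumber>1</majorNumber></packageVersions></Package>"
)
# Map every element to its parent so matches can be detached.
parent_map = {c: p for p in root.iter() for c in p}
for elem in root.findall(".//packageVersions"):
    parent_map[elem].remove(elem)
print(ET.tostring(root).decode())
# <Package><types><name>ApexClass</name></types></Package>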
|
Replaces %%%NAMESPACE%%% in all files and ___NAMESPACE___ in all
filenames in the zip with either '' (if no namespace is provided)
or 'namespace__' (if one is).
|
def zip_inject_namespace(
zip_src,
namespace=None,
managed=None,
filename_token=None,
namespace_token=None,
namespaced_org=None,
logger=None,
):
""" Replaces %%%NAMESPACE%%% for all files and ___NAMESPACE___ in all
filenames in the zip with the either '' if no namespace is provided
or 'namespace__' if provided.
"""
# Handle namespace and filename tokens
if not filename_token:
filename_token = "___NAMESPACE___"
if not namespace_token:
namespace_token = "%%%NAMESPACE%%%"
if managed is True and namespace:
namespace_prefix = namespace + "__"
else:
namespace_prefix = ""
# Handle tokens %%%NAMESPACED_ORG%%% and ___NAMESPACED_ORG___
namespaced_org_token = "%%%NAMESPACED_ORG%%%"
namespaced_org_file_token = "___NAMESPACED_ORG___"
namespaced_org = namespace_prefix if namespaced_org else ""
# Handle token %%%NAMESPACE_OR_C%%% for lightning components
namespace_or_c_token = "%%%NAMESPACE_OR_C%%%"
namespace_or_c = namespace if managed and namespace else "c"
# Handle token %%%NAMESPACED_ORG_OR_C%%%
namespaced_org_or_c_token = "%%%NAMESPACED_ORG_OR_C%%%"
namespaced_org_or_c = namespace if namespaced_org else "c"
zip_dest = zipfile.ZipFile(io.BytesIO(), "w", zipfile.ZIP_DEFLATED)
for name in zip_src.namelist():
orig_name = str(name)
content = zip_src.read(name)
try:
content = content.decode("utf-8")
except UnicodeDecodeError:
# if we cannot decode the content, don't try and replace it.
pass
else:
prev_content = content
content = content.replace(namespace_token, namespace_prefix)
if logger and content != prev_content:
logger.info(
' {}: Replaced %%%NAMESPACE%%% with "{}"'.format(name, namespace)
)
prev_content = content
content = content.replace(namespace_or_c_token, namespace_or_c)
if logger and content != prev_content:
logger.info(
' {}: Replaced %%%NAMESPACE_OR_C%%% with "{}"'.format(
name, namespace_or_c
)
)
prev_content = content
content = content.replace(namespaced_org_token, namespaced_org)
if logger and content != prev_content:
logger.info(
' {}: Replaced %%%NAMESPACED_ORG%%% with "{}"'.format(
name, namespaced_org
)
)
prev_content = content
content = content.replace(namespaced_org_or_c_token, namespaced_org_or_c)
if logger and content != prev_content:
logger.info(
' {}: Replaced %%%NAMESPACED_ORG_OR_C%%% with "{}"'.format(
name, namespaced_org_or_c
)
)
content = content.encode("utf-8")
# Replace namespace token in file name
name = name.replace(filename_token, namespace_prefix)
name = name.replace(namespaced_org_file_token, namespaced_org)
if logger and name != orig_name:
logger.info(" {}: renamed to {}".format(orig_name, name))
zip_dest.writestr(name, content)
return zip_dest
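The heart of the loop, trimmed to a runnable sketch over an in-memory zip; the member name, tokens, and the 'npsp__' prefix are made up.
import io
import zipfile

src_buf = io.BytesIO()
with zipfile.ZipFile(src_buf, "w") as zip_src:
    zip_src.writestr("classes/___NAMESPACE___Util.cls",
                     "public class %%%NAMESPACE%%%Util {}")
zip_src = zipfile.ZipFile(src_buf, "r")

namespace_prefix = "npsp__"
zip_dest = zipfile.ZipFile(io.BytesIO(), "w", zipfile.ZIP_DEFLATED)
for name in zip_src.namelist():
    content = zip_src.read(name).decode("utf-8")
    content = content.replace("%%%NAMESPACE%%%", namespace_prefix)  # in files
    name = name.replace("___NAMESPACE___", namespace_prefix)        # in names
    zip_dest.writestr(name, content.encode("utf-8"))
print(zip_dest.namelist())  # ['classes/npsp__Util.cls']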
|
Given a namespace, strips 'namespace__' from all files and filenames
in the zip
|
def zip_strip_namespace(zip_src, namespace, logger=None):
""" Given a namespace, strips 'namespace__' from all files and filenames
in the zip
"""
namespace_prefix = "{}__".format(namespace)
lightning_namespace = "{}:".format(namespace)
zip_dest = zipfile.ZipFile(io.BytesIO(), "w", zipfile.ZIP_DEFLATED)
for name in zip_src.namelist():
orig_content = zip_src.read(name)
try:
orig_content = orig_content.decode("utf-8")
except UnicodeDecodeError:
# if we cannot decode the content, don't try and replace it.
new_content = orig_content
else:
new_content = orig_content.replace(namespace_prefix, "")
new_content = new_content.replace(lightning_namespace, "c:")
name = name.replace(namespace_prefix, "") # not...sure...this..gets...used
if orig_content != new_content and logger:
logger.info(
" {file_name}: removed {namespace}".format(
file_name=name, namespace=namespace_prefix
)
)
new_content = new_content.encode("utf-8")
zip_dest.writestr(name, new_content)
return zip_dest
|
Given a namespace, replaces 'namespace__' with %%%NAMESPACE%%% for all
files and ___NAMESPACE___ in all filenames in the zip
|
def zip_tokenize_namespace(zip_src, namespace, logger=None):
""" Given a namespace, replaces 'namespace__' with %%%NAMESPACE%%% for all
files and ___NAMESPACE___ in all filenames in the zip
"""
if not namespace:
return zip_src
namespace_prefix = "{}__".format(namespace)
lightning_namespace = "{}:".format(namespace)
zip_dest = zipfile.ZipFile(io.BytesIO(), "w", zipfile.ZIP_DEFLATED)
for name in zip_src.namelist():
content = zip_src.read(name)
try:
content = content.decode("utf-8")
except UnicodeDecodeError:
# Probably a binary file; leave it untouched
pass
else:
content = content.replace(namespace_prefix, "%%%NAMESPACE%%%")
content = content.replace(lightning_namespace, "%%%NAMESPACE_OR_C%%%")
content = content.encode("utf-8")
name = name.replace(namespace_prefix, "___NAMESPACE___")
zip_dest.writestr(name, content)
return zip_dest
|
Given a zipfile, cleans all *-meta.xml files in the zip for
deployment by stripping all <packageVersions/> elements
|
def zip_clean_metaxml(zip_src, logger=None):
""" Given a zipfile, cleans all *-meta.xml files in the zip for
deployment by stripping all <packageVersions/> elements
"""
zip_dest = zipfile.ZipFile(io.BytesIO(), "w", zipfile.ZIP_DEFLATED)
changed = []
for name in zip_src.namelist():
content = zip_src.read(name)
if name.startswith(META_XML_CLEAN_DIRS) and name.endswith("-meta.xml"):
try:
content.decode("utf-8")
except UnicodeDecodeError:
# if we cannot decode the content, it may be binary;
# don't try and replace it.
pass
else:
clean_content = remove_xml_element_string("packageVersions", content)
if clean_content != content:
changed.append(name)
content = clean_content
zip_dest.writestr(name, content)
if changed and logger:
logger.info(
"Cleaned package versions from {} meta.xml files".format(len(changed))
)
return zip_dest
|
Document a (project specific) task configuration in RST format.
|
def doc_task(task_name, task_config, project_config=None, org_config=None):
""" Document a (project specific) task configuration in RST format. """
from cumulusci.core.utils import import_class
doc = []
doc.append("{}\n==========================================\n".format(task_name))
doc.append("**Description:** {}\n".format(task_config.description))
doc.append("**Class::** {}\n".format(task_config.class_path))
task_class = import_class(task_config.class_path)
task_docs = textwrap.dedent(task_class.task_docs.strip("\n"))
if task_docs:
doc.append(task_docs + "\n")
if task_class.task_options:
doc.append("Options:\n------------------------------------------\n")
defaults = task_config.options or {}
for name, option in list(task_class.task_options.items()):
default = defaults.get(name)
if default:
default = " **Default: {}**".format(default)
else:
default = ""
if option.get("required"):
doc.append(
"* **{}** *(required)*: {}{}".format(
name, option.get("description"), default
)
)
else:
doc.append(
"* **{}**: {}{}".format(name, option.get("description"), default)
)
return "\n".join(doc)
|
Context manager that creates a temporary directory and chdirs to it.
When the context manager exits it returns to the previous cwd
and deletes the temporary directory.
|
def temporary_dir():
"""Context manager that creates a temporary directory and chdirs to it.
When the context manager exits it returns to the previous cwd
and deletes the temporary directory.
"""
d = tempfile.mkdtemp()
try:
with cd(d):
yield d
finally:
if os.path.exists(d):
shutil.rmtree(d)
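temporary_dir leans on a cd() context manager that is not shown in this excerpt; a minimal sketch of both, assuming cd simply saves and restores the working directory:
import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def cd(path):
    cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(cwd)  # always return to the previous cwd

@contextlib.contextmanager
def temporary_dir():
    d = tempfile.mkdtemp()
    try:
        with cd(d):
            yield d
    finally:
        if os.path.exists(d):
            shutil.rmtree(d)

with temporary_dir() as d:
    print(os.getcwd() == os.path.realpath(d))  # True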
|
Returns a boolean for whether filepath is contained in dirpath.
Normalizes the paths (e.g. resolving symlinks and ..)
so this is the safe way to make sure a user-configured path
is located inside the user's project repo.
|
def in_directory(filepath, dirpath):
"""Returns a boolean for whether filepath is contained in dirpath.
Normalizes the paths (e.g. resolving symlinks and ..)
so this is the safe way to make sure a user-configured path
is located inside the user's project repo.
"""
filepath = os.path.realpath(filepath)
dirpath = os.path.realpath(dirpath)
return filepath == dirpath or filepath.startswith(os.path.join(dirpath, ""))
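Two quick checks of the containment test, with the function restated so the snippet stands alone; the example paths assume a POSIX layout.
import os

def in_directory(filepath, dirpath):
    filepath = os.path.realpath(filepath)
    dirpath = os.path.realpath(dirpath)
    return filepath == dirpath or filepath.startswith(os.path.join(dirpath, ""))

print(in_directory("/repo/src/a.py", "/repo"))       # True
print(in_directory("/repo/../etc/passwd", "/repo"))  # False: .. escapes /repo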
|
Log progress while iterating.
|
def log_progress(
iterable,
logger,
batch_size=10000,
progress_message="Processing... ({})",
done_message="Done! (Total: {})",
):
"""Log progress while iterating.
"""
i = 0
for x in iterable:
yield x
i += 1
if not i % batch_size:
logger.info(progress_message.format(i))
logger.info(done_message.format(i))
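An example run, assuming the log_progress generator above is in scope; batch_size is lowered so the progress lines actually fire.
import logging

logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger("demo")
for _ in log_progress(range(25), logger, batch_size=10):
    pass
# Processing... (10)
# Processing... (20)
# Done! (Total: 25)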
|
Returns the login url which will automatically log into the target
Salesforce org. By default, the org_name passed to the library
constructor is used but this can be overridden with the org option
to log into a different org.
|
def login_url(self, org=None):
""" Returns the login url which will automatically log into the target
Salesforce org. By default, the org_name passed to the library
constructor is used but this can be overridden with the org option
to log into a different org.
"""
if org is None:
org = self.org
else:
org = self.keychain.get_org(org)
return org.start_url
|
Runs a named CumulusCI task for the current project with optional
support for overriding task options via kwargs.
Examples:
| =Keyword= | =task_name= | =task_options= | =comment= |
| Run Task | deploy | | Run deploy with standard options |
| Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |
|
def run_task(self, task_name, **options):
""" Runs a named CumulusCI task for the current project with optional
support for overriding task options via kwargs.
Examples:
| =Keyword= | =task_name= | =task_options= | =comment= |
| Run Task | deploy | | Run deploy with standard options |
| Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |
"""
task_config = self.project_config.get_task(task_name)
class_path = task_config.class_path
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, task_config)
return self._run_task(task_class, task_config)
|
Returns the namespace prefix (including __) for the specified package name.
(Defaults to project__package__name_managed from the current project config.)
Returns an empty string if the package is not installed as a managed package.
|
def get_namespace_prefix(self, package=None):
""" Returns the namespace prefix (including __) for the specified package name.
(Defaults to project__package__name_managed from the current project config.)
Returns an empty string if the package is not installed as a managed package.
"""
result = ""
if package is None:
package = self.project_config.project__package__name_managed
packages = self.tooling.query(
"SELECT SubscriberPackage.NamespacePrefix, SubscriberPackage.Name "
"FROM InstalledSubscriberPackage"
)
match = [
p for p in packages["records"] if p["SubscriberPackage"]["Name"] == package
]
if match:
result = match[0]["SubscriberPackage"]["NamespacePrefix"] + "__"
return result
|
Runs a CumulusCI task class with task options via kwargs.
Use this keyword to run logic from CumulusCI tasks which have not
been configured in the project's cumulusci.yml file. This is
most useful in cases where a test needs task logic that is
unique to the test and thus not worth making into a named
task for the project.
Examples:
| =Keyword= | =task_class= | =task_options= |
| Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
|
def run_task_class(self, class_path, **options):
""" Runs a CumulusCI task class with task options via kwargs.
Use this keyword to run logic from CumulusCI tasks which have not
been configured in the project's cumulusci.yml file. This is
most useful in cases where a test needs task logic that is
unique to the test and thus not worth making into a named
task for the project.
Examples:
| =Keyword= | =task_class= | =task_options= |
| Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
"""
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, TaskConfig())
return self._run_task(task_class, task_config)
|
Returns a TaskConfig
|
def get_task(self, name):
""" Returns a TaskConfig """
config = getattr(self, "tasks__{}".format(name))
if not config:
raise TaskNotFoundError("Task not found: {}".format(name))
return TaskConfig(config)
|
Returns a FlowConfig
|
def get_flow(self, name):
""" Returns a FlowConfig """
config = getattr(self, "flows__{}".format(name))
if not config:
raise FlowNotFoundError("Flow not found: {}".format(name))
return FlowConfig(config)
|
Returns the rendered release notes from all parsers as a string
|
def render(self):
""" Returns the rendered release notes from all parsers as a string """
release_notes = []
for parser in self.parsers:
parser_content = parser.render()
if parser_content is not None:
release_notes.append(parser_content)
return u"\r\n\r\n".join(release_notes)
|
Merge existing and new release content.
|
def _update_release_content(self, release, content):
"""Merge existing and new release content."""
if release.body:
new_body = []
current_parser = None
is_start_line = False
for parser in self.parsers:
parser.replaced = False
# update existing sections
for line in release.body.splitlines():
if current_parser:
if current_parser._is_end_line(current_parser._process_line(line)):
parser_content = current_parser.render()
if parser_content:
# replace existing section with new content
new_body.append(parser_content + "\r\n")
current_parser = None
for parser in self.parsers:
if (
parser._render_header().strip()
== parser._process_line(line).strip()
):
parser.replaced = True
current_parser = parser
is_start_line = True
break
else:
is_start_line = False
if is_start_line:
continue
if current_parser:
continue
else:
# preserve existing sections
new_body.append(line.strip())
# catch section without end line
if current_parser:
new_body.append(current_parser.render())
# add new sections at bottom
for parser in self.parsers:
parser_content = parser.render()
if parser_content and not parser.replaced:
new_body.append(parser_content + "\r\n")
content = u"\r\n".join(new_body)
return content
|
Get a primed and ready-to-go flow coordinator.
|
def get_flow(self, name, options=None):
""" Get a primed and readytogo flow coordinator. """
config = self.project_config.get_flow(name)
callbacks = self.callback_class()
coordinator = FlowCoordinator(
self.project_config,
config,
name=name,
options=options,
skip=None,
callbacks=callbacks,
)
return coordinator
|
loads the environment variables, decoding bytes keys and values to unicode
|
def _get_env(self):
""" loads the environment variables as unicode if ascii """
env = {}
for k, v in os.environ.items():
k = k.decode() if isinstance(k, bytes) else k
v = v.decode() if isinstance(v, bytes) else v
env[k] = v
return list(env.items())
|
Import a class from a string module class path
|
def import_class(path):
""" Import a class from a string module class path """
components = path.split(".")
module = components[:-1]
module = ".".join(module)
mod = __import__(module, fromlist=[native_str(components[-1])])
return getattr(mod, native_str(components[-1]))
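An equivalent lookup with importlib, which sidesteps the native_str/fromlist details; the dotted path is just an example.
import importlib

def import_class(path):
    module_name, _, class_name = path.rpartition(".")
    return getattr(importlib.import_module(module_name), class_name)

print(import_class("collections.OrderedDict"))  # <class 'collections.OrderedDict'>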
|
Create a timezone-aware datetime object from a datetime string.
|
def parse_datetime(dt_str, format):
    """Create a timezone-aware datetime object from a datetime string."""
    t = time.strptime(dt_str, format)
    # Only the first six struct_time fields (year..second) belong in the
    # datetime; t[6] is the weekday, not microseconds.
    return datetime(*t[:6], tzinfo=pytz.UTC)
|
Determine True/False from argument
|
def process_bool_arg(arg):
""" Determine True/False from argument """
if isinstance(arg, bool):
return arg
elif isinstance(arg, basestring):
if arg.lower() in ["true", "1"]:
return True
elif arg.lower() in ["false", "0"]:
return False
|
Parse a comma-separated string into a list, with whitespace stripped from each item
|
def process_list_arg(arg):
""" Parse a string into a list separated by commas with whitespace stripped """
if isinstance(arg, list):
return arg
elif isinstance(arg, basestring):
args = []
for part in arg.split(","):
args.append(part.strip())
return args
|
decode ISO-8859-1 to unicode, when using sf api
|
def decode_to_unicode(content):
""" decode ISO-8859-1 to unicode, when using sf api """
if content and not isinstance(content, str):
try:
# Try to decode ISO-8859-1 to unicode
return content.decode("ISO-8859-1")
except UnicodeEncodeError:
# Assume content is unicode already
return content
return content
|
recursively deep-merge the configs into one another (highest priority comes first)
|
def merge_config(configs):
""" recursively deep-merge the configs into one another (highest priority comes first) """
new_config = {}
for name, config in configs.items():
new_config = dictmerge(new_config, config, name)
return new_config
|
Deeply merge two ``dict``s that consist of lists, dicts, and scalars.
This function (recursively) merges ``b`` INTO ``a``, does not copy any values, and returns ``a``.
based on https://stackoverflow.com/a/15836901/5042831
NOTE: tuples and arbitrary objects are NOT handled and will raise TypeError
|
def dictmerge(a, b, name=None):
""" Deeply merge two ``dict``s that consist of lists, dicts, and scalars.
This function (recursively) merges ``b`` INTO ``a``, does not copy any values, and returns ``a``.
based on https://stackoverflow.com/a/15836901/5042831
NOTE: tuples and arbitrary objects are NOT handled and will raise TypeError """
key = None
if b is None:
return a
try:
if a is None or isinstance(a, (bytes, int, str, float)):
# first run, or if ``a`` is a scalar
a = b
elif isinstance(a, list):
# lists can be only appended
if isinstance(b, list):
# merge lists
a.extend(b)
else:
# append to list
a.append(b)
elif isinstance(a, dict):
# dicts must be merged
if isinstance(b, dict):
for key in b:
if key in a:
a[key] = dictmerge(a[key], b[key], name)
else:
a[key] = copy.copy(b[key])
else:
raise TypeError(
'Cannot merge non-dict of type "{}" into dict "{}"'.format(
type(b), a
)
)
else:
raise TypeError(
'dictmerge does not support merging "{}" into "{}"'.format(
type(b), type(a)
)
)
except TypeError as e:
raise ConfigMergeError(
'TypeError "{}" in key "{}" when merging "{}" into "{}"'.format(
e, key, type(b), type(a)
),
config_name=name,
)
return a
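A small demonstration of the merge semantics, assuming the dictmerge above is in scope (with the copy module imported): dicts merge recursively, lists append, and scalar leaves from b replace those in a.
a = {"tasks": {"deploy": {"options": {"path": "src"}}}, "flags": [1]}
b = {"tasks": {"deploy": {"options": {"purge": True}}}, "flags": [2]}
merged = dictmerge(a, b)
print(merged["tasks"]["deploy"]["options"])  # {'path': 'src', 'purge': True}
print(merged["flags"])                       # [1, 2]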
|
Create a Version in MetaDeploy if it doesn't already exist
|
def _find_or_create_version(self, product):
"""Create a Version in MetaDeploy if it doesn't already exist
"""
tag = self.options["tag"]
label = self.project_config.get_version_for_tag(tag)
result = self._call_api(
"GET", "/versions", params={"product": product["id"], "label": label}
)
if len(result["data"]) == 0:
version = self._call_api(
"POST",
"/versions",
json={
"product": product["url"],
"label": label,
"description": self.options.get("description", ""),
"is_production": True,
"commit_ish": tag,
"is_listed": False,
},
)
self.logger.info("Created {}".format(version["url"]))
else:
version = result["data"][0]
self.logger.info("Found {}".format(version["url"]))
return version
|
Generate the html. `libraries` is a list of LibraryDocumentation objects
|
def _render_html(self, libraries):
"""Generate the html. `libraries` is a list of LibraryDocumentation objects"""
title = self.options.get("title", "Keyword Documentation")
date = time.strftime("%A %B %d, %I:%M %p")
cci_version = cumulusci.__version__
stylesheet_path = os.path.join(os.path.dirname(__file__), "stylesheet.css")
with open(stylesheet_path) as f:
stylesheet = f.read()
jinjaenv = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), autoescape=False
)
jinjaenv.filters["robot_html"] = robot.utils.html_format
template = jinjaenv.get_template("template.html")
return template.render(
libraries=libraries,
title=title,
cci_version=cci_version,
stylesheet=stylesheet,
date=date,
)
|
Uses sfdx force:org:create to create the org
|
def create_org(self):
""" Uses sfdx force:org:create to create the org """
if not self.config_file:
# FIXME: raise exception
return
if not self.scratch_org_type:
self.config["scratch_org_type"] = "workspace"
# If the scratch org definition itself contains an `adminEmail` entry,
# we don't want to override it from our own configuration, which may
# simply come from the user's Git config.
with open(self.config_file, "r") as org_def:
org_def_data = json.load(org_def)
org_def_has_email = "adminEmail" in org_def_data
options = {
"config_file": self.config_file,
"devhub": " --targetdevhubusername {}".format(self.devhub)
if self.devhub
else "",
"namespaced": " -n" if not self.namespaced else "",
"days": " --durationdays {}".format(self.days) if self.days else "",
"alias": sarge.shell_format(' -a "{0!s}"', self.sfdx_alias)
if self.sfdx_alias
else "",
"email": sarge.shell_format('adminEmail="{0!s}"', self.email_address)
if self.email_address and not org_def_has_email
else "",
"extraargs": os.environ.get("SFDX_ORG_CREATE_ARGS", ""),
}
# This feels a little dirty, but the use cases for extra args would mostly
# work best with env vars
command = "sfdx force:org:create -f {config_file}{devhub}{namespaced}{days}{alias} {email} {extraargs}".format(
**options
)
self.logger.info("Creating scratch org with command {}".format(command))
p = sarge.Command(
command,
stdout=sarge.Capture(buffer_size=-1),
stderr=sarge.Capture(buffer_size=-1),
shell=True,
)
p.run()
stderr = [line.strip() for line in io.TextIOWrapper(p.stderr)]
stdout = [line.strip() for line in io.TextIOWrapper(p.stdout)]
if p.returncode:
message = "{}: \n{}\n{}".format(
FAILED_TO_CREATE_SCRATCH_ORG, "\n".join(stdout), "\n".join(stderr)
)
raise ScratchOrgException(message)
re_obj = re.compile("Successfully created scratch org: (.+), username: (.+)")
for line in stdout:
match = re_obj.search(line)
if match:
self.config["org_id"] = match.group(1)
self.config["username"] = match.group(2)
self.logger.info(line)
for line in stderr:
self.logger.error(line)
self.config["date_created"] = datetime.datetime.now()
if self.config.get("set_password"):
self.generate_password()
# Flag that this org has been created
self.config["created"] = True
|
Generates an org password with the sfdx utility.
|
def generate_password(self):
"""Generates an org password with the sfdx utility. """
if self.password_failed:
self.logger.warning("Skipping resetting password since last attempt failed")
return
# Set a random password so it's available via cci org info
command = sarge.shell_format(
"sfdx force:user:password:generate -u {0}", self.username
)
self.logger.info(
"Generating scratch org user password with command {}".format(command)
)
p = sarge.Command(
command,
stdout=sarge.Capture(buffer_size=-1),
stderr=sarge.Capture(buffer_size=-1),
shell=True,
)
p.run()
stderr = io.TextIOWrapper(p.stderr).readlines()
stdout = io.TextIOWrapper(p.stdout).readlines()
if p.returncode:
self.config["password_failed"] = True
# Don't throw an exception because of failure creating the
# password, just notify in a log message
self.logger.warning(
"Failed to set password: \n{}\n{}".format(
"\n".join(stdout), "\n".join(stderr)
)
)
|
Uses sfdx force:org:delete to delete the org
|
def delete_org(self):
""" Uses sfdx force:org:delete to delete the org """
if not self.created:
self.logger.info(
"Skipping org deletion: the scratch org has not been created"
)
return
command = sarge.shell_format("sfdx force:org:delete -p -u {0}", self.username)
self.logger.info("Deleting scratch org with command {}".format(command))
p = sarge.Command(command, stdout=sarge.Capture(buffer_size=-1), shell=True)
p.run()
stdout = []
for line in io.TextIOWrapper(p.stdout):
stdout.append(line)
if line.startswith("An error occurred deleting this org"):
self.logger.error(line)
else:
self.logger.info(line)
if p.returncode:
message = "Failed to delete scratch org: \n{}".format("".join(stdout))
raise ScratchOrgException(message)
# Flag that this org has been deleted
self.config["created"] = False
self.config["username"] = None
self.config["date_created"] = None
|
Use sfdx force:org:display to refresh the token instead of the built-in OAuth handling
|
def refresh_oauth_token(self, keychain):
""" Use sfdx force:org:describe to refresh token instead of built in OAuth handling """
if hasattr(self, "_scratch_info"):
# Cache the scratch_info for 1 hour to avoid unnecessary calls out
# to sfdx CLI
delta = datetime.datetime.utcnow() - self._scratch_info_date
if delta.total_seconds() > 3600:
del self._scratch_info
# Force a token refresh
self.force_refresh_oauth_token()
# Get org info via sfdx force:org:display
self.scratch_info
# Get additional org info by querying API
self._load_orginfo()
|
returns the time (in seconds) that the batch took, if complete
|
def delta(self):
""" returns the time (in seconds) that the batch took, if complete """
completed_date = parse_api_datetime(self.batch["CompletedDate"])
created_date = parse_api_datetime(self.batch["CreatedDate"])
td = completed_date - created_date
return td.total_seconds()
|
Convert Connected App to service
|
def _convert_connected_app(self):
"""Convert Connected App to service"""
if self.services and "connected_app" in self.services:
# already a service
return
connected_app = self.get_connected_app()
if not connected_app:
# not configured
return
self.logger.warning(
"Reading Connected App info from deprecated config."
" Connected App should be changed to a service."
" If using environment keychain, update the environment variable."
" Otherwise, it has been handled automatically and you should not"
" see this message again."
)
ca_config = ServiceConfig(
{
"callback_url": connected_app.callback_url,
"client_id": connected_app.client_id,
"client_secret": connected_app.client_secret,
}
)
self.set_service("connected_app", ca_config)
|
Creates all scratch org configs for the project in the keychain if
a keychain org doesn't already exist
|
def _load_scratch_orgs(self):
""" Creates all scratch org configs for the project in the keychain if
a keychain org doesn't already exist """
current_orgs = self.list_orgs()
if not self.project_config.orgs__scratch:
return
for config_name in self.project_config.orgs__scratch.keys():
if config_name in current_orgs:
# Don't overwrite an existing keychain org
continue
self.create_scratch_org(config_name, config_name)
|
Adds/Updates a scratch org config to the keychain from a named config
|
def create_scratch_org(self, org_name, config_name, days=None, set_password=True):
""" Adds/Updates a scratch org config to the keychain from a named config """
scratch_config = getattr(
self.project_config, "orgs__scratch__{}".format(config_name)
)
if days is not None:
# Allow override of scratch config's default days
scratch_config["days"] = days
else:
# Use scratch config days or default of 1 day
scratch_config.setdefault("days", 1)
scratch_config["set_password"] = bool(set_password)
scratch_config["scratch"] = True
scratch_config.setdefault("namespaced", False)
scratch_config["config_name"] = config_name
scratch_config["sfdx_alias"] = "{}__{}".format(
self.project_config.project__name, org_name
)
org_config = ScratchOrgConfig(scratch_config, org_name)
self.set_org(org_config)
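
A hypothetical usage sketch, assuming keychain is an initialized keychain and "dev" is a scratch config defined under orgs -> scratch in the project config:

keychain.create_scratch_org("feature-test", "dev", days=7, set_password=False)
org = keychain.get_org("feature-test")
assert org.config["scratch"] is True
assert org.config["days"] == 7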
|
re-encrypt stored services and orgs with the new key
|
def change_key(self, key):
""" re-encrypt stored services and orgs with the new key """
services = {}
for service_name in self.list_services():
services[service_name] = self.get_service(service_name)
orgs = {}
for org_name in self.list_orgs():
orgs[org_name] = self.get_org(org_name)
self.key = key
if orgs:
for org_name, org_config in list(orgs.items()):
self.set_org(org_config)
if services:
for service_name, service_config in list(services.items()):
self.set_service(service_name, service_config)
self._convert_connected_app()
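
change_key follows a read-all / swap-key / write-all shape so nothing is lost mid-rotation. A toy illustration of the same shape, with single-byte XOR standing in for the keychain's real encryption:

class ToyStore:
    def __init__(self, key):
        self.key = key
        self._data = {}

    def set(self, name, value):
        # "encrypt" with single-byte XOR (illustration only)
        self._data[name] = bytes(b ^ self.key for b in value)

    def get(self, name):
        return bytes(b ^ self.key for b in self._data[name])

    def change_key(self, new_key):
        # decrypt everything under the old key first
        snapshot = {name: self.get(name) for name in self._data}
        self.key = new_key
        for name, value in snapshot.items():
            self.set(name, value)  # re-encrypts under new_key

store = ToyStore(key=0x42)
store.set("org", b"secret")
store.change_key(0x7F)
assert store.get("org") == b"secret"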
|
retrieve the name and configuration of the default org
|
def get_default_org(self):
""" retrieve the name and configuration of the default org """
for org in self.list_orgs():
org_config = self.get_org(org)
if org_config.default:
return org, org_config
return None, None
|
set the default org for tasks by name key
|
def set_default_org(self, name):
""" set the default org for tasks by name key """
org = self.get_org(name)
self.unset_default_org()
org.config["default"] = True
self.set_org(org)
|
unset the default orgs for tasks
|
def unset_default_org(self):
""" unset the default orgs for tasks """
for org in self.list_orgs():
org_config = self.get_org(org)
if org_config.default:
del org_config.config["default"]
self.set_org(org_config)
|
retrieve an org configuration by name key
|
def get_org(self, name):
""" retrieve an org configuration by name key """
if name not in self.orgs:
self._raise_org_not_found(name)
return self._get_org(name)
|
list the orgs configured in the keychain
|
def list_orgs(self):
""" list the orgs configured in the keychain """
orgs = list(self.orgs.keys())
orgs.sort()
return orgs
|
Store a ServiceConfig in the keychain
|
def set_service(self, name, service_config, project=False):
""" Store a ServiceConfig in the keychain """
if not self.project_config.services or name not in self.project_config.services:
self._raise_service_not_valid(name)
self._validate_service(name, service_config)
self._set_service(name, service_config, project)
self._load_services()
|
Retrieve a stored ServiceConfig from the keychain, raising an exception if the service is not valid or not configured.
:param name: the service name to retrieve
:type name: str
:rtype: ServiceConfig
:return: the configured service
|
def get_service(self, name):
""" Retrieve a stored ServiceConfig from the keychain or exception
:param name: the service name to retrieve
:type name: str
:rtype ServiceConfig
:return the configured Service
"""
self._convert_connected_app()
if not self.project_config.services or name not in self.project_config.services:
self._raise_service_not_valid(name)
if name not in self.services:
self._raise_service_not_configured(name)
return self._get_service(name)
|
list the services configured in the keychain
|
def list_services(self):
""" list the services configured in the keychain """
services = list(self.services.keys())
services.sort()
return services
|
Start the Python debugger when robotframework is running.
This makes sure that pdb can use stdin/stdout even though
robotframework has redirected I/O.
|
def set_pdb_trace(pm=False):
"""Start the Python debugger when robotframework is running.
This makes sure that pdb can use stdin/stdout even though
robotframework has redirected I/O.
"""
import sys
import pdb
for attr in ("stdin", "stdout", "stderr"):
setattr(sys, attr, getattr(sys, "__%s__" % attr))
if pm:
# Post-mortem debugging of an exception
pdb.post_mortem()
else:
pdb.set_trace()
|
Decorator to turn on automatic retries of flaky selenium failures.
Decorate a robotframework library class to turn on retries for all
selenium calls from that library:
@selenium_retry
class MyLibrary(object):
# Decorate a method to turn it back off for that method
@selenium_retry(False)
def some_keyword(self):
self.selenium.click_button('foo')
Or turn it off by default but turn it on for some methods
(the class-level decorator is still required):
@selenium_retry(False)
class MyLibrary(object):
@selenium_retry(True)
def some_keyword(self):
self.selenium.click_button('foo')
|
def selenium_retry(target=None, retry=True):
"""Decorator to turn on automatic retries of flaky selenium failures.
Decorate a robotframework library class to turn on retries for all
selenium calls from that library:
@selenium_retry
class MyLibrary(object):
# Decorate a method to turn it back off for that method
@selenium_retry(False)
def some_keyword(self):
self.selenium.click_button('foo')
Or turn it off by default but turn it on for some methods
(the class-level decorator is still required):
@selenium_retry(False)
class MyLibrary(object):
@selenium_retry(True)
def some_keyword(self):
self.selenium.click_button('foo')
"""
if isinstance(target, bool):
# Decorator was called with a single boolean argument
retry = target
target = None
def decorate(target):
if isinstance(target, type):
cls = target
# Metaclass time.
# We're going to generate a new subclass that:
# a) mixes in RetryingSeleniumLibraryMixin
# b) sets the initial value of `retry_selenium`
return type(
cls.__name__,
(cls, RetryingSeleniumLibraryMixin),
{"retry_selenium": retry, "__doc__": cls.__doc__},
)
func = target
@functools.wraps(func)
def run_with_retry(self, *args, **kwargs):
# Set the retry setting and run the original function.
old_retry = self.retry_selenium
            self.retry_selenium = retry
try:
return func(self, *args, **kwargs)
finally:
# Restore the previous value
self.retry_selenium = old_retry
run_with_retry.is_selenium_retry_decorator = True
return run_with_retry
if target is None:
# Decorator is being used with arguments
return decorate
else:
# Decorator was used without arguments
return decorate(target)
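
The dual calling convention (@selenium_retry vs. @selenium_retry(False)) works because the first positional argument is inspected: a bool means the decorator was called with arguments; a bare class or function means it was applied directly. A stripped-down sketch of that dispatch, with illustrative names:

import functools

def flag_decorator(target=None, flag=True):
    if isinstance(target, bool):
        # called as @flag_decorator(False)
        flag = target
        target = None

    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # `flag` is closed over and available here
            return func(*args, **kwargs)
        return wrapper

    if target is None:
        return decorate        # used with arguments
    return decorate(target)    # used bare

@flag_decorator
def hello():
    return "hi"

@flag_decorator(False)
def goodbye():
    return "bye"

assert hello() == "hi" and goodbye() == "bye"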
|
Run a single selenium command and retry once.
The retry happens for certain errors that are likely to be resolved
by retrying.
|
def selenium_execute_with_retry(self, execute, command, params):
"""Run a single selenium command and retry once.
The retry happens for certain errors that are likely to be resolved
by retrying.
"""
try:
return execute(command, params)
except Exception as e:
if isinstance(e, ALWAYS_RETRY_EXCEPTIONS) or (
isinstance(e, WebDriverException)
and "Other element would receive the click" in str(e)
):
# Retry
self.builtin.log("Retrying {} command".format(command), level="WARN")
time.sleep(2)
return execute(command, params)
else:
raise
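
The same retry-once shape generalized away from selenium, with ValueError standing in for the retryable exception types:

import time

RETRYABLE = (ValueError,)

def execute_with_retry(execute, *args):
    try:
        return execute(*args)
    except RETRYABLE:
        time.sleep(2)          # give the flaky resource a moment to settle
        return execute(*args)  # a second failure propagates to the caller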
|
Gets the last release tag before self.current_tag
|
def _get_last_tag(self):
""" Gets the last release tag before self.current_tag """
current_version = LooseVersion(
self._get_version_from_tag(self.release_notes_generator.current_tag)
)
versions = []
for tag in self.repo.tags():
if not tag.name.startswith(self.github_info["prefix_prod"]):
continue
version = LooseVersion(self._get_version_from_tag(tag.name))
if version >= current_version:
continue
versions.append(version)
if versions:
versions.sort()
return "{}{}".format(self.github_info["prefix_prod"], versions[-1])
|
Gets all pull requests from the repo since we can't do a filtered
date merged search
|
def _get_pull_requests(self):
""" Gets all pull requests from the repo since we can't do a filtered
date merged search """
for pull in self.repo.pull_requests(
state="closed", base=self.github_info["master_branch"], direction="asc"
):
if self._include_pull_request(pull):
yield pull
|
Checks if the given pull_request was merged to the default branch
between self.start_date and self.end_date
|
def _include_pull_request(self, pull_request):
""" Checks if the given pull_request was merged to the default branch
between self.start_date and self.end_date """
merged_date = pull_request.merged_at
if not merged_date:
return False
if self.last_tag:
last_tag_sha = self.last_tag_info["commit"].sha
if pull_request.merge_commit_sha == last_tag_sha:
# Github commit dates can be different from the merged_at date
return False
current_tag_sha = self.current_tag_info["commit"].sha
if pull_request.merge_commit_sha == current_tag_sha:
return True
# include PRs before current tag
if merged_date <= self.start_date:
if self.end_date:
# include PRs after last tag
if (
merged_date > self.end_date
and pull_request.merge_commit_sha != last_tag_sha
):
return True
else:
# no last tag, include all PRs before current tag
return True
return False
|
Monkey patch robotframework to do postmortem debugging
|
def patch_statusreporter():
"""Monkey patch robotframework to do postmortem debugging
"""
from robot.running.statusreporter import StatusReporter
orig_exit = StatusReporter.__exit__
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_val and isinstance(exc_val, Exception):
set_pdb_trace(pm=True)
return orig_exit(self, exc_type, exc_val, exc_tb)
StatusReporter.__exit__ = __exit__
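
The same wrap-and-replace monkey-patch pattern, shown on a harmless stand-in class:

class Greeter:
    def greet(self):
        return "hello"

orig_greet = Greeter.greet

def greet(self):
    return orig_greet(self).upper()  # delegate to the original, then post-process

Greeter.greet = greet
assert Greeter().greet() == "HELLO"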
|
Returns the value of the first name element found inside the given item
|
def get_item_name(self, item, parent):
""" Returns the value of the first name element found inside of element """
names = self.get_name_elements(item)
if not names:
raise MissingNameElementError
name = names[0].text
prefix = self.item_name_prefix(parent)
if prefix:
name = prefix + name
return name
|
Step details formatted for logging output.
|
def for_display(self):
""" Step details formatted for logging output. """
skip = ""
if self.skip:
skip = " [SKIP]"
result = "{step_num}: {path}{skip}".format(
step_num=self.step_num, path=self.path, skip=skip
)
description = self.task_config.get("description")
if description:
result += ": {}".format(description)
return result
|
Run a step.
:return: StepResult
|
def run_step(self):
"""
Run a step.
:return: StepResult
"""
# Resolve ^^task_name.return_value style option syntax
task_config = self.step.task_config.copy()
task_config["options"] = task_config["options"].copy()
self.flow.resolve_return_value_options(task_config["options"])
exc = None
try:
task = self.step.task_class(
self.project_config,
TaskConfig(task_config),
org_config=self.org_config,
name=self.step.task_name,
stepnum=self.step.step_num,
flow=self.flow,
)
self._log_options(task)
task()
except Exception as e:
self.flow.logger.exception(
"Exception in task {}".format(self.step.task_name)
)
exc = e
return StepResult(
self.step.step_num,
self.step.task_name,
self.step.path,
task.result,
task.return_values,
exc,
)
|
Given the flow config and everything else, create a list of steps to run, sorted by step number.
:return: List[StepSpec]
|
def _init_steps(self):
"""
Given the flow config and everything else, create a list of steps to run, sorted by step number.
:return: List[StepSpec]
"""
self._check_old_yaml_format()
config_steps = self.flow_config.steps
self._check_infinite_flows(config_steps)
steps = []
for number, step_config in config_steps.items():
specs = self._visit_step(number, step_config)
steps.extend(specs)
return sorted(steps, key=attrgetter("step_num"))
|
For each step (as defined in the flow YAML), _visit_step is called with only
the first two parameters. It takes care of validating the step, collating the
option overrides, and, if the step is a task, creating a StepSpec for it.
If the step is a flow, _visit_step is called recursively with the remaining
parameters as context.
:param number: LooseVersion representation of the current step number
:param step_config: the current step's config (dict from YAML)
:param visited_steps: used when called recursively for nested steps; becomes the return value
:param parent_options: used when called recursively for nested steps; options from the parent flow
:param parent_ui_options: used when called recursively for nested steps; UI options from the parent flow
:param from_flow: used when called recursively for nested steps; name of the parent flow
:return: List[StepSpec], a list of all resolved steps including/under the one passed in
|
def _visit_step(
self,
number,
step_config,
visited_steps=None,
parent_options=None,
parent_ui_options=None,
from_flow=None,
):
"""
    For each step (as defined in the flow YAML), _visit_step is called with only
    the first two parameters. It takes care of validating the step, collating the
    option overrides, and, if the step is a task, creating a StepSpec for it.
    If the step is a flow, _visit_step is called recursively with the remaining
    parameters as context.
    :param number: LooseVersion representation of the current step number
    :param step_config: the current step's config (dict from YAML)
    :param visited_steps: used when called recursively for nested steps; becomes the return value
    :param parent_options: used when called recursively for nested steps; options from the parent flow
    :param parent_ui_options: used when called recursively for nested steps; UI options from the parent flow
    :param from_flow: used when called recursively for nested steps; name of the parent flow
    :return: List[StepSpec], a list of all resolved steps including/under the one passed in
    """
number = LooseVersion(str(number))
if visited_steps is None:
visited_steps = []
if parent_options is None:
parent_options = {}
if parent_ui_options is None:
parent_ui_options = {}
# Step Validation
# - A step is either a task OR a flow.
if all(k in step_config for k in ("flow", "task")):
raise FlowConfigError(
"Step {} is configured as both a flow AND a task. \n\t{}.".format(
number, step_config
)
)
# Skips
# - either in YAML (with the None string)
# - or by providing a skip list to the FlowRunner at initialization.
if (
("flow" in step_config and step_config["flow"] == "None")
or ("task" in step_config and step_config["task"] == "None")
or ("task" in step_config and step_config["task"] in self.skip)
):
visited_steps.append(
StepSpec(
number,
step_config.get("task", step_config.get("flow")),
step_config.get("options", {}),
None,
from_flow=from_flow,
skip=True, # someday we could use different vals for why skipped
)
)
return visited_steps
if "task" in step_config:
name = step_config["task"]
# get the base task_config from the project config, as a dict for easier manipulation.
# will raise if the task doesn't exist / is invalid
task_config = copy.deepcopy(self.project_config.get_task(name).config)
if "options" not in task_config:
task_config["options"] = {}
# merge the options together, from task_config all the way down through parent_options
step_overrides = copy.deepcopy(parent_options.get(name, {}))
step_overrides.update(step_config.get("options", {}))
task_config["options"].update(step_overrides)
# merge UI options from task config and parent flow
if "ui_options" not in task_config:
task_config["ui_options"] = {}
step_ui_overrides = copy.deepcopy(parent_ui_options.get(name, {}))
step_ui_overrides.update(step_config.get("ui_options", {}))
task_config["ui_options"].update(step_ui_overrides)
# merge runtime options
if name in self.runtime_options:
task_config["options"].update(self.runtime_options[name])
# get implementation class. raise/fail if it doesn't exist, because why continue
try:
task_class = import_class(task_config["class_path"])
except (ImportError, AttributeError):
                # TODO: clean this up and raise a TaskImportError or something else more correct.
                raise FlowConfigError("Task named {} has bad classpath".format(name))
visited_steps.append(
StepSpec(
number,
name,
task_config,
task_class,
step_config.get("ignore_failure", False),
from_flow=from_flow,
)
)
return visited_steps
if "flow" in step_config:
name = step_config["flow"]
if from_flow:
path = ".".join([from_flow, name])
else:
path = name
step_options = step_config.get("options", {})
step_ui_options = step_config.get("ui_options", {})
flow_config = self.project_config.get_flow(name)
for sub_number, sub_stepconf in flow_config.steps.items():
                # Append the flow number to the child number, since it's a LooseVersion.
                # E.g. if we're in step 2.3 which references a flow with steps 1-5, it
                # simply ends up as five steps: 2.3.1, 2.3.2, 2.3.3, 2.3.4, 2.3.5
                # TODO: how does this work with a nested flow override? What does defining step 2.3.2 later do?
num = "{}.{}".format(number, sub_number)
self._visit_step(
num,
sub_stepconf,
visited_steps,
parent_options=step_options,
parent_ui_options=step_ui_options,
from_flow=path,
)
return visited_steps
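
The composed child numbers still sort correctly because LooseVersion compares dotted components numerically across nesting depths:

from distutils.version import LooseVersion

nums = sorted(LooseVersion(n) for n in ("2.3.10", "2.4", "2.3.2", "2.3"))
assert [str(n) for n in nums] == ["2.3", "2.3.2", "2.3.10", "2.4"]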
|
Recursively loop through the flow_config and check if there are any cycles.
:param steps: Set of step definitions to loop through
:param flows: Flows already visited.
:return: None
|
def _check_infinite_flows(self, steps, flows=None):
"""
Recursively loop through the flow_config and check if there are any cycles.
:param steps: Set of step definitions to loop through
:param flows: Flows already visited.
:return: None
"""
if flows is None:
flows = []
for step in steps.values():
if "flow" in step:
flow = step["flow"]
if flow == "None":
continue
if flow in flows:
raise FlowInfiniteLoopError(
"Infinite flows detected with flow {}".format(flow)
)
flows.append(flow)
flow_config = self.project_config.get_flow(flow)
self._check_infinite_flows(flow_config.steps, flows)
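
The traversal in miniature: a simplified stand-in (not the method above) that raises as soon as a flow name reappears on the visited list:

def check_cycles(flow_name, definitions, visited=None):
    if visited is None:
        visited = []
    if flow_name in visited:
        raise ValueError("Infinite flows detected with flow {}".format(flow_name))
    visited.append(flow_name)
    for step in definitions[flow_name].values():
        if "flow" in step:
            check_cycles(step["flow"], definitions, visited)

definitions = {
    "a": {1: {"flow": "b"}},
    "b": {1: {"flow": "a"}},  # cycle: a -> b -> a
}
try:
    check_cycles("a", definitions)
except ValueError as e:
    print(e)  # Infinite flows detected with flow a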
|
Test and refresh credentials to the org specified.
|
def _init_org(self):
""" Test and refresh credentials to the org specified. """
self.logger.info(
"Verifying and refreshing credentials for the specified org: {}.".format(
self.org_config.name
)
)
orig_config = self.org_config.config.copy()
# attempt to refresh the token, this can throw...
self.org_config.refresh_oauth_token(self.project_config.keychain)
if self.org_config.config != orig_config:
self.logger.info("Org info has changed, updating org in keychain")
self.project_config.keychain.set_org(self.org_config)
|
Handle dynamic option value lookups in the format ^^task_name.attr
|
def resolve_return_value_options(self, options):
"""Handle dynamic option value lookups in the format ^^task_name.attr"""
for key, value in options.items():
if isinstance(value, str) and value.startswith(RETURN_VALUE_OPTION_PREFIX):
path, name = value[len(RETURN_VALUE_OPTION_PREFIX) :].rsplit(".", 1)
result = self._find_result_by_path(path)
options[key] = result.return_values.get(name)
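
How the ^^task_name.attr resolution behaves, sketched with a stand-in result lookup (RETURN_VALUE_OPTION_PREFIX is the literal "^^" per the docstring; FakeResult and resolve are illustrative):

class FakeResult:
    return_values = {"package_id": "04t000000000001"}

def resolve(options, find_result_by_path):
    for key, value in options.items():
        if isinstance(value, str) and value.startswith("^^"):
            path, name = value[2:].rsplit(".", 1)
            options[key] = find_result_by_path(path).return_values.get(name)

options = {"version_id": "^^upload_beta.package_id"}
resolve(options, lambda path: FakeResult())
assert options["version_id"] == "04t000000000001"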
|