_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def write_referrers_to_file(self, file_path='', date=None, organization='llnl'):
    """
    Append the referrers data to a CSV file.

    Writes a header row if the file does not already exist. Counts and
    unique-visitor counts of exactly 1 are recorded as 1.5 so their log
    is non-zero (log(1) == 0 would display as a missing value).

    Args:
        file_path (str): Path of the CSV file to append to.
        date (str): ISO date string for the rows; defaults to today.
            Computed at call time -- a default of str(date.today()) in
            the signature would be frozen at import time.
        organization (str): Organization name recorded in each row.
    """
    if date is None:
        date = str(datetime.date.today())
    # Drop any rows already recorded for this date before re-appending.
    self.remove_date(file_path=file_path, date=date)
    referrers_exists = os.path.isfile(file_path)
    with open(file_path, 'a') as out:
        if not referrers_exists:
            out.write('date,organization,referrer,count,count_log,uniques,'
                      + 'uniques_logged\n')
        sorted_referrers = sorted(self.referrers_lower)  # sort case-insensitively
        for referrer in sorted_referrers:
            ref_name = self.referrers_lower[referrer]  # real (cased) name
            count = self.referrers[ref_name][0]
            uniques = self.referrers[ref_name][1]
            if count == 1:  # avoid log(1) == 0 in plots
                count = 1.5
            if uniques == 1:
                uniques = 1.5
            count_logged = math.log(count)
            uniques_logged = math.log(uniques)
            out.write(date + ',' + organization + ','
                      + ref_name + ',' + str(count) + ',' + str(count_logged) + ','
                      + str(uniques) + ',' + str(uniques_logged) + '\n')
    # The 'with' block closes the file; no explicit close() needed.
def process_json(filename):
    """
    Yield DOE CODE records from a DOE CODE .json file.

    Args:
        filename (str): Path to a DOE CODE .json export containing a
            top-level 'records' list.

    Yields:
        dict: One DOE CODE record per iteration.
    """
    logger.debug('Processing DOE CODE json: %s', filename)
    # Use a context manager so the handle is closed promptly; the original
    # json.load(open(filename)) leaked the file handle.
    with open(filename) as json_file:
        doecode_json = json.load(json_file)
    for record in doecode_json['records']:
        yield record
def process_url(url, key):
    """
    Yield DOE CODE records fetched from a DOE CODE API .json URL.

    Args:
        url (str): DOE CODE API endpoint returning a JSON 'records' list.
        key (str): DOE CODE API key used for Basic authorization.

    Raises:
        ValueError: If no API key is supplied.

    Yields:
        dict: One DOE CODE record per iteration.
    """
    logger.debug('Fetching DOE CODE JSON: %s', url)
    if key is None:
        raise ValueError('DOE CODE API Key value is missing!')
    auth_headers = {"Authorization": "Basic " + key}
    payload = requests.get(url, headers=auth_headers).json()
    yield from payload['records']
def process(filename=None, url=None, key=None):
    """
    Yield DOE CODE records from whichever input source is provided.

    Args:
        filename (str): Path to a DOE CODE .json file.
        url (str): URL of a DOE CODE server .json endpoint.
        key (str): API key for the DOE CODE server.

    Yields:
        dict: DOE CODE records; yields nothing when no usable source is given.
    """
    source = None
    if filename is not None:
        source = process_json(filename)
    elif url and key:
        source = process_url(url, key)
    if source is not None:
        yield from source
def login(self, username='', password=''):
    """
    Performs a login and sets the Github object via given credentials. If
    credentials are empty or incorrect then prompts user for credentials.
    Stores the authentication token in a CREDENTIALS_FILE used for future
    logins. Handles Two Factor Authentication.
    """
    # NOTE(review): Python 2 code (print statements, raw_input); this
    # repository mixes Python 2 and 3 idioms -- confirm target version.
    try:
        token = ''
        id = ''  # NOTE: shadows the builtin id()
        # 'CREDENTIALS_FILE' is a literal file name, not a variable.
        if not os.path.isfile('CREDENTIALS_FILE'):
            if(username == '' or password == ''):
                username = raw_input('Username: ')
                password = getpass.getpass('Password: ')
            note = 'GitHub Organization Stats App'
            note_url = 'http://software.llnl.gov/'
            scopes = ['user', 'repo']
            # May trigger the two-factor callback for 2FA accounts.
            auth = github3.authorize(username, password, scopes, note,
                    note_url, two_factor_callback=self.prompt_2fa)
            token = auth.token
            id = auth.id
            # Cache token + authorization id for future runs.
            with open('CREDENTIALS_FILE', 'w+') as fd:
                fd.write(token + '\n')
                fd.write(str(id))
                fd.close()  # redundant inside 'with'
        else:
            with open('CREDENTIALS_FILE', 'r') as fd:
                token = fd.readline().strip()
                id = fd.readline().strip()
                fd.close()  # redundant inside 'with'
        print "Logging in."
        self.logged_in_gh = github3.login(token=token, two_factor_callback=self.prompt_2fa)
        # Force an authenticated request so bad tokens fail here, not later.
        self.logged_in_gh.user().to_json()
    except (ValueError, AttributeError, github3.models.GitHubError) as e:
        print 'Bad credentials. Try again.'
        # Recurse with empty credentials so the user is re-prompted.
        self.login()
def get_mems_of_org(self):
    """
    Retrieves the emails of the members of the organization. Note this Only
    gets public emails. Private emails would need authentication for each
    user.
    """
    # NOTE(review): Python 2 print statement; module mixes py2/py3 idioms.
    print 'Getting members\' emails.'
    for member in self.org_retrieved.iter_members():
        login = member.to_json()['login']
        # One extra API call per member to fetch the public profile email.
        user_email = self.logged_in_gh.user(login).to_json()['email']
        if user_email is not None:
            self.emails[login] = user_email
        else:  # user has no public email
            self.emails[login] = 'none'
        # used for sorting regardless of case
        self.logins_lower[login.lower()] = login
def write_to_file(self, file_path=''):
    """
    Write the collected user emails to a CSV file.

    Rows are ordered case-insensitively by login. Users without a public
    email are recorded with the literal string 'none'.

    Args:
        file_path (str): Path of the CSV file to (over)write.
    """
    with open(file_path, 'w+') as out:
        out.write('user, email\n')
        sorted_names = sorted(self.logins_lower)  # lowercase keys -> case-insensitive order
        for login in sorted_names:
            out.write(self.logins_lower[login] + ','
                      + self.emails[self.logins_lower[login]] + '\n')
    # The 'with' block closes the file; the explicit close() was redundant.
def connect(url, username, password):
    """
    Return a connected Bitbucket (Stash) session.

    Args:
        url (str): Base URL of the Bitbucket server.
        username (str): Account to authenticate as.
        password (str): Password for the account.

    Returns:
        A connected stashy session object.
    """
    session = stashy.connect(url, username, password)
    logger.info('Connected to: %s as %s', url, username)
    return session
def connect(url='https://gitlab.com', token=None):
    """
    Return a connected GitLab session.

    Args:
        url (str): Base URL of the GitLab instance.
        token (str): A GitLab ``private_token``; falls back to the
            GITLAB_API_TOKEN environment variable when omitted.

    Returns:
        gitlab.Gitlab: A session whose credentials have been verified.

    Raises:
        RuntimeError: If the token is missing or rejected.
    """
    if token is None:
        token = os.environ.get('GITLAB_API_TOKEN')
    gl_session = gitlab.Gitlab(url, token)
    try:
        # Cheap authenticated call to validate the token up front.
        gl_session.version()
    except gitlab.exceptions.GitlabAuthenticationError:
        # Fixed typo: 'gitlab.execeptions' raised AttributeError and
        # masked the real authentication failure.
        raise RuntimeError('Invalid or missing GITLAB_API_TOKEN')
    logger.info('Connected to: %s', url)
    return gl_session
def query_repos(gl_session, repos=None):
    """
    Yield GitLab project objects.

    If ``repos`` names specific projects, yield exactly those; otherwise
    yield every project visible to the session.

    Args:
        gl_session: An authenticated ``gitlab.Gitlab`` session.
        repos (list): Optional list of project ids/paths to fetch.

    Yields:
        GitLab project objects.
    """
    if repos is None:
        repos = []
    for repo_id in repos:
        yield gl_session.projects.get(repo_id)
    if repos:
        return
    # No explicit repo list given: stream every visible project.
    yield from gl_session.projects.list(as_list=False)
def git_repo_to_sloc(url):
    """
    Given a Git repository URL, returns number of lines of code based on cloc

    Reference:
    - cloc: https://github.com/AlDanial/cloc
    - https://www.omg.org/spec/AFP/
    - Another potential way to calculation effort

    Sample cloc output:
        {
            "header": {
                "cloc_url": "github.com/AlDanial/cloc",
                "cloc_version": "1.74",
                "elapsed_seconds": 0.195950984954834,
                "n_files": 27,
                "n_lines": 2435,
                "files_per_second": 137.78956000769,
                "lines_per_second": 12426.5769858787
            },
            "C++": {
                "nFiles": 7,
                "blank": 121,
                "comment": 314,
                "code": 371
            },
            "C/C++ Header": {
                "nFiles": 8,
                "blank": 107,
                "comment": 604,
                "code": 191
            },
            "CMake": {
                "nFiles": 11,
                "blank": 49,
                "comment": 465,
                "code": 165
            },
            "Markdown": {
                "nFiles": 1,
                "blank": 18,
                "comment": 0,
                "code": 30
            },
            "SUM": {
                "blank": 295,
                "comment": 1383,
                "code": 757,
                "nFiles": 27
            }
        }
    """
    # Shallow-clone into a temp directory that is removed on scope exit.
    with tempfile.TemporaryDirectory() as tmp_dir:
        logger.debug('Cloning: url=%s tmp_dir=%s', url, tmp_dir)
        tmp_clone = os.path.join(tmp_dir, 'clone-dir')
        cmd = ['git', 'clone', '--depth=1', url, tmp_clone]
        # NOTE(review): 'execute' is a helper defined elsewhere in this module.
        execute(cmd)
        cmd = ['cloc', '--json', tmp_clone]
        out, _ = execute(cmd)
    try:
        # Skip any leading non-JSON output, then strip escaped newlines and
        # single quotes that would break json.loads.
        json_start = out.find('{"header"')
        json_blob = out[json_start:].replace('\\n', '').replace('\'', '')
        cloc_json = json.loads(json_blob)
        sloc = cloc_json['SUM']['code']
    except json.decoder.JSONDecodeError:
        logger.debug('Error Decoding: url=%s, out=%s', url, out)
        sloc = 0  # treat unparseable cloc output as zero lines of code
    logger.debug('SLOC: url=%s, sloc=%d', url, sloc)
    return sloc
def compute_labor_hours(sloc, month_hours='cocomo_book'):
    """
    Compute the labor hours, given a count of source lines of code

    The intention is to use the COCOMO II model to compute this value.

    References:
    - http://csse.usc.edu/tools/cocomoii.php
    - http://docs.python-guide.org/en/latest/scenarios/scrape/

    Args:
        sloc (int): Source lines of code.
        month_hours (str): 'cocomo_book' (152 h/month, the default) or
            'hours_per_year' (40 h/week averaged over 12 months).

    Returns:
        float: Estimated labor hours; 0 when the COCOMO page yields no match.
    """
    # Calculation of hours in a month
    if month_hours == 'hours_per_year':
        # Use number of working hours in a year:
        # (40 Hours / week) * (52 weeks / year) / (12 months / year) ~= 173.33
        HOURS_PER_PERSON_MONTH = 40.0 * 52 / 12
    else:
        # Use value from COCOMO II Book (month_hours=='cocomo_book'):
        # Reference: https://dl.acm.org/citation.cfm?id=557000
        # This is the value used by the Code.gov team:
        # https://github.com/GSA/code-gov/blob/master/LABOR_HOUR_CALC.md
        HOURS_PER_PERSON_MONTH = 152.0
    cocomo_url = 'http://csse.usc.edu/tools/cocomoii.php'
    # Network call: scrape person-months from the COCOMO II web calculator.
    page = requests.post(cocomo_url, data={'new_size': sloc})
    try:
        # EFFORT_REGEX is a module-level pattern defined elsewhere in this file.
        person_months = float(EFFORT_REGEX.search(page.text).group(1))
    except AttributeError:
        logger.error('Unable to find Person Months in page text: sloc=%s', sloc)
        # If there is no match, and .search(..) returns None
        person_months = 0
    labor_hours = person_months * HOURS_PER_PERSON_MONTH
    logger.debug('sloc=%d labor_hours=%d', sloc, labor_hours)
    return labor_hours
q272612 | _prune_dict_null_str | test | def _prune_dict_null_str(dictionary):
"""
Prune the "None" or emptry string values from dictionary items
"""
for key, value in list(dictionary.items()):
if value is None or str(value) == '':
del dictionary[key]
if isinstance(value, dict):
dictionary[key] = _prune_dict_null_str(dictionary[key])
return dictionary | python | {
"resource": ""
} |
def _readGQL(self, filePath, verbose=False):
    """Read a 'pretty' formatted GraphQL query file into a one-line string.

    Removes line breaks and comments. Condenses white space. The result
    is cached keyed on absolute path + modification time, so repeated
    reads of an unchanged file skip the disk.

    Args:
        filePath (str): A relative or absolute path to a file containing
            a GraphQL query.
            File may use comments and multi-line formatting.

            .. _GitHub GraphQL Explorer:
               https://developer.github.com/v4/explorer/
        verbose (Optional[bool]): If False, prints will be suppressed.
            Defaults to False.

    Returns:
        str: A single line GraphQL query.

    Raises:
        RuntimeError: If the query file does not exist.
    """
    if not os.path.isfile(filePath):
        raise RuntimeError("Query file '%s' does not exist." % (filePath))
    lastModified = os.path.getmtime(filePath)
    absPath = os.path.abspath(filePath)
    # Cache hit only when both the path and the mtime match the last read.
    # (self.__queryPath etc. are name-mangled private attributes.)
    if absPath == self.__queryPath and lastModified == self.__queryTimestamp:
        _vPrint(verbose, "Using cached query '%s'" % (os.path.basename(self.__queryPath)))
        query_in = self.__query
    else:
        _vPrint(verbose, "Reading '%s' ... " % (filePath), end="", flush=True)
        with open(filePath, "r") as q:
            # Strip all comments and newlines.
            query_in = re.sub(r'#.*(\n|\Z)', '\n', q.read())
        # Condense extra whitespace.
        query_in = re.sub(r'\s+', ' ', query_in)
        # Remove any leading or trailing whitespace.
        query_in = re.sub(r'(\A\s+)|(\s+\Z)', '', query_in)
        _vPrint(verbose, "File read!")
        # Refresh the cache.
        self.__queryPath = absPath
        self.__queryTimestamp = lastModified
        self.__query = query_in
    return query_in
def queryGitHubFromFile(self, filePath, gitvars=None, verbosity=0, **kwargs):
    """Submit a GitHub GraphQL query from a file.

    Can only be used with GraphQL queries.
    For REST queries, see the 'queryGitHub' method.

    Args:
        filePath (str): A relative or absolute path to a file containing
            a GraphQL query.
            File may use comments and multi-line formatting.

            .. _GitHub GraphQL Explorer:
               https://developer.github.com/v4/explorer/
        gitvars (Optional[Dict]): All query variables.
            Defaults to None, which is treated as an empty dict. (A
            literal ``{}`` default would be shared across calls -- the
            classic mutable-default pitfall.)
            GraphQL Only.
        verbosity (Optional[int]): Changes output verbosity levels.
            If < 0, all extra printouts are suppressed.
            If == 0, normal print statements are displayed.
            If > 0, additional status print statements are displayed.
            Defaults to 0.
        **kwargs: Keyword arguments for the 'queryGitHub' method.

    Returns:
        Dict: A JSON style dictionary.
    """
    if gitvars is None:
        gitvars = {}
    gitquery = self._readGQL(filePath, verbose=(verbosity >= 0))
    return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, **kwargs)
def _submitQuery(self, gitquery, gitvars={}, verbose=False, rest=False):
    """Send a curl request to GitHub.

    Args:
        gitquery (str): The query or endpoint itself.
            Examples:
                query: 'query { viewer { login } }'
                endpoint: '/user'
        gitvars (Optional[Dict]): All query variables.
            Defaults to empty.
        verbose (Optional[bool]): If False, stderr prints will be
            suppressed. Defaults to False.
        rest (Optional[bool]): If True, uses the REST API instead
            of GraphQL. Defaults to False.

    Returns:
        {
            'statusNum' (int): The HTTP status code.
            'headDict' (Dict[str]): The response headers.
            'linkDict' (Dict[int]): Link based pagination data.
            'result' (str): The body of the response.
        }
    """
    errOut = DEVNULL if not verbose else None
    authhead = 'Authorization: bearer ' + self.__githubApiToken
    # Build the curl command with TMP* placeholder tokens, then substitute
    # the real header/query by list index so embedded spaces survive split().
    bashcurl = 'curl -iH TMPauthhead -X POST -d TMPgitquery https://api.github.com/graphql' if not rest \
        else 'curl -iH TMPauthhead https://api.github.com' + gitquery
    bashcurl_list = bashcurl.split()
    bashcurl_list[2] = authhead  # index 2 == the TMPauthhead slot
    if not rest:
        gitqueryJSON = json.dumps({'query': gitquery, 'variables': json.dumps(gitvars)})
        bashcurl_list[6] = gitqueryJSON  # index 6 == the TMPgitquery slot
    fullResponse = check_output(bashcurl_list, stderr=errOut).decode()
    _vPrint(verbose, "\n" + fullResponse)
    # Split headers from body at the first blank line of the HTTP response.
    fullResponse = fullResponse.split('\r\n\r\n')
    heads = fullResponse[0].split('\r\n')
    if len(fullResponse) > 1:
        result = fullResponse[1]
    else:
        result = ""
    # Status line looks like 'HTTP/1.1 200 OK'.
    http = heads[0].split()
    statusNum = int(http[1])
    # Parse headers into a useful dictionary
    headDict = {}
    headDict["http"] = heads[0]
    for header in heads[1:]:
        # NOTE(review): splitting on ': ' truncates header values that
        # themselves contain ': ' -- confirm acceptable for GitHub headers.
        h = header.split(': ')
        headDict[h[0]] = h[1]
    # Parse any Link headers even further
    linkDict = None
    if "Link" in headDict:
        linkProperties = headDict["Link"].split(', ')
        propDict = {}
        for item in linkProperties:
            # Each item looks like '<https://api.github.com/...>; rel="next"'.
            divided = re.split(r'<https://api.github.com|>; rel="|"', item)
            propDict[divided[2]] = divided[1]
        linkDict = propDict
    return {'statusNum': statusNum, 'headDict': headDict, 'linkDict': linkDict, 'result': result}
def _awaitReset(self, utcTimeStamp, verbose=True):
    """Wait until the given UTC timestamp.

    Used to sleep through a GitHub rate-limit window.

    Args:
        utcTimeStamp (int): A UTC format timestamp (epoch seconds).
        verbose (Optional[bool]): If False, all extra printouts will be
            suppressed. Defaults to True.
    """
    resetTime = pytz.utc.localize(datetime.utcfromtimestamp(utcTimeStamp))
    _vPrint(verbose, "--- Current Timestamp")
    _vPrint(verbose, " %s" % (time.strftime('%c')))
    now = pytz.utc.localize(datetime.utcnow())
    # +1 second so we land safely past the reset boundary.
    waitTime = round((resetTime - now).total_seconds()) + 1
    _vPrint(verbose, "--- Current UTC Timestamp")
    _vPrint(verbose, " %s" % (now.strftime('%c')))
    _vPrint(verbose, "--- GITHUB NEEDS A BREAK Until UTC Timestamp")
    _vPrint(verbose, " %s" % (resetTime.strftime('%c')))
    self._countdown(waitTime, printString="--- Waiting %*d seconds...", verbose=verbose)
    _vPrint(verbose, "--- READY!")
def _countdown(self, waitTime=0, printString="Waiting %*d seconds...", verbose=True):
    """Display a one-line countdown while sleeping.

    (Docstring corrected: the original documented a nonexistent
    'gitquery' argument.)

    Args:
        waitTime (Optional[int]): Seconds to count down. Values <= 0
            fall back to the manager's configured retry delay.
        printString (Optional[str]): A counter message template with a
            '%*d' placeholder. Defaults to 'Waiting %*d seconds...'
        verbose (Optional[bool]): If False, all extra printouts will be
            suppressed. Defaults to True.
    """
    if waitTime <= 0:
        waitTime = self.__retryDelay  # name-mangled private default delay
    for remaining in range(waitTime, 0, -1):
        # '\r' rewrites the same console line once per second;
        # the field width keeps the countdown column-aligned.
        _vPrint(verbose, "\r" + printString % (len(str(waitTime)), remaining), end="", flush=True)
        time.sleep(1)
    if verbose:
        # Final line shows 0 and ends with a newline.
        _vPrint(verbose, "\r" + printString % (len(str(waitTime)), 0))
def fileLoad(self, filePath=None, updatePath=True):
    """Load a JSON data file into the internal JSON data dictionary.

    Current internal data is overwritten. When no path is given, the
    stored data file path is used.

    Args:
        filePath (Optional[str]): A relative or absolute path to a
            '.json' file. Defaults to None.
        updatePath (Optional[bool]): Whether to update the stored data
            file path. Defaults to True.

    Raises:
        FileNotFoundError: If the target file does not exist.
    """
    filePath = filePath or self.filePath
    if not os.path.isfile(filePath):
        raise FileNotFoundError("Data file '%s' does not exist." % (filePath))
    print("Importing existing data file '%s' ... " % (filePath), end="", flush=True)
    with open(filePath, "r") as infile:
        raw = infile.read()
    print("Imported!")
    self.data = json.loads(raw)
    if updatePath:
        self.filePath = filePath
def fileSave(self, filePath=None, updatePath=False):
    """Write the internal JSON data dictionary to a JSON data file.

    If no file path is provided, the stored data file path will be used.
    Missing parent directories are created as needed.

    Args:
        filePath (Optional[str]): A relative or absolute path to a
            '.json' file. Defaults to None.
        updatePath (Optional[bool]): Specifies whether or not to update
            the stored data file path. Defaults to False.
    """
    if not filePath:
        filePath = self.filePath
    if not os.path.isfile(filePath):
        print("Data file '%s' does not exist, will create new file." % (filePath))
        parentDir = os.path.split(filePath)[0]
        # Guard against a bare filename: os.makedirs('') raises
        # FileNotFoundError, which crashed the original for relative
        # paths with no directory component.
        if parentDir and not os.path.exists(parentDir):
            os.makedirs(parentDir)
    dataJsonString = json.dumps(self.data, indent=4, sort_keys=True)
    print("Writing to file '%s' ... " % (filePath), end="", flush=True)
    with open(filePath, "w") as fileout:
        fileout.write(dataJsonString)
    print("Wrote file!")
    if updatePath:
        self.filePath = filePath
def create_tfs_connection(url, token):
    """
    Build the TFS connection context for the given server URL.

    Falls back to the TFS_API_TOKEN environment variable when no token
    is supplied.
    """
    if token is None:
        token = os.environ.get('TFS_API_TOKEN')
    credentials = BasicAuthentication('', token)
    return VssConnection(base_url=url, creds=credentials)
def create_tfs_project_analysis_client(url, token=None):
    """
    Create a project_analysis_client.py client for a Team Foundation Server
    Enterprise connection instance.

    This is helpful for understanding project languages, but currently blank
    for all our test conditions.

    If token is not provided, will attempt to use the TFS_API_TOKEN
    environment variable if present.

    Raises:
        RuntimeError: If no client could be created with the given token.
    """
    if token is None:
        token = os.environ.get('TFS_API_TOKEN', None)
    tfs_connection = create_tfs_connection(url, token)
    project_analysis_client = tfs_connection.get_client('vsts.project_analysis.v4_1.project_analysis_client.ProjectAnalysisClient')
    if project_analysis_client is None:
        msg = 'Unable to connect to TFS Enterprise (%s) with provided token.'
        # Interpolate the URL; RuntimeError(msg, url) only stored the raw
        # template and the URL as two separate exception args.
        raise RuntimeError(msg % url)
    return project_analysis_client
def create_tfs_core_client(url, token=None):
    """
    Create a core_client.py client for a Team Foundation Server Enterprise
    connection instance.

    If token is not provided, will attempt to use the TFS_API_TOKEN
    environment variable if present.

    Raises:
        RuntimeError: If no client could be created with the given token.
    """
    if token is None:
        token = os.environ.get('TFS_API_TOKEN', None)
    tfs_connection = create_tfs_connection(url, token)
    tfs_client = tfs_connection.get_client('vsts.core.v4_1.core_client.CoreClient')
    if tfs_client is None:
        msg = 'Unable to connect to TFS Enterprise (%s) with provided token.'
        # Interpolate the URL; RuntimeError(msg, url) never formatted it.
        raise RuntimeError(msg % url)
    return tfs_client
def create_tfs_git_client(url, token=None):
    """
    Create a TFS Git client used to pull Git repository info.

    If token is not provided, will attempt to use the TFS_API_TOKEN
    environment variable if present.

    Raises:
        RuntimeError: If no client could be created with the given token.
    """
    if token is None:
        token = os.environ.get('TFS_API_TOKEN', None)
    tfs_connection = create_tfs_connection(url, token)
    tfs_git_client = tfs_connection.get_client('vsts.git.v4_1.git_client.GitClient')
    if tfs_git_client is None:
        msg = 'Unable to create TFS Git Client, failed to connect to TFS Enterprise (%s) with provided token.'
        # Interpolate the URL; RuntimeError(msg, url) never formatted it.
        raise RuntimeError(msg % url)
    return tfs_git_client
def create_tfs_tfvc_client(url, token=None):
    """
    Create a TFS TFVC client used to pull TFVC repository info.

    If token is not provided, will attempt to use the TFS_API_TOKEN
    environment variable if present.

    Raises:
        RuntimeError: If no client could be created with the given token.
    """
    if token is None:
        token = os.environ.get('TFS_API_TOKEN', None)
    tfs_connection = create_tfs_connection(url, token)
    tfs_tfvc_client = tfs_connection.get_client('vsts.tfvc.v4_1.tfvc_client.TfvcClient')
    if tfs_tfvc_client is None:
        # Fixed copy-paste in the message ('Git Client' -> 'Tfvc Client')
        # and interpolate the URL instead of passing it as a second arg.
        msg = 'Unable to create TFS Tfvc Client, failed to connect to TFS Enterprise (%s) with provided token.'
        raise RuntimeError(msg % url)
    return tfs_tfvc_client
def get_git_repos(url, token, collection, project):
    """
    Return all Git repositories for the given project within the given
    collection.
    """
    collection_url = '{url}/{collection_name}'.format(url=url, collection_name=collection.name)
    git_client = create_tfs_git_client(collection_url, token)
    logger.debug('Retrieving Git Repos for Project: {project_name}'.format(project_name=project.name))
    return git_client.get_repositories(project.id)
def get_tfvc_repos(url, token, collection, project):
    """
    Return a list of all TFVC branches for the supplied project within the
    supplied collection.
    """
    branch_list = []
    tfvc_client = create_tfs_tfvc_client('{url}/{collection_name}'.format(url=url, collection_name=collection.name), token)
    logger.debug('Retrieving Tfvc Branches for Project: {project_name}'.format(project_name=project.name))
    # NOTE(review): positional flags presumably map to include-parent /
    # include-children / include-deleted / include-links -- confirm against
    # the vsts TfvcClient.get_branches signature.
    branches = tfvc_client.get_branches(project.id, True, True, False, True)
    if branches:
        branch_list.extend(branches)
    else:
        # Fixed typo in log message: 'Tfvcc' -> 'Tfvc'.
        logger.debug('No Tfvc Branches in Project: {project_name}'.format(project_name=project.name))
    return branch_list
def get_year_commits(self, username='', password='', organization='llnl', force=True):
    """
    Does setup such as login, printing API info, and waiting for GitHub to
    build the commit statistics. Then gets the last year of commits and
    prints them to file.

    Args:
        username (str): GitHub username.
        password (str): GitHub password.
        organization (str): GitHub organization to scan.
        force (bool): If True, regenerate even if the CSV already exists.
    """
    # NOTE(review): Python 2 print statements; also drives a module-level
    # 'my_github' object rather than 'self' -- confirm that is intended.
    date = str(datetime.date.today())  # NOTE: currently unused below
    file_path = ('year_commits.csv')
    if force or not os.path.isfile(file_path):
        my_github.login(username, password)
        # +1 because the login itself consumed one API call.
        calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
        print 'Rate Limit: ' + str(calls_beginning)
        my_github.get_org(organization)
        my_github.repos(building_stats=True)
        # GitHub computes commit statistics asynchronously; wait, then retry.
        print "Letting GitHub build statistics."
        time.sleep(30)
        print "Trying again."
        my_github.repos(building_stats=False)
        # starting_commits: current total taken from github_stats.py output.
        my_github.calc_total_commits(starting_commits=35163)
        my_github.write_to_file()
        calls_remaining = self.logged_in_gh.ratelimit_remaining
        calls_used = calls_beginning - calls_remaining
        print ('Rate Limit Remaining: ' + str(calls_remaining) + '\nUsed '
            + str(calls_used) + ' API calls.')
def calc_total_commits(self, starting_commits=0):
    """
    Reconstruct weekly cumulative commit totals for the past year.

    First accumulates the (negated) weekly commit counts per week, then
    walks the weeks from newest to oldest, offsetting each by the most
    recently known cumulative total. ``starting_commits`` should be the
    current total taken from the github_stats.py output.
    """
    for weekly in self.commits_dict_list:
        week = weekly['week']
        # Accumulate negated totals; repeated weeks keep subtracting.
        self.commits[week] = self.commits.get(week, 0) - weekly['total']
    self.sorted_weeks = sorted(self.commits)
    # Lower-numbered weeks are older, so traverse newest -> oldest:
    # each older week's total is the next newer total minus that
    # week's commit count.
    running_total = starting_commits
    for week in reversed(self.sorted_weeks):
        self.commits[week] += running_total
        running_total = self.commits[week]
def write_to_file(self, file_path='../github_stats_output/last_year_commits.csv'):
    """
    Write the weekly cumulative commit counts to a CSV file.

    Consecutive weeks with identical totals are collapsed to a single row
    (oldest week first). All columns other than date, organization, and
    commits are written as 0 placeholders.

    Args:
        file_path (str): Destination CSV path. Defaults to the previously
            hard-coded location so existing callers are unchanged.
    """
    with open(file_path, 'w+') as output:
        output.write('date,organization,repos,members,teams,'
                     + 'unique_contributors,total_contributors,forks,'
                     + 'stargazers,pull_requests,open_issues,has_readme,'
                     + 'has_license,pull_requests_open,pull_requests_closed,'
                     + 'commits\n')
        # No reverse here: print oldest weeks first.
        previous_commits = 0
        for week in self.sorted_weeks:
            if str(self.commits[week]) != previous_commits:  # delete dups
                week_formatted = datetime.datetime.utcfromtimestamp(
                    week).strftime('%Y-%m-%d')
                output.write(week_formatted
                             + ',llnl,0,0,0,0,0,0,0,0,0,0,0,0,0,'
                             + str(self.commits[week]) + '\n')
            previous_commits = str(self.commits[week])
def configure(backends, raise_errors=False):
    """Instantiate and configure the active metrics backends.

    :arg list-of-dicts backends: the backend configuration as a list of
        dicts, one per backend. Each dict has a ``class`` (a Python class
        or a dotted Python path to one) and an optional ``options`` dict
        passed to that backend's constructor. See each backend's
        documentation for its configurable options.
    :arg raise_errors bool: if True, configuration failures raise;
        otherwise they are logged and the offending backend is skipped.

    For example, this sets up a
    :py:class:`markus.backends.logging.LoggingMetrics` backend::

        markus.configure([
            {
                'class': 'markus.backends.logging.LoggingMetrics',
                'options': {
                    'logger_name': 'metrics'
                }
            }
        ])

    You can set up as many backends as you like.

    .. Note::

       Configure Markus during application startup before the app starts
       generating metrics; anything emitted earlier is dropped. It is
       safe to call :py:func:`markus.get_metrics` before configuration,
       including at module load time.
    """
    active_backends = []
    for backend_config in backends:
        clspath = backend_config['class']
        options = backend_config.get('options', {})
        if not isinstance(clspath, str):
            # Already a class object.
            cls = clspath
        else:
            # Dotted path: import the module, then pull the class off it.
            modpath, clsname = split_clspath(clspath)
            try:
                __import__(modpath)
                cls = getattr(sys.modules[modpath], clsname)
            except Exception:
                logger.exception('Exception while importing %s', clspath)
                if raise_errors:
                    raise
                continue
        try:
            active_backends.append(cls(options))
        except Exception:
            logger.exception(
                'Exception thrown while instantiating %s, %s',
                clspath,
                options
            )
            if raise_errors:
                raise
    _change_metrics(active_backends)
def get_metrics(thing, extra=''):
    """Return MetricsInterface instance with specified name.

    The name is used as the prefix for all keys generated with this
    :py:class:`markus.main.MetricsInterface`.

    The :py:class:`markus.main.MetricsInterface` is not tied to metrics
    backends. The list of active backends are globally configured. This
    allows us to create :py:class:`markus.main.MetricsInterface` classes
    without having to worry about bootstrapping order of the app.

    :arg class/instance/str thing: The name to use as a key prefix.
        If this is a class, it uses the dotted Python path. If this is an
        instance, it uses the dotted Python path plus ``str(instance)``.
    :arg str extra: Any extra bits to add to the end of the name.

    :returns: a ``MetricsInterface`` instance

    Examples:

    >>> from markus import get_metrics

    Create a MetricsInterface with the name "myapp" and generate a count
    with stat "myapp.thing1" and value 1:

    >>> metrics = get_metrics('myapp')
    >>> metrics.incr('thing1', value=1)

    Create a MetricsInterface with the prefix of the Python module it's
    being called in:

    >>> metrics = get_metrics(__name__)

    Create a MetricsInterface with the prefix as the qualname of the class:

    >>> class Foo:
    ...     def __init__(self):
    ...         self.metrics = get_metrics(self)

    Create a prefix of the class path plus some identifying information:

    >>> class Foo:
    ...     def __init__(self, myname):
    ...         self.metrics = get_metrics(self, extra=myname)
    ...
    >>> foo = Foo('jim')

    Assume that ``Foo`` is defined in the ``myapp`` module. Then this will
    generate the name ``myapp.Foo.jim``.
    """
    thing = thing or ''
    if not isinstance(thing, str):
        # Not a str: either a class or an instance. isinstance(thing, type)
        # (rather than the original type(thing) == type) also recognizes
        # classes built with a metaclass such as abc.ABCMeta, which would
        # otherwise be mis-treated as instances.
        if isinstance(thing, type):
            thing = '%s.%s' % (thing.__module__, thing.__name__)
        else:
            thing = '%s.%s' % (
                thing.__class__.__module__, thing.__class__.__name__
            )
    if extra:
        thing = '%s.%s' % (thing, extra)
    return MetricsInterface(thing)
def timing(self, stat, value, tags=None):
    """Record a timing value in milliseconds.

    The value is added to a set of values from which a statistical
    distribution is derived. Depending on the backend, you might end up
    with count, average, median, 95% and max for a set of timing values.

    This is useful for analyzing how long things take to occur: how long
    a function runs, how long it takes to upload files, or how long a
    database query takes to execute.

    :arg string stat: A period delimited alphanumeric key.
    :arg int value: A timing in milliseconds.
    :arg list-of-strings tags: Each string in the tag consists of a key
        and a value separated by a colon. Tags can make it easier to
        break down metrics for analysis.

        For example ``['env:stage', 'compressed:yes']``.

    .. Note::

       If you're timing a function or a block of code, it's probably more
       convenient to use :py:meth:`markus.main.MetricsInterface.timer` or
       :py:meth:`markus.main.MetricsInterface.timer_decorator`.
    """
    prefixed_stat = self._full_stat(stat)
    for metrics_backend in _get_metrics_backends():
        metrics_backend.timing(prefixed_stat, value=value, tags=tags)
def timer(self, stat, tags=None):
    """Contextmanager for easily computing timings.

    :arg string stat: A period delimited alphanumeric key.
    :arg list-of-strings tags: Each string in the tag consists of a key and
        a value separated by a colon. Tags can make it easier to break down
        metrics for analysis.

        For example ``['env:stage', 'compressed:yes']``.

    For example:

    >>> mymetrics = get_metrics(__name__)
    >>> def long_function():
    ...     with mymetrics.timer('long_function'):
    ...         # perform some thing we want to keep metrics on
    ...         pass

    .. Note::

       All timings generated with this are in milliseconds.
    """
    # NOTE(review): presumably decorated with @contextlib.contextmanager in
    # the full source (this generator is used via 'with') -- confirm.
    # time.perf_counter (py3) is monotonic; time.time (py2 fallback) is not.
    if six.PY3:
        start_time = time.perf_counter()
    else:
        start_time = time.time()
    # NOTE(review): if the body raises, the timing is never recorded
    # (yield is not wrapped in try/finally) -- confirm that is intended.
    yield
    if six.PY3:
        end_time = time.perf_counter()
    else:
        end_time = time.time()
    delta = end_time - start_time
    # Convert seconds to milliseconds before recording.
    self.timing(stat, value=delta * 1000.0, tags=tags)
def timer_decorator(self, stat, tags=None):
    """Decorator that times every call of the wrapped function.

    :arg string stat: A period delimited alphanumeric key.
    :arg list-of-strings tags: Each string in the tag consists of a key
        and a value separated by a colon. Tags can make it easier to
        break down metrics for analysis.

        For example ``['env:stage', 'compressed:yes']``.

    For example:

    >>> mymetrics = get_metrics(__name__)
    >>> @mymetrics.timer_decorator('long_function')
    ... def long_function():
    ...     # perform some thing we want to keep metrics on
    ...     pass

    .. Note::

       All timings generated with this are in milliseconds.
    """
    def decorate(fun):
        @wraps(fun)
        def wrapper(*args, **kwargs):
            # Delegate the actual measurement to the timer contextmanager.
            with self.timer(stat, tags):
                return fun(*args, **kwargs)
        return wrapper
    return decorate
q272635 | generate_tag | test | def generate_tag(key, value=None):
"""Generate a tag for use with the tag backends.
The key and value (if there is one) are sanitized according to the
following rules:
1. after the first character, all characters must be alphanumeric,
underscore, minus, period, or slash--invalid characters are converted
to "_"
2. lowercase
If a value is provided, the final tag is `key:value`.
The final tag must start with a letter. If it doesn't, an "a" is prepended.
The final tag is truncated to 200 characters.
If the final tag is "device", "host", or "source", then a "_" will be
appended the end.
:arg str key: the key to use
:arg str value: the value (if any)
:returns: the final tag
Examples:
>>> generate_tag('yellow')
'yellow'
>>> generate_tag('rule', 'is_yellow')
'rule:is_yellow'
Example with ``incr``:
>>> import markus
>>> mymetrics = markus.get_metrics(__name__)
>>> mymetrics.incr('somekey', value=1,
... tags=[generate_tag('rule', 'is_yellow')])
"""
# Verify the types
if not isinstance(key, six.string_types):
raise ValueError('key must be a string type, but got %r instead' % key)
if not isinstance(value, six.string_types + (NONE_TYPE,)):
raise ValueError('value must be None or a string type, but got %r instead' % value)
# Sanitize the key
key = BAD_TAG_CHAR_REGEXP.sub('_', key).strip()
# Build the tag
if value is None or not value.strip():
tag = key
else:
value = BAD_TAG_CHAR_REGEXP.sub('_', value).strip()
tag = '%s:%s' % (key, value)
if tag and not tag[0].isalpha():
tag = 'a' + tag
# Lowercase and truncate
tag = tag.lower()[:200]
# Add _ if it's a reserved word
if tag in ['device', 'host', 'source']:
tag = tag + '_'
return tag | python | {
"resource": ""
} |
q272636 | LoggingMetrics.timing | test | def timing(self, stat, value, tags=None):
"""Report a timing."""
self._log('timing', stat, value, tags) | python | {
"resource": ""
} |
q272637 | LoggingMetrics.histogram | test | def histogram(self, stat, value, tags=None):
"""Report a histogram."""
self._log('histogram', stat, value, tags) | python | {
"resource": ""
} |
q272638 | LoggingRollupMetrics.rollup | test | def rollup(self):
"""Roll up stats and log them."""
now = time.time()
if now < self.next_rollup:
return
self.next_rollup = now + self.flush_interval
for key, values in sorted(self.incr_stats.items()):
self.logger.info(
'%s INCR %s: count:%d|rate:%d/%d',
self.leader,
key,
len(values),
sum(values),
self.flush_interval
)
self.incr_stats[key] = []
for key, values in sorted(self.gauge_stats.items()):
if values:
self.logger.info(
'%s GAUGE %s: count:%d|current:%s|min:%s|max:%s',
self.leader,
key,
len(values),
values[-1],
min(values),
max(values),
)
else:
self.logger.info('%s (gauge) %s: no data', self.leader, key)
self.gauge_stats[key] = []
for key, values in sorted(self.histogram_stats.items()):
if values:
self.logger.info(
(
'%s HISTOGRAM %s: '
'count:%d|min:%.2f|avg:%.2f|median:%.2f|ninety-five:%.2f|max:%.2f'
),
self.leader,
key,
len(values),
min(values),
statistics.mean(values),
statistics.median(values),
values[int(len(values) * 95 / 100)],
max(values)
)
else:
self.logger.info('%s (histogram) %s: no data', self.leader, key)
self.histogram_stats[key] = [] | python | {
"resource": ""
} |
q272639 | order_enum | test | def order_enum(field, members):
"""
Make an annotation value that can be used to sort by an enum field.
``field``
The name of an EnumChoiceField.
``members``
An iterable of Enum members in the order to sort by.
Use like:
.. code-block:: python
desired_order = [MyEnum.bar, MyEnum.baz, MyEnum.foo]
ChoiceModel.objects\\
.annotate(my_order=order_enum('choice', desired_order))\\
.order_by('my_order')
As Enums are iterable, ``members`` can be the Enum itself
if the default ordering is desired:
.. code-block:: python
ChoiceModel.objects\\
.annotate(my_order=order_enum('choice', MyEnum))\\
.order_by('my_order')
.. warning:: On Python 2, Enums may not have a consistent order,
depending upon how they were defined.
You can set an explicit order using ``__order__`` to fix this.
See the ``enum34`` docs for more information.
Any enum members not present in the list of members
will be sorted to the end of the results.
"""
members = list(members)
return Case(
*(When(**{field: member, 'then': i})
for i, member in enumerate(members)),
default=len(members),
output_field=IntegerField()) | python | {
"resource": ""
} |
q272640 | EnumChoiceField.from_db_value | test | def from_db_value(self, value, expression, connection, context):
"""
Convert a string from the database into an Enum value
"""
if value is None:
return value
return self.enum[value] | python | {
"resource": ""
} |
q272641 | EnumChoiceField.to_python | test | def to_python(self, value):
"""
Convert a string from a form into an Enum value.
"""
if value is None:
return value
if isinstance(value, self.enum):
return value
return self.enum[value] | python | {
"resource": ""
} |
q272642 | EnumChoiceField.get_prep_value | test | def get_prep_value(self, value):
"""
Convert an Enum value into a string for the database
"""
if value is None:
return None
if isinstance(value, self.enum):
return value.name
raise ValueError("Unknown value {value:r} of type {cls}".format(
value=value, cls=type(value))) | python | {
"resource": ""
} |
q272643 | _resolve_path | test | def _resolve_path(obj, path):
"""path is a mul of coord or a coord"""
if obj.__class__ not in path.context.accept:
result = set()
for ctx in path.context.accept:
result |= {e for u in obj[ctx] for e in _resolve_path(u, path)}
return result
if isinstance(obj, Text):
if path.index is not None:
return {obj.children[path.index]}
return set(obj.children)
if isinstance(obj, (Fact, Theory)):
return _resolve_path_tree_graph(obj.tree_graph, path)
if isinstance(obj, Topic):
if path.kind == 'r':
if path.index is not None:
return {obj.root[path.index]}
return set(obj.root)
else:
if path.index is not None:
return {obj.flexing[path.index]}
return set(obj.flexing) | python | {
"resource": ""
} |
q272644 | project_usls_on_dictionary | test | def project_usls_on_dictionary(usls, allowed_terms=None):
"""`usls` is an iterable of usl.
return a mapping term -> usl list
"""
cells_to_usls = defaultdict(set)
tables = set()
for u in usls:
for t in u.objects(Term):
for c in t.singular_sequences:
# This is the first time we meet the cell c
if not cells_to_usls[c]:
tables.update(c.relations.contained)
cells_to_usls[c].add(u)
if allowed_terms:
allowed_terms = set(allowed_terms)
tables = tables & allowed_terms
cells_to_usls = {c: l for c, l in cells_to_usls.items() if c in allowed_terms}
tables_to_usls = {
table: list(set(u for c in table.singular_sequences for u in cells_to_usls[c]))
for table in tables if not isinstance(table, TableSet)
}
return tables_to_usls | python | {
"resource": ""
} |
q272645 | Histogram.mean | test | def mean(self):
"""Returns the mean value."""
if self.counter.value > 0:
return self.sum.value / self.counter.value
return 0.0 | python | {
"resource": ""
} |
q272646 | Meter.mark | test | def mark(self, value=1):
"""Record an event with the meter. By default it will record one event.
:param value: number of event to record
"""
self.counter += value
self.m1_rate.update(value)
self.m5_rate.update(value)
self.m15_rate.update(value) | python | {
"resource": ""
} |
q272647 | Meter.mean_rate | test | def mean_rate(self):
"""
Returns the mean rate of the events since the start of the process.
"""
if self.counter.value == 0:
return 0.0
else:
elapsed = time() - self.start_time
return self.counter.value / elapsed | python | {
"resource": ""
} |
q272648 | Derive.mark | test | def mark(self, value=1):
"""Record an event with the derive.
:param value: counter value to record
"""
last = self.last.get_and_set(value)
if last <= value:
value = value - last
super(Derive, self).mark(value) | python | {
"resource": ""
} |
q272649 | StatsDReporter.send_metric | test | def send_metric(self, name, metric):
"""Send metric and its snapshot."""
config = SERIALIZER_CONFIG[class_name(metric)]
mmap(
self._buffered_send_metric,
self.serialize_metric(
metric,
name,
config['keys'],
config['serialized_type']
)
)
if hasattr(metric, 'snapshot') and config.get('snapshot_keys'):
mmap(
self._buffered_send_metric,
self.serialize_metric(
metric.snapshot,
name,
config['snapshot_keys'],
config['serialized_type']
)
) | python | {
"resource": ""
} |
q272650 | StatsDReporter.serialize_metric | test | def serialize_metric(self, metric, m_name, keys, m_type):
"""Serialize and send available measures of a metric."""
return [
self.format_metric_string(m_name, getattr(metric, key), m_type)
for key in keys
] | python | {
"resource": ""
} |
q272651 | StatsDReporter.format_metric_string | test | def format_metric_string(self, name, value, m_type):
"""Compose a statsd compatible string for a metric's measurement."""
# NOTE(romcheg): This serialized metric template is based on
# statsd's documentation.
template = '{name}:{value}|{m_type}\n'
if self.prefix:
name = "{prefix}.{m_name}".format(prefix=self.prefix, m_name=name)
return template.format(name=name, value=value, m_type=m_type) | python | {
"resource": ""
} |
q272652 | StatsDReporter._buffered_send_metric | test | def _buffered_send_metric(self, metric_str):
"""Add a metric to the buffer."""
self.batch_count += 1
self.batch_buffer += metric_str
# NOTE(romcheg): Send metrics if the number of metrics in the buffer
# has reached the threshold for sending.
if self.batch_count >= self.batch_size:
self._send() | python | {
"resource": ""
} |
q272653 | IniStorage.get | test | def get(self, section, option, **kwargs):
"""
Get method that raises MissingSetting if the value was unset.
This differs from the SafeConfigParser which may raise either a
NoOptionError or a NoSectionError.
We take extra **kwargs because the Python 3.5 configparser extends the
get method signature and it calls self with those parameters.
def get(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
"""
try:
ret = super(ExactOnlineConfig, self).get(section, option, **kwargs)
except (NoOptionError, NoSectionError):
raise MissingSetting(option, section)
return ret | python | {
"resource": ""
} |
q272654 | _json_safe | test | def _json_safe(data):
"""
json.loads wants an unistr in Python3. Convert it.
"""
if not hasattr(data, 'encode'):
try:
data = data.decode('utf-8')
except UnicodeDecodeError:
raise ValueError(
'Expected valid UTF8 for JSON data, got %r' % (data,))
return data | python | {
"resource": ""
} |
q272655 | ExactOnlineConfig.get_or_set_default | test | def get_or_set_default(self, section, option, value):
"""
Base method to fetch values and to set defaults in case they
don't exist.
"""
try:
ret = self.get(section, option)
except MissingSetting:
self.set(section, option, value)
ret = value
return ret | python | {
"resource": ""
} |
q272656 | ExactInvoice.get_ledger_code_to_guid_map | test | def get_ledger_code_to_guid_map(self, codes):
"""
Convert set of human codes and to a dict of code to exactonline
guid mappings.
Example::
ret = inv.get_ledger_code_to_guid_map(['1234', '5555'])
ret == {'1234': '<guid1_from_exactonline_ledgeraccounts>',
'5555': '<guid2_from_exactonline_ledgeraccounts>'}
"""
if codes:
codes = set(str(i) for i in codes)
ledger_ids = self._api.ledgeraccounts.filter(code__in=codes)
ret = dict((str(i['Code']), i['ID']) for i in ledger_ids)
found = set(ret.keys())
missing = (codes - found)
if missing:
raise UnknownLedgerCodes(missing)
return ret
return {} | python | {
"resource": ""
} |
q272657 | V1Division.get_divisions | test | def get_divisions(self):
"""
Get the "current" division and return a dictionary of divisions
so the user can select the right one.
"""
ret = self.rest(GET('v1/current/Me?$select=CurrentDivision'))
current_division = ret[0]['CurrentDivision']
assert isinstance(current_division, int)
urlbase = 'v1/%d/' % (current_division,)
resource = urljoin(urlbase, 'hrm/Divisions?$select=Code,Description')
ret = self.rest(GET(resource))
choices = dict((i['Code'], i['Description']) for i in ret)
return choices, current_division | python | {
"resource": ""
} |
q272658 | Invoices.map_exact2foreign_invoice_numbers | test | def map_exact2foreign_invoice_numbers(self, exact_invoice_numbers=None):
"""
Optionally supply a list of ExactOnline invoice numbers.
Returns a dictionary of ExactOnline invoice numbers to foreign
(YourRef) invoice numbers.
"""
# Quick, select all. Not the most nice to the server though.
if exact_invoice_numbers is None:
ret = self.filter(select='InvoiceNumber,YourRef')
return dict((i['InvoiceNumber'], i['YourRef']) for i in ret)
# Slower, select what we want to know. More work for us.
exact_to_foreign_map = {}
# Do it in batches. If we append 300 InvoiceNumbers at once, we
# get a 12kB URI. (If the list is empty, we skip the entire
# forloop and correctly return the empty dict.)
exact_invoice_numbers = list(set(exact_invoice_numbers)) # unique
for offset in range(0, len(exact_invoice_numbers), 40):
batch = exact_invoice_numbers[offset:(offset + 40)]
filter_ = ' or '.join(
'InvoiceNumber eq %s' % (i,) for i in batch)
assert filter_ # if filter was empty, we'd get all!
ret = self.filter(filter=filter_, select='InvoiceNumber,YourRef')
exact_to_foreign_map.update(
dict((i['InvoiceNumber'], i['YourRef']) for i in ret))
# Any values we missed?
for exact_invoice_number in exact_invoice_numbers:
if exact_invoice_number not in exact_to_foreign_map:
exact_to_foreign_map[exact_invoice_number] = None
return exact_to_foreign_map | python | {
"resource": ""
} |
q272659 | solve | test | def solve(grid):
"""
solve a Sudoku grid inplace
"""
clauses = sudoku_clauses()
for i in range(1, 10):
for j in range(1, 10):
d = grid[i - 1][j - 1]
# For each digit already known, a clause (with one literal).
# Note:
# We could also remove all variables for the known cells
# altogether (which would be more efficient). However, for
# the sake of simplicity, we decided not to do that.
if d:
clauses.append([v(i, j, d)])
# solve the SAT problem
sol = set(pycosat.solve(clauses))
def read_cell(i, j):
# return the digit of cell i, j according to the solution
for d in range(1, 10):
if v(i, j, d) in sol:
return d
for i in range(1, 10):
for j in range(1, 10):
grid[i - 1][j - 1] = read_cell(i, j) | python | {
"resource": ""
} |
q272660 | view | test | def view(injector):
"""Create Django class-based view from injector class."""
handler = create_handler(View, injector)
apply_http_methods(handler, injector)
return injector.let(as_view=handler.as_view) | python | {
"resource": ""
} |
q272661 | form_view | test | def form_view(injector):
"""Create Django form processing class-based view from injector class."""
handler = create_handler(FormView, injector)
apply_form_methods(handler, injector)
return injector.let(as_view=handler.as_view) | python | {
"resource": ""
} |
q272662 | method_view | test | def method_view(injector):
"""Create Flask method based dispatching view from injector class."""
handler = create_handler(MethodView)
apply_http_methods(handler, injector)
return injector.let(as_view=handler.as_view) | python | {
"resource": ""
} |
q272663 | api_view | test | def api_view(injector):
"""Create DRF class-based API view from injector class."""
handler = create_handler(APIView, injector)
apply_http_methods(handler, injector)
apply_api_view_methods(handler, injector)
return injector.let(as_view=handler.as_view) | python | {
"resource": ""
} |
q272664 | generic_api_view | test | def generic_api_view(injector):
"""Create DRF generic class-based API view from injector class."""
handler = create_handler(GenericAPIView, injector)
apply_http_methods(handler, injector)
apply_api_view_methods(handler, injector)
apply_generic_api_view_methods(handler, injector)
return injector.let(as_view=handler.as_view) | python | {
"resource": ""
} |
q272665 | model_view_set | test | def model_view_set(injector):
"""Create DRF model view set from injector class."""
handler = create_handler(ModelViewSet, injector)
apply_api_view_methods(handler, injector)
apply_generic_api_view_methods(handler, injector)
apply_model_view_set_methods(handler, injector)
return injector.let(as_viewset=lambda: handler) | python | {
"resource": ""
} |
q272666 | stream_from_fd | test | def stream_from_fd(fd, loop):
"""Recieve a streamer for a given file descriptor."""
reader = asyncio.StreamReader(loop=loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
waiter = asyncio.futures.Future(loop=loop)
transport = UnixFileDescriptorTransport(
loop=loop,
fileno=fd,
protocol=protocol,
waiter=waiter,
)
try:
yield from waiter
except Exception:
transport.close()
if loop.get_debug():
logger.debug("Read fd %r connected: (%r, %r)", fd, transport, protocol)
return reader, transport | python | {
"resource": ""
} |
q272667 | UnixFileDescriptorTransport._read_ready | test | def _read_ready(self):
"""Called by the event loop whenever the fd is ready for reading."""
try:
data = os.read(self._fileno, self.max_size)
except InterruptedError:
# No worries ;)
pass
except OSError as exc:
# Some OS-level problem, crash.
self._fatal_error(exc, "Fatal read error on file descriptor read")
else:
if data:
self._protocol.data_received(data)
else:
# We reached end-of-file.
if self._loop.get_debug():
logger.info("%r was closed by the kernel", self)
self._closing = False
self.pause_reading()
self._loop.call_soon(self._protocol.eof_received)
self._loop.call_soon(self._call_connection_lost, None) | python | {
"resource": ""
} |
q272668 | UnixFileDescriptorTransport._close | test | def _close(self, error=None):
"""Actual closing code, both from manual close and errors."""
self._closing = True
self.pause_reading()
self._loop.call_soon(self._call_connection_lost, error) | python | {
"resource": ""
} |
q272669 | UnixFileDescriptorTransport._call_connection_lost | test | def _call_connection_lost(self, error):
"""Finalize closing."""
try:
self._protocol.connection_lost(error)
finally:
os.close(self._fileno)
self._fileno = None
self._protocol = None
self._loop = None | python | {
"resource": ""
} |
q272670 | Watcher.watch | test | def watch(self, path, flags, *, alias=None):
"""Add a new watching rule."""
if alias is None:
alias = path
if alias in self.requests:
raise ValueError("A watch request is already scheduled for alias %s" % alias)
self.requests[alias] = (path, flags)
if self._fd is not None:
# We've started, register the watch immediately.
self._setup_watch(alias, path, flags) | python | {
"resource": ""
} |
q272671 | Watcher.unwatch | test | def unwatch(self, alias):
"""Stop watching a given rule."""
if alias not in self.descriptors:
raise ValueError("Unknown watch alias %s; current set is %r" % (alias, list(self.descriptors.keys())))
wd = self.descriptors[alias]
errno = LibC.inotify_rm_watch(self._fd, wd)
if errno != 0:
raise IOError("Failed to close watcher %d: errno=%d" % (wd, errno))
del self.descriptors[alias]
del self.requests[alias]
del self.aliases[wd] | python | {
"resource": ""
} |
q272672 | Watcher._setup_watch | test | def _setup_watch(self, alias, path, flags):
"""Actual rule setup."""
assert alias not in self.descriptors, "Registering alias %s twice!" % alias
wd = LibC.inotify_add_watch(self._fd, path, flags)
if wd < 0:
raise IOError("Error setting up watch on %s with flags %s: wd=%s" % (
path, flags, wd))
self.descriptors[alias] = wd
self.aliases[wd] = alias | python | {
"resource": ""
} |
q272673 | Watcher.setup | test | def setup(self, loop):
"""Start the watcher, registering new watches if any."""
self._loop = loop
self._fd = LibC.inotify_init()
for alias, (path, flags) in self.requests.items():
self._setup_watch(alias, path, flags)
# We pass ownership of the fd to the transport; it will close it.
self._stream, self._transport = yield from aioutils.stream_from_fd(self._fd, loop) | python | {
"resource": ""
} |
q272674 | Watcher.get_event | test | def get_event(self):
"""Fetch an event.
This coroutine will swallow events for removed watches.
"""
while True:
prefix = yield from self._stream.readexactly(PREFIX.size)
if prefix == b'':
# We got closed, return None.
return
wd, flags, cookie, length = PREFIX.unpack(prefix)
path = yield from self._stream.readexactly(length)
# All async performed, time to look at the event's content.
if wd not in self.aliases:
# Event for a removed watch, skip it.
continue
decoded_path = struct.unpack('%ds' % length, path)[0].rstrip(b'\x00').decode('utf-8')
return Event(
flags=flags,
cookie=cookie,
name=decoded_path,
alias=self.aliases[wd],
) | python | {
"resource": ""
} |
q272675 | Message.touch | test | def touch(self):
"""
Respond to ``nsqd`` that you need more time to process the message.
"""
assert not self._has_responded
self.trigger(event.TOUCH, message=self) | python | {
"resource": ""
} |
q272676 | BackoffTimer.success | test | def success(self):
"""Update the timer to reflect a successfull call"""
if self.interval == 0.0:
return
self.short_interval -= self.short_unit
self.long_interval -= self.long_unit
self.short_interval = max(self.short_interval, Decimal(0))
self.long_interval = max(self.long_interval, Decimal(0))
self.update_interval() | python | {
"resource": ""
} |
q272677 | BackoffTimer.failure | test | def failure(self):
"""Update the timer to reflect a failed call"""
self.short_interval += self.short_unit
self.long_interval += self.long_unit
self.short_interval = min(self.short_interval, self.max_short_timer)
self.long_interval = min(self.long_interval, self.max_long_timer)
self.update_interval() | python | {
"resource": ""
} |
q272678 | Reader.close | test | def close(self):
"""
Closes all connections stops all periodic callbacks
"""
for conn in self.conns.values():
conn.close()
self.redist_periodic.stop()
if self.query_periodic is not None:
self.query_periodic.stop() | python | {
"resource": ""
} |
q272679 | Reader.is_starved | test | def is_starved(self):
"""
Used to identify when buffered messages should be processed and responded to.
When max_in_flight > 1 and you're batching messages together to perform work
is isn't possible to just compare the len of your list of buffered messages against
your configured max_in_flight (because max_in_flight may not be evenly divisible
by the number of producers you're connected to, ie. you might never get that many
messages... it's a *max*).
Example::
def message_handler(self, nsq_msg, reader):
# buffer messages
if reader.is_starved():
# perform work
reader = nsq.Reader(...)
reader.set_message_handler(functools.partial(message_handler, reader=reader))
nsq.run()
"""
for conn in itervalues(self.conns):
if conn.in_flight > 0 and conn.in_flight >= (conn.last_rdy * 0.85):
return True
return False | python | {
"resource": ""
} |
q272680 | Reader.connect_to_nsqd | test | def connect_to_nsqd(self, host, port):
"""
Adds a connection to ``nsqd`` at the specified address.
:param host: the address to connect to
:param port: the port to connect to
"""
assert isinstance(host, string_types)
assert isinstance(port, int)
conn = AsyncConn(host, port, **self.conn_kwargs)
conn.on('identify', self._on_connection_identify)
conn.on('identify_response', self._on_connection_identify_response)
conn.on('auth', self._on_connection_auth)
conn.on('auth_response', self._on_connection_auth_response)
conn.on('error', self._on_connection_error)
conn.on('close', self._on_connection_close)
conn.on('ready', self._on_connection_ready)
conn.on('message', self._on_message)
conn.on('heartbeat', self._on_heartbeat)
conn.on('backoff', functools.partial(self._on_backoff_resume, success=False))
conn.on('resume', functools.partial(self._on_backoff_resume, success=True))
conn.on('continue', functools.partial(self._on_backoff_resume, success=None))
if conn.id in self.conns:
return
# only attempt to re-connect once every 10s per destination
# this throttles reconnects to failed endpoints
now = time.time()
last_connect_attempt = self.connection_attempts.get(conn.id)
if last_connect_attempt and last_connect_attempt > now - 10:
return
self.connection_attempts[conn.id] = now
logger.info('[%s:%s] connecting to nsqd', conn.id, self.name)
conn.connect()
return conn | python | {
"resource": ""
} |
q272681 | Reader.query_lookupd | test | def query_lookupd(self):
"""
Trigger a query of the configured ``nsq_lookupd_http_addresses``.
"""
endpoint = self.lookupd_http_addresses[self.lookupd_query_index]
self.lookupd_query_index = (self.lookupd_query_index + 1) % len(self.lookupd_http_addresses)
# urlsplit() is faulty if scheme not present
if '://' not in endpoint:
endpoint = 'http://' + endpoint
scheme, netloc, path, query, fragment = urlparse.urlsplit(endpoint)
if not path or path == "/":
path = "/lookup"
params = parse_qs(query)
params['topic'] = self.topic
query = urlencode(_utf8_params(params), doseq=1)
lookupd_url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
req = tornado.httpclient.HTTPRequest(
lookupd_url, method='GET',
headers={'Accept': 'application/vnd.nsq; version=1.0'},
connect_timeout=self.lookupd_connect_timeout,
request_timeout=self.lookupd_request_timeout)
callback = functools.partial(self._finish_query_lookupd, lookupd_url=lookupd_url)
self.http_client.fetch(req, callback=callback) | python | {
"resource": ""
} |
q272682 | Reader.set_max_in_flight | test | def set_max_in_flight(self, max_in_flight):
"""Dynamically adjust the reader max_in_flight. Set to 0 to immediately disable a Reader"""
assert isinstance(max_in_flight, int)
self.max_in_flight = max_in_flight
if max_in_flight == 0:
# set RDY 0 to all connections
for conn in itervalues(self.conns):
if conn.rdy > 0:
logger.debug('[%s:%s] rdy: %d -> 0', conn.id, self.name, conn.rdy)
self._send_rdy(conn, 0)
self.total_rdy = 0
else:
self.need_rdy_redistributed = True
self._redistribute_rdy_state() | python | {
"resource": ""
} |
q272683 | Reader.giving_up | test | def giving_up(self, message):
"""
Called when a message has been received where ``msg.attempts > max_tries``
This is useful to subclass and override to perform a task (such as writing to disk, etc.)
:param message: the :class:`nsq.Message` received
"""
logger.warning('[%s] giving up on message %s after %d tries (max:%d) %r',
self.name, message.id, message.attempts, self.max_tries, message.body) | python | {
"resource": ""
} |
q272684 | EventedMixin.on | test | def on(self, name, callback):
"""
Listen for the named event with the specified callback.
:param name: the name of the event
:type name: string
:param callback: the callback to execute when the event is triggered
:type callback: callable
"""
assert callable(callback), 'callback is not callable'
if callback in self.__listeners[name]:
raise DuplicateListenerError
self.__listeners[name].append(callback) | python | {
"resource": ""
} |
q272685 | EventedMixin.off | test | def off(self, name, callback):
"""
Stop listening for the named event via the specified callback.
:param name: the name of the event
:type name: string
:param callback: the callback that was originally used
:type callback: callable
"""
if callback not in self.__listeners[name]:
raise InvalidListenerError
self.__listeners[name].remove(callback) | python | {
"resource": ""
} |
q272686 | EventedMixin.trigger | test | def trigger(self, name, *args, **kwargs):
"""
Execute the callbacks for the listeners on the specified event with the
supplied arguments.
All extra arguments are passed through to each callback.
:param name: the name of the event
:type name: string
"""
for ev in self.__listeners[name]:
ev(*args, **kwargs) | python | {
"resource": ""
} |
q272687 | Writer.pub | test | def pub(self, topic, msg, callback=None):
"""
publish a message to nsq
:param topic: nsq topic
:param msg: message body (bytes)
:param callback: function which takes (conn, data) (data may be nsq.Error)
"""
self._pub('pub', topic, msg, callback=callback) | python | {
"resource": ""
} |
q272688 | Learner.set_feature_transform | test | def set_feature_transform(self, mode='polynomial', degree=1):
'''
Transform data feature to high level
'''
if self.status != 'load_train_data':
print("Please load train data first.")
return self.train_X
self.feature_transform_mode = mode
self.feature_transform_degree = degree
self.train_X = self.train_X[:, 1:]
self.train_X = utility.DatasetLoader.feature_transform(
self.train_X,
self.feature_transform_mode,
self.feature_transform_degree
)
return self.train_X | python | {
"resource": ""
} |
q272689 | Learner.prediction | test | def prediction(self, input_data='', mode='test_data'):
'''
Make prediction
input test data
output the prediction
'''
prediction = {}
if (self.status != 'train'):
print("Please load train data and init W then train the W first.")
return prediction
if (input_data == ''):
print("Please input test data for prediction.")
return prediction
if mode == 'future_data':
data = input_data.split()
input_data_x = [float(v) for v in data]
input_data_x = utility.DatasetLoader.feature_transform(
np.array(input_data_x).reshape(1, -1),
self.feature_transform_mode,
self.feature_transform_degree
)
input_data_x = np.ravel(input_data_x)
prediction = self.score_function(input_data_x, self.W)
return {"input_data_x": input_data_x, "input_data_y": None, "prediction": prediction}
else:
data = input_data.split()
input_data_x = [float(v) for v in data[:-1]]
input_data_x = utility.DatasetLoader.feature_transform(
np.array(input_data_x).reshape(1, -1),
self.feature_transform_mode,
self.feature_transform_degree
)
input_data_x = np.ravel(input_data_x)
input_data_y = float(data[-1])
prediction = self.score_function(input_data_x, self.W)
return {"input_data_x": input_data_x, "input_data_y": input_data_y, "prediction": prediction} | python | {
"resource": ""
} |
q272690 | LogisticRegression.theta | test | def theta(self, s):
'''
Theta sigmoid function
'''
s = np.where(s < -709, -709, s)
return 1 / (1 + np.exp((-1) * s)) | python | {
"resource": ""
} |
def parse_log(log_file):
    """Collect trimming statistics from a single Trimmomatic trimlog file.

    Each log line ends with four integer fields:
    surviving read length, bases trimmed from the 5' end, position of the
    last surviving base, and bases trimmed from the 3' end.

    Parameters
    ----------
    log_file : str
        Path to the Trimmomatic trimlog file.

    Returns
    -------
    :py:class:`OrderedDict`
        Keys: ``clean_len`` (total length after trimming), ``total_trim``
        (total trimmed base pairs), ``total_trim_perc`` (percentage of
        trimmed base pairs), ``5trim``, ``3trim`` (bases trimmed at each
        end) and ``bad_reads`` (reads trimmed away completely).
    """
    stats = OrderedDict([
        ("clean_len", 0),
        ("total_trim", 0),
        ("total_trim_perc", 0),
        ("5trim", 0),
        ("3trim", 0),
        ("bad_reads", 0),
    ])

    with open(log_file) as log_fh:
        for entry in log_fh:
            # Only the last four whitespace-separated tokens are numeric
            # statistics; everything before them is the read identifier.
            fields = [int(tok) for tok in entry.strip().split()[-4:]]
            surviving, head_trim, tail_trim = fields[0], fields[1], fields[3]

            # A read with no surviving bases was completely trimmed away.
            if not surviving:
                stats["bad_reads"] += 1

            stats["5trim"] += head_trim
            stats["3trim"] += tail_trim
            stats["total_trim"] += head_trim + tail_trim
            stats["clean_len"] += surviving

    original_len = stats["clean_len"] + stats["total_trim"]
    # Guard against an empty log file (division by zero).
    stats["total_trim_perc"] = (
        round((stats["total_trim"] / original_len) * 100, 2)
        if original_len else 0)

    return stats
"resource": ""
} |
def clean_up(fastq_pairs, clear):
    """Remove unwanted temporary files from the working directory.

    Unpaired fastq files produced by Trimmomatic are always deleted.
    The original input fastq files are only deleted when ``clear`` is
    ``'true'`` AND both trimmed outputs are present AND the real path of
    the input lies inside a nextflow work directory.
    """
    # Drop unpaired reads — they are never used downstream.
    for fname in os.listdir("."):
        if fname.endswith("_U.fastq.gz"):
            os.remove(fname)

    # Only clear the inputs when the two expected trimmed outputs exist,
    # i.e. the trimming actually succeeded.
    trimmed_outputs = [f for f in os.listdir(".")
                       if f.endswith("_trim.fastq.gz")]
    if clear == "true" and len(trimmed_outputs) == 2:
        for fq in fastq_pairs:
            # Resolve symlinks so we delete the real staged file.
            real_path = os.path.realpath(fq)
            logger.debug("Removing temporary fastq file path: {}".format(
                real_path))
            # Safety check: only remove files inside a nextflow work dir.
            if re.match(".*/work/.{2}/.{30}/.*", real_path):
                os.remove(real_path)
"resource": ""
} |
def merge_default_adapters():
    """Merge all default adapter files from the Trimmomatic adapters
    directory into a single fasta file in the current working directory.

    Returns
    -------
    str
        Path to the merged adapters file.
    """
    adapter_paths = [os.path.join(ADAPTERS_PATH, fname)
                     for fname in os.listdir(ADAPTERS_PATH)]
    merged_path = os.path.join(os.getcwd(), "default_adapters.fasta")

    # Stream every adapter file into the single merged output.
    with open(merged_path, "w") as out_fh, \
            fileinput.input(adapter_paths) as adapters_fh:
        for line in adapters_fh:
            out_fh.write("{}{}".format(line, "\\n"))

    return merged_path
"resource": ""
} |
def main(sample_id, fastq_pair, trim_range, trim_opts, phred, adapters_file,
         clear):
    """ Main executor of the trimmomatic template.

    Builds the Trimmomatic PE command line, runs it as a subprocess,
    parses the resulting trimlog and writes a pass/fail flag to the
    ``.status`` file.

    NOTE(review): strings such as "$task.memory" and "$task.cpus" look like
    nextflow placeholders substituted before execution — confirm this file
    is rendered as a nextflow process template.

    Parameters
    ----------
    sample_id : str
        Sample Identification string.
    fastq_pair : list
        Two element list containing the paired FastQ files.
    trim_range : list
        Two element list containing the trimming range.
    trim_opts : list
        Four element list containing several trimmomatic options:
        [*SLIDINGWINDOW*; *LEADING*; *TRAILING*; *MINLEN*]
    phred : int
        Guessed phred score for the sample. The phred score is a generated
        output from :py:class:`templates.integrity_coverage`.
    adapters_file : str
        Path to adapters file. If not provided, or the path is not available,
        it will use the default adapters from Trimmomatic will be used
    clear : str
        Can be either 'true' or 'false'. If 'true', the input fastq files will
        be removed at the end of the run, IF they are in the working directory
    """

    logger.info("Starting trimmomatic")

    # Create base CLI
    cli = [
        "java",
        # Convert the task memory string (e.g. "4 GB") into a JVM flag
        # (e.g. "-Xmx4g") by dropping the last char and removing spaces.
        "-Xmx{}".format("$task.memory"[:-1].lower().replace(" ", "")),
        "-jar",
        TRIM_PATH.strip(),
        "PE",
        "-threads",
        "$task.cpus"
    ]

    # If the phred encoding was detected, provide it
    try:
        # Check if the provided PHRED can be converted to int
        phred = int(phred)
        phred_flag = "-phred{}".format(str(phred))
        cli += [phred_flag]
    # Could not detect phred encoding. Do not add explicit encoding to
    # trimmomatic and let it guess
    except ValueError:
        pass

    # Add input samples to CLI
    cli += fastq_pair

    # Add output file names
    # NOTE(review): this uses the module-level SAMPLE_ID constant rather
    # than the sample_id parameter — confirm both always hold the same value.
    output_names = []
    for i in range(len(fastq_pair)):
        output_names.append("{}_{}_trim.fastq.gz".format(
            SAMPLE_ID, str(i + 1)))
        output_names.append("{}_{}_U.fastq.gz".format(
            SAMPLE_ID, str(i + 1)))
    cli += output_names

    # Optional fixed-range cropping (CROP trims to a max length,
    # HEADCROP removes bases from the start).
    if trim_range != ["None"]:
        cli += [
            "CROP:{}".format(trim_range[1]),
            "HEADCROP:{}".format(trim_range[0]),
        ]

    if os.path.exists(adapters_file):
        logger.debug("Using the provided adapters file '{}'".format(
            adapters_file))
    else:
        logger.debug("Adapters file '{}' not provided or does not exist. Using"
                     " default adapters".format(adapters_file))
        # Fall back to the adapters bundled with Trimmomatic, merged into
        # a single fasta file.
        adapters_file = merge_default_adapters()

    cli += [
        "ILLUMINACLIP:{}:3:30:10:6:true".format(adapters_file)
    ]

    #create log file im temporary dir to avoid issues when running on a docker container in macOS
    logfile = os.path.join(tempfile.mkdtemp(prefix='tmp'), "{}_trimlog.txt".format(sample_id))

    # Add trimmomatic options
    cli += [
        "SLIDINGWINDOW:{}".format(trim_opts[0]),
        "LEADING:{}".format(trim_opts[1]),
        "TRAILING:{}".format(trim_opts[2]),
        "MINLEN:{}".format(trim_opts[3]),
        "TOPHRED33",
        "-trimlog",
        logfile
    ]

    logger.debug("Running trimmomatic subprocess with command: {}".format(cli))

    p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()

    # Attempt to decode STDERR output from bytes. If unsuccessful, coerce to
    # string
    try:
        stderr = stderr.decode("utf8")
    except (UnicodeDecodeError, AttributeError):
        stderr = str(stderr)

    logger.info("Finished trimmomatic subprocess with STDOUT:\\n"
                "======================================\\n{}".format(stdout))
    logger.info("Finished trimmomatic subprocesswith STDERR:\\n"
                "======================================\\n{}".format(stderr))
    logger.info("Finished trimmomatic with return code: {}".format(
        p.returncode))

    # Parse the trimlog and emit the per-sample statistics report.
    trimmomatic_log(logfile, sample_id)

    # Only clean temporary inputs if trimmomatic succeeded and produced
    # the expected trimmed output.
    if p.returncode == 0 and os.path.exists("{}_1_trim.fastq.gz".format(
            SAMPLE_ID)):
        clean_up(fastq_pair, clear)

    # Check if trimmomatic ran successfully. If not, write the error message
    # to the status channel and exit.
    with open(".status", "w") as status_fh:
        if p.returncode != 0:
            status_fh.write("fail")
            return
        else:
            status_fh.write("pass")
"resource": ""
} |
def depth_file_reader(depth_file):
    """Parse a samtools depth file into a per-plasmid coverage mapping.

    Each input line has three whitespace-separated fields: reference name,
    position, and number of reads aligned at that position.

    Parameters
    ----------
    depth_file: textIO
        Open handle to the depth file of a sample (closed on return).

    Returns
    -------
    depth_dic_coverage: dict
        Maps each reference accession to a dict of position -> read depth.
    """
    depth_dic_coverage = {}

    for entry in depth_file:
        fields = entry.split()  # split by any white space
        # Keep only the first three underscore-separated tokens of the
        # reference name (the accession identifier).
        accession = "_".join(fields[0].strip().split("_")[0:3])
        position = fields[1]
        num_reads_align = float(fields[2].rstrip())
        depth_dic_coverage.setdefault(accession, {})[position] = \
            num_reads_align

    logger.info("Finished parsing depth file.")
    depth_file.close()

    logger.debug("Size of dict_cov: {} kb".format(
        asizeof(depth_dic_coverage)/1024))

    return depth_dic_coverage
"resource": ""
} |
def main(depth_file, json_dict, cutoff, sample_id):
    """
    Function that handles the inputs required to parse depth files from bowtie
    and dumps a dict to a json file that can be imported into pATLAS.

    Writes two outputs: ``<depth_file>_mapping.json`` with the percentage of
    covered bases per plasmid, and ``.report.json`` with the table/plot data.

    Parameters
    ----------
    depth_file: str
        the path to depth file for each sample
    json_dict: str
        the file that contains the dictionary with keys and values for
        accessions
        and their respective lengths
    cutoff: str
        the cutoff used to trim the unwanted matches for the minimum coverage
        results from mapping. This value may range between 0 and 1.
    sample_id: str
        the id of the sample being parsed
    """

    # check for the appropriate value for the cutoff value for coverage results
    logger.debug("Cutoff value: {}. Type: {}".format(cutoff, type(cutoff)))
    try:
        cutoff_val = float(cutoff)
        # Low cutoffs keep many low-coverage matches, inflating the report.
        if cutoff_val < 0.4:
            logger.warning("This cutoff value will generate a high volume of "
                           "plot data. Therefore '.report.json' can be too big")
    except ValueError:
        logger.error("Cutoff value should be a string such as: '0.6'. "
                     "The outputted value: {}. Make sure to provide an "
                     "appropriate value for --cov_cutoff".format(cutoff))
        sys.exit(1)

    # loads dict from file, this file is provided in docker image
    plasmid_length = json.load(open(json_dict))

    if plasmid_length:
        logger.info("Loaded dictionary of plasmid lengths")
    else:
        logger.error("Something went wrong and plasmid lengths dictionary"
                     "could not be loaded. Check if process received this"
                     "param successfully.")
        sys.exit(1)

    # read depth file
    depth_file_in = open(depth_file)

    # first reads the depth file and generates dictionaries to handle the input
    # to a simpler format
    logger.info("Reading depth file and creating dictionary to dump.")
    depth_dic_coverage = depth_file_reader(depth_file_in)
    # generate_jsons is defined elsewhere in this module — presumably it
    # applies the cutoff and builds the coverage summaries; verify there.
    percentage_bases_covered, dict_cov = generate_jsons(depth_dic_coverage,
                                                        plasmid_length,
                                                        cutoff_val)

    if percentage_bases_covered and dict_cov:
        logger.info("percentage_bases_covered length: {}".format(
            str(len(percentage_bases_covered))))
        logger.info("dict_cov length: {}".format(str(len(dict_cov))))
    else:
        logger.error("Both dicts that dump to JSON file or .report.json are "
                     "empty.")

    # then dump do file
    logger.info("Dumping to {}".format("{}_mapping.json".format(depth_file)))
    with open("{}_mapping.json".format(depth_file), "w") as output_json:
        output_json.write(json.dumps(percentage_bases_covered))

    # Assemble the report payload consumed by the reporting front-end.
    json_dic = {
        "tableRow": [{
            "sample": sample_id,
            "data": [{
                "header": "Mapping",
                "table": "plasmids",
                "patlas_mapping": percentage_bases_covered,
                "value": len(percentage_bases_covered)
            }]
        }],
        "sample": sample_id,
        "patlas_mapping": percentage_bases_covered,
        "plotData": [{
            "sample": sample_id,
            "data": {
                "patlasMappingSliding": dict_cov
            },
        }]
    }

    logger.debug("Size of dict_cov: {} kb".format(asizeof(json_dic)/1024))

    logger.info("Writing to .report.json")
    with open(".report.json", "w") as json_report:
        # Compact separators keep the report file as small as possible.
        json_report.write(json.dumps(json_dic, separators=(",", ":")))
"resource": ""
} |
q272697 | Process._set_template | test | def _set_template(self, template):
"""Sets the path to the appropriate jinja template file
When a Process instance is initialized, this method will fetch
the location of the appropriate template file, based on the
``template`` argument. It will raise an exception is the template
file is not found. Otherwise, it will set the
:py:attr:`Process.template_path` attribute.
"""
# Set template directory
tpl_dir = join(dirname(abspath(__file__)), "templates")
# Set template file path
tpl_path = join(tpl_dir, template + ".nf")
if not os.path.exists(tpl_path):
raise eh.ProcessError(
"Template {} does not exist".format(tpl_path))
self._template_path = join(tpl_dir, template + ".nf") | python | {
"resource": ""
} |
def set_main_channel_names(self, input_suffix, output_suffix, lane):
    """Assign the main input/output channel names and the process lane.

    Channel names are derived from the process template name combined
    with the provided suffixes, which should encode the lane and an
    arbitrary unique id. Called when connecting processes.

    Parameters
    ----------
    input_suffix : str
        Suffix appended to the input channel name.
    output_suffix : str
        Suffix appended to the output channel name.
    lane : int
        Lane assigned to the process.
    """
    template_name = self.template
    self.input_channel = "{}_in_{}".format(template_name, input_suffix)
    self.output_channel = "{}_out_{}".format(template_name, output_suffix)
    self.lane = lane
"resource": ""
} |
def get_user_channel(self, input_channel, input_type=None):
    """Return the raw channel information for this process.

    Given a channel name, returns the raw channel info for the process'
    input type (or the explicitly provided ``input_type``). The lookup is
    performed against the :attr:`~Process.RAW_MAPPING` dictionary; when
    the type is not present there, ``None`` is returned.

    An example of the returned dictionary is::

        {"input_channel": "myChannel",
         "params": "fastq",
         "channel": "IN_fastq_raw",
         "channel_str":"IN_fastq_raw = Channel.fromFilePairs(params.fastq)"
        }

    Returns
    -------
    dict or None
        Dictionary with the complete raw channel info. None if no
        channel is found.
    """
    base = {"input_channel": input_channel}

    # The caller may override the process' own input type.
    effective_type = input_type if input_type else self.input_type

    if effective_type not in self.RAW_MAPPING:
        return None

    channel_info = self.RAW_MAPPING[effective_type]
    return {**base, **channel_info}
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.