_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def delete(self, name):
    '''delete an image from Google Storage.

    Parameters
    ==========
    name: the name of the file (or image) to delete
    '''
    bot.debug("DELETE %s" % name)
    # NOTE(review): "files" is not defined in this scope -- presumably the
    # result of a prior storage listing/search for "name"; confirm against
    # the caller, because as written this raises NameError.
    for file_object in files:
        # Only delete entries that are actual storage objects
        if isinstance(file_object, dict):
            if "kind" in file_object:
                if file_object['kind'] == "storage#object":
                    # Object id looks like <bucket>/<path>/<generation>:
                    # drop the generation, then strip the bucket prefix once
                    object_name = "/".join(file_object['id'].split('/')[:-1])
                    object_name = re.sub('%s/' %self._bucket['name'],'', object_name,1)
                    # NOTE(review): "bucket" is undefined here -- likely
                    # intended to be self._bucket (as used above).
                    delete_object(service=self._bucket_service,
                                  bucket_name=bucket['name'],
                                  object_name=object_name)
def destroy(self, name):
    '''Stop a build by taking down its compute instance.

    Parameters
    ==========
    name: the name of the instance to stop building.
    '''
    listing = self._get_instances()
    project = self._get_project()
    zone = self._get_zone()

    # Look for an instance with the requested name
    target = None
    for entry in listing.get('items', []):
        if entry['name'] == name:
            target = entry
            break

    # Nothing to do if it was not found (returns None, as before)
    if target is None:
        return

    bot.info('Killing instance %s' %name)
    return self._compute_service.instances().delete(project=project,
                                                    zone=zone,
                                                    instance=name).execute()
def get_subparsers(parser):
    '''Return a dict mapping subcommand name -> subparser, to help with
    printing help.
    '''
    subparsers = {}
    for action in parser._actions:
        # Only _SubParsersAction entries hold the subcommand choices
        if isinstance(action, argparse._SubParsersAction):
            subparsers.update(action.choices)
    return subparsers
def generate(self, delim='-', length=4, chars='0123456789'):
    '''Generate a robot name. Inspiration from Haikunator, but much more
    poorly implemented ;)

    Parameters
    ==========
    delim: Delimiter
    length: TokenLength
    chars: TokenChars
    '''
    # Selection order (descriptor, noun, then token chars) is preserved
    # so seeded random sequences produce the same name.
    pieces = [self._select(self._descriptors),
              self._select(self._nouns),
              ''.join([self._select(chars) for _ in range(length)])]
    return delim.join(pieces)
def get_tmpdir(requested_tmpdir=None, prefix="", create=True):
    '''get a temporary directory for an operation. If SREGISTRY_TMPDIR
    is set, return that. Otherwise, return the output of tempfile.mkdtemp

    Parameters
    ==========
    requested_tmpdir: an optional requested temporary directory, first
    priority as is coming from calling function.
    prefix: Given a need for a sandbox (or similar), we will need to
    create a subfolder *within* the SREGISTRY_TMPDIR.
    create: boolean to determine if we should create folder (True)

    Returns
    =======
    the path to the (optionally created) temporary directory
    '''
    import uuid
    from sregistry.defaults import SREGISTRY_TMPDIR

    # First priority for the base goes to the user requested.
    tmpdir = requested_tmpdir or SREGISTRY_TMPDIR
    prefix = prefix or "sregistry-tmp"

    # Fix: use the public uuid API for the random suffix instead of the
    # private tempfile._get_candidate_names(), which can break between
    # Python versions.
    prefix = "%s.%s" % (prefix, uuid.uuid4().hex[:8])
    tmpdir = os.path.join(tmpdir, prefix)

    if not os.path.exists(tmpdir) and create is True:
        # makedirs also creates missing parent directories (os.mkdir
        # would fail if the base tmpdir did not exist yet)
        os.makedirs(tmpdir)
    return tmpdir
def extract_tar(archive, output_folder, handle_whiteout=False):
    '''extract a tar archive to a specified output folder.

    Parameters
    ==========
    archive: the archive file to extract
    output_folder: the output folder to extract to
    handle_whiteout: use docker2oci variation to handle whiteout files
    '''
    from .terminal import run_command

    # Whiteout handling is delegated to the blob2oci helper
    if handle_whiteout is True:
        return _extract_tar(archive, output_folder)

    # Gzipped archives need -xzf; plain tar uses -xf
    flags = '-xzf' if archive.endswith(".tar.gz") else '-xf'

    # Just use command line, more succinct.
    command = ["tar", flags, archive, "-C", output_folder, "--exclude=dev/*"]
    if not bot.is_quiet():
        print("Extracting %s" % archive)
    return run_command(command)
def _extract_tar(archive, output_folder):
    '''use blob2oci to handle whiteout files for extraction. Credit for this
    script goes to docker2oci by Olivier Freyermouth, and see script
    folder for license.

    Parameters
    ==========
    archive: the archive to extract
    output_folder: the output folder (sandbox) to extract to
    '''
    from .terminal import ( run_command, which )

    # The blob2oci helper must be discoverable on the PATH
    result = which('blob2oci')
    if result['return_code'] != 0:
        bot.error('Cannot find blob2oci script on path, exiting.')
        sys.exit(1)
    script = result['message']

    # NOTE(review): 'exec' is a shell builtin, not an executable; if
    # run_command executes this argv list without a shell, the first token
    # will not resolve -- confirm run_command's semantics.
    command = ['exec' ,script, '--layer', archive, '--extract', output_folder]
    if not bot.is_quiet():
        print("Extracting %s" % archive)
    return run_command(command)
def get_file_hash(filename):
    '''Return the hex SHA256 digest of the file at filename, reading it
    in 4K chunks so large files do not need to fit in memory.
    '''
    digest = hashlib.sha256()
    with open(filename, "rb") as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def read_file(filename, mode="r", readlines=True):
    '''read_file will open a file, "filename", and return its content,
    either as a list of lines (readlines=True) or as a single string
    (readlines=False), properly closing the file afterwards.
    '''
    with open(filename, mode) as filey:
        if readlines is True:
            content = filey.readlines()
        else:
            content = filey.read()
    return content
def read_json(filename, mode='r'):
    '''Read a json file and return the parsed data structure as dict.'''
    with open(filename, mode) as handle:
        return json.load(handle)
def clean_up(files):
    '''Delete a list of files (or a single path), only if they exist.'''
    targets = files if isinstance(files, list) else [files]
    for target in targets:
        if os.path.exists(target):
            bot.verbose3("Cleaning up %s" % target)
            os.remove(target)
def push(self, path, name, tag=None):
    '''push an image to an S3 endpoint'''
    path = os.path.abspath(path)
    bot.debug("PUSH %s" % path)

    if not os.path.exists(path):
        bot.error('%s does not exist.' %path)
        sys.exit(1)

    # Parse the uri into named fields (collection, image, tag, ...)
    names = parse_image_name(remove_uri(name), tag=tag)

    # Image size in MB for the metadata record
    image_size = os.path.getsize(path) >> 20

    # Create extra metadata, this is how we identify the image later
    # *important* bug in boto3 will return these capitalized
    # see https://github.com/boto/boto3/issues/1709
    metadata = {'sizemb': "%s" % image_size,
                'client': 'sregistry'}
    self.bucket.upload_file(path, names['storage_uri'], {"Metadata": metadata})
def get_or_create_collection(self, name):
    '''Return the collection with this name, creating and committing it
    first if it does not exist.

    Parameters
    ==========
    name: the collection name, usually parsed from get_image_names()['name']
    '''
    from sregistry.database.models import Collection

    existing = self.get_collection(name)
    if existing is not None:
        return existing

    # Not found: create, persist, and return a new collection
    created = Collection(name=name)
    self.session.add(created)
    self.session.commit()
    return created
def get_collection(self, name):
    '''Return the collection with this name if it exists, otherwise None.'''
    from sregistry.database.models import Collection
    matches = Collection.query.filter(Collection.name == name)
    return matches.first()
def get_container(self, name, collection_id, tag="latest", version=None):
    '''Return a matching container, otherwise None.'''
    from sregistry.database.models import Container

    # Only constrain by version when one was provided
    criteria = {'collection_id': collection_id,
                'name': name,
                'tag': tag}
    if version is not None:
        criteria['version'] = version
    return Container.query.filter_by(**criteria).first()
def images(self, query=None):
    '''List local images in the database, optionally with a query.

    Parameters
    ==========
    query: a string to search for in the container or collection name|tag|uri

    Returns
    =======
    the list of matching Container objects (possibly empty)
    '''
    from sregistry.database.models import Collection, Container
    rows = []
    if query is not None:
        # Substring match on tag/uri/name, plus an exact match on name
        like = "%" + query + "%"
        containers = Container.query.filter(or_(Container.name == query,
                                                Container.tag.like(like),
                                                Container.uri.like(like),
                                                Container.name.like(like))).all()
    else:
        containers = Container.query.all()
    if len(containers) > 0:
        # Print a header row, then one row per container
        message = " [date] [client]\t[uri]"
        bot.custom(prefix='Containers:', message=message, color="RED")
        for c in containers:
            uri = c.get_uri()
            created_at = c.created_at.strftime('%B %d, %Y')
            rows.append([created_at, " [%s]" %c.client, uri])
        bot.table(rows)
    return containers
def inspect(self, name):
    '''Inspect a local image in the database, printing (and returning) the
    basic fields in the model.
    '''
    print(name)
    container = self.get(name)
    if container is None:
        return

    fields = container.__dict__.copy()
    fields['collection'] = container.collection.name
    fields['metrics'] = json.loads(fields['metrics'])
    fields['created_at'] = str(fields['created_at'])
    # Internal SQLAlchemy state is not part of the user-facing record
    del fields['_sa_instance_state']
    print(json.dumps(fields, indent=4, sort_keys=True))
    return fields
def rename(self, image_name, path):
    '''rename performs a move, but ensures the path is maintained in storage

    Parameters
    ==========
    image_name: the image name (uri) to rename to.
    path: the name to rename (basename is taken)

    Returns
    =======
    the updated container on success, otherwise None (with a warning)
    '''
    container = self.get(image_name, quiet=True)
    if container is not None:
        if container.image is not None:
            # The original directory for the container stays the same
            dirname = os.path.dirname(container.image)
            # But we derive a new filename and uri
            names = parse_image_name( remove_uri (path) )
            storage = os.path.join( self.storage,
                                    os.path.dirname(names['storage']) )
            # This is the collection folder
            # NOTE(review): os.mkdir assumes self.storage already exists;
            # only the single collection folder level is created here.
            if not os.path.exists(storage):
                os.mkdir(storage)
            # Here we get the new full path, rename the container file
            fullpath = os.path.abspath(os.path.join(dirname, names['storage']))
            container = self.cp(move_to=fullpath,
                                container=container,
                                command="rename")
            # On successful rename of file, update the uri
            if container is not None:
                container.uri = names['uri']
                self.session.commit()
                return container
    # Reached when the container is missing, has no image file, or cp failed
    bot.warning('%s not found' %(image_name))
def mv(self, image_name, path):
    '''Move an image from its current location to a new path.
    Removing the image from organized storage is not the recommended approach,
    however it is still a function wanted by some.

    Parameters
    ==========
    image_name: the parsed image name.
    path: the location to move the image to

    Returns
    =======
    the result of self.cp on success, otherwise None (with a warning)
    '''
    container = self.get(image_name, quiet=True)
    if container is not None:
        # NOTE(review): "name" is computed but never used below
        name = container.uri or container.get_uri()
        image = container.image or ''
        # Only continue if image file exists
        if os.path.exists(image):
            # Default assume directory, use image name and path fully
            filename = os.path.basename(image)
            filedir = os.path.abspath(path)
            # If it's a file, use filename provided
            if not os.path.isdir(path):
                filename = os.path.basename(path)
                filedir = os.path.dirname(path)
            # If directory is empty, assume $PWD
            if filedir == '':
                filedir = os.getcwd()
            # Copy to the fullpath from the storage
            fullpath = os.path.abspath(os.path.join(filedir,filename))
            return self.cp(move_to=fullpath,
                           container=container,
                           command="move")
    # Reached when the container or its image file is missing
    bot.warning('%s not found' %(image_name))
def rmi(self, image_name):
    '''Remove an image from both the database and the filesystem.'''
    removed = self.rm(image_name, delete=True)
    if removed is not None:
        bot.info("[rmi] %s" % removed)
def add(self, image_path=None,
              image_uri=None,
              image_name=None,
              url=None,
              metadata=None,
              save=True,
              copy=False):
    '''get or create a container, including the collection to add it to.
    This function can be used from a file on the local system, or via a URL
    that has been downloaded. Either way, if one of url, version, or image_file
    is not provided, the model is created without it. If a version is not
    provided but a file path is, then the file hash is used.

    Parameters
    ==========
    image_path: full path to image file
    image_name: if defined, the user wants a custom name (and not based on uri)
    metadata: any extra metadata to keep for the image (dict)
    save: if True, move the image to the cache if it's not there
    copy: If True, copy the image instead of moving it.
    image_name: a uri that gets parsed into a names object that looks like:

    {'collection': 'vsoch',
     'image': 'hello-world',
     'storage': 'vsoch/hello-world-latest.img',
     'tag': 'latest',
     'version': '12345'
     'uri': 'vsoch/hello-world:latest@12345'}

    After running add, the user will take some image in a working
    directory, add it to the database, and have it available for search
    and use under SREGISTRY_STORAGE/<collection>/<container>

    If the container was retrieved from a webby place, it should have version
    If no version is found, the file hash is used.
    '''
    from sregistry.database.models import (
        Container,
        Collection
    )

    # We can only save if the image is provided
    if image_path is not None:
        if not os.path.exists(image_path) and save is True:
            bot.error('Cannot find %s' %image_path)
            sys.exit(1)

    # An image uri is required for version, tag, etc.
    if image_uri is None:
        bot.error('You must provide an image uri <collection>/<namespace>')
        sys.exit(1)

    names = parse_image_name( remove_uri(image_uri) )
    bot.debug('Adding %s to registry' % names['uri'])

    # If Singularity is installed, inspect image for metadata
    # NOTE(review): this overwrites the "metadata" argument passed by the
    # caller -- confirm whether get_metadata is expected to merge it.
    metadata = self.get_metadata(image_path, names=names)
    collection = self.get_or_create_collection(names['collection'])

    # Get a hash of the file for the version, or use provided
    version = names.get('version')
    if version == None:
        if image_path != None:
            version = get_image_hash(image_path)
        else:
            version = '' # we can't determine a version, not in API/no file
        names = parse_image_name( remove_uri(image_uri), version=version )

    # If save, move to registry storage first
    if save is True and image_path is not None:
        # If the user hasn't defined a custom name
        if image_name is None:
            image_name = self._get_storage_name(names)
        if copy is True:
            copyfile(image_path, image_name)
        else:
            shutil.move(image_path, image_name)
        image_path = image_name

    # Just in case the client didn't provide it, see if we have in metadata
    if url is None and "url" in metadata:
        url = metadata['url']

    # First check that we don't have one already!
    container = self.get_container(name=names['image'],
                                   collection_id=collection.id,
                                   tag=names['tag'],
                                   version=version)

    # The container did not exist, create it
    if container is None:
        action = "new"
        container = Container(metrics=json.dumps(metadata),
                              name=names['image'],
                              image=image_path,
                              client=self.client_name,
                              tag=names['tag'],
                              version=version,
                              url=url,
                              uri=names['uri'],
                              collection_id=collection.id)
        self.session.add(container)
        collection.containers.append(container)

    # The container existed, update it.
    else:
        action="update"
        # Merge freshly inspected metadata into the stored metrics
        metrics=json.loads(container.metrics)
        metrics.update(metadata)
        container.url= url
        container.client=self.client_name
        if image_path is not None:
            container.image=image_path
        container.metrics=json.dumps(metrics)

    self.session.commit()
    bot.info("[container][%s] %s" % (action,names['uri']))
    return container
def push(self, path, name, tag=None):
    '''push an image to Singularity Registry.

    Parameters
    ==========
    path: the local path of the image file to push
    name: the image uri (e.g. collection/image:tag) to push to
    tag: optional tag, overrides any tag parsed from the uri
    '''
    path = os.path.abspath(path)
    bot.debug("PUSH %s" % path)

    if not os.path.exists(path):
        bot.error('%s does not exist.' %path)
        sys.exit(1)

    # Interaction with a registry requires secrets
    self.require_secrets()

    # Extract the metadata
    names = parse_image_name(remove_uri(name), tag=tag)
    image_size = os.path.getsize(path) >> 20

    # COLLECTION ###############################################################

    # If the registry is provided in the uri, use it
    if names['registry'] is None:
        names['registry'] = self.base

    # If the base doesn't start with http or https, add it
    names = self._add_https(names)

    # Prepare push request; this returns a collection ID if permitted.
    # (Fix: removed a dead store -- the original also built an unused
    # '<registry>/push/' url here that was overwritten before use.)
    auth_url = '%s/upload/chunked_upload' % names['registry']
    SREGISTRY_EVENT = self.authorize(request_type="push",
                                     names=names)

    # Data fields for collection
    fields = {'collection': names['collection'],
              'name': names['image'],
              'tag': names['tag']}

    headers = {'Authorization': SREGISTRY_EVENT}
    r = requests.post(auth_url, json=fields, headers=headers)

    # Always tell the user what's going on!
    message = self._read_response(r)
    print('\n[1. Collection return status {0} {1}]'.format(r.status_code, message))

    # Get the collection id, if created, and continue with upload
    if r.status_code != 200:
        sys.exit(1)

    # UPLOAD ###################################################################

    url = '%s/upload' % names['registry'].replace('/api','')
    bot.debug('Seting upload URL to {0}'.format(url))
    cid = r.json()['cid']
    upload_to = os.path.basename(names['storage'])

    SREGISTRY_EVENT = self.authorize(request_type="upload",
                                     names=names)

    # Stream the file with a progress bar via a multipart monitor
    encoder = MultipartEncoder(fields={'SREGISTRY_EVENT': SREGISTRY_EVENT,
                                       'name': names['image'],
                                       'collection': str(cid),
                                       'tag': names['tag'],
                                       'file1': (upload_to, open(path, 'rb'), 'text/plain')})
    progress_callback = create_callback(encoder, self.quiet)
    monitor = MultipartEncoderMonitor(encoder, progress_callback)
    headers = {'Content-Type': monitor.content_type,
               'Authorization': SREGISTRY_EVENT}

    try:
        r = requests.post(url, data=monitor, headers=headers)
        r.raise_for_status()
        message = r.json()['message']
        print('\n[Return status {0} {1}]'.format(r.status_code, message))
    except requests.HTTPError as e:
        print('\nUpload failed: {0}.'.format(e))
    except KeyboardInterrupt:
        print('\nUpload cancelled.')
    except Exception as e:
        print(e)
def parse_header(recipe, header="from", remove_header=True):
    '''Find a header line (e.g. "From: ...") in a recipe and return it.

    Parameters
    ==========
    recipe: the recipe file content (string)
    header: the header key to find and parse (now matched
            case-insensitively; previously a non-lowercase header
            could never match, since lines were lowercased first)
    remove_header: if True, return only the value after the colon;
                   if False, return the full stripped header line.

    Returns
    =======
    the parsed header string, or "" when the header is not found
    '''
    key = "%s:" % header.lower()
    matches = [line for line in recipe.split('\n') if key in line.lower()]

    # Case 1: We did not find the header line
    if not matches:
        return ""

    # Case 2: We found it! Use the first match.
    # (The original had a redundant second length check here.)
    line = matches[0]
    if remove_header is True:
        # Keep only the value after the first colon
        return line.split(':', 1)[-1].strip()
    return line.strip()
def find_single_recipe(filename, pattern="Singularity", manifest=None):
    '''Assess a single file for a recipe, and if valid, return an updated
    manifest (or the recipe entry itself when no manifest was started).

    Parameters
    ==========
    filename: the filename to assess for a recipe
    pattern: a default pattern to search for
    manifest: an already started manifest
    '''
    if pattern is None:
        pattern = "Singularity*"

    # Build the recipe entry only when the basename matches the pattern
    recipe = None
    basename = os.path.basename(filename)
    if fnmatch.fnmatch(basename, pattern):
        recipe = {'path': os.path.abspath(filename),
                  'modified': os.path.getmtime(filename)}

    # With a manifest and a valid recipe, keep only the most recent entry
    if manifest is not None and recipe is not None:
        container_uri = '/'.join(filename.split('/')[-2:])
        current = manifest.get(container_uri)
        if current is None or current['modified'] < os.path.getmtime(filename):
            manifest[container_uri] = recipe
        return manifest

    return recipe
def create_build_package(package_files):
    '''given a list of files, copy them to a temporary folder,
    compress into a .tar.gz, and rename based on the file hash.
    Return the full path to the .tar.gz in the temporary folder.

    Parameters
    ==========
    package_files: a list of files to include in the tar.gz

    Returns
    =======
    the full path to the hash-named .tar.gz
    '''
    # Ensure package files all exist
    for package_file in package_files:
        if not os.path.exists(package_file):
            bot.exit('Cannot find %s.' % package_file)

    bot.log('Generating build package for %s files...' % len(package_files))
    build_dir = get_tmpdir(prefix="sregistry-build")
    build_tar = '%s/build.tar.gz' % build_dir

    # Fix: use a context manager so the archive is closed (and flushed)
    # even if tar.add raises partway through.
    with tarfile.open(build_tar, "w:gz") as tar:
        for package_file in package_files:
            tar.add(package_file)

    # Get hash (sha256), and rename file
    sha256 = get_file_hash(build_tar)
    hash_tar = "%s/%s.tar.gz" % (build_dir, sha256)
    shutil.move(build_tar, hash_tar)
    return hash_tar
def run_build(self, config, bucket, names):
    '''run a build, meaning creating a build. Retry if there is failure

    Parameters
    ==========
    config: the Google Build API build config (steps, source, artifacts)
    bucket: the storage bucket where the built artifact lands
    names: parsed image names (used for blob metadata)

    Returns
    =======
    the final build response, augmented with blob links on SUCCESS
    '''
    project = self._get_project()

    # prefix, message, color
    bot.custom('PROJECT', project, "CYAN")
    bot.custom('BUILD ', config['steps'][0]['name'], "CYAN")

    response = self._build_service.projects().builds().create(body=config,
                                                              projectId=project).execute()
    build_id = response['metadata']['build']['id']
    status = response['metadata']['build']['status']
    bot.log("build %s: %s" % (build_id, status))

    # Poll every 15 seconds until the build reaches a terminal status
    start = time.time()
    while status not in ['COMPLETE', 'FAILURE', 'SUCCESS']:
        time.sleep(15)
        response = self._build_service.projects().builds().get(id=build_id,
                                                               projectId=project).execute()
        build_id = response['id']
        status = response['status']
        bot.log("build %s: %s" % (build_id, status))
    end = time.time()
    bot.log('Total build time: %s seconds' % (round(end - start, 2)))

    # If successful, update blob metadata and visibility
    if status == 'SUCCESS':
        # Does the user want to keep the container private?
        env = 'SREGISTRY_GOOGLE_STORAGE_PRIVATE'
        blob = bucket.blob(response['artifacts']['objects']['paths'][0])

        # Make Public, if desired (private setting unset -> public)
        if self._get_and_update_setting(env) == None:
            blob.make_public()
            response['public_url'] = blob.public_url

        # Add the metadata directly to the object
        update_blob_metadata(blob, response, config, bucket, names)
        response['media_link'] = blob.media_link
        response['size'] = blob.size
        response['file_hash'] = blob.md5_hash
    return response
def update_blob_metadata(blob, response, config, bucket, names):
    '''a specific function to take a blob, along with a SUCCESS response
    from Google build, the original config, and update the blob
    metadata with the artifact file name, dependencies, and image hash.

    Parameters
    ==========
    blob: the storage blob for the built container
    response: the (successful) build response from the Google Build API
    config: the original build config (provides source and steps)
    bucket: the storage bucket holding the artifact manifest
    names: parsed image names (provides tag_uri)
    '''
    # The artifact manifest (json) lives alongside the artifact in the bucket
    manifest = os.path.basename(response['results']['artifactManifest'])
    manifest = json.loads(bucket.blob(manifest).download_as_string())
    metadata = {'file_hash': manifest['file_hash'][0]['file_hash'][0]['value'],
                'artifactManifest': response['results']['artifactManifest'],
                'location': manifest['location'],
                'storageSourceBucket': config['source']['storageSource']['bucket'],
                'storageSourceObject': config['source']['storageSource']['object'],
                'buildCommand': ' '.join(config['steps'][0]['args']),
                'builder': config['steps'][0]['name'],
                'media_link': blob.media_link,
                'self_link': blob.self_link,
                'size': blob.size,
                'name': names['tag_uri'],
                'type': "container"} # identifier that the blob is a container
    # Set on both the attribute and the raw properties, then patch the
    # remote object so the metadata persists
    blob.metadata = metadata
    blob._properties['metadata'] = metadata
    blob.patch()
def format_container_name(name, special_characters=None):
    '''Lowercase a user-supplied name and strip every character that is
    neither alphanumeric nor listed in special_characters, returning the
    cleaned image name.
    '''
    keep = [] if special_characters is None else special_characters
    cleaned = [ch.lower() for ch in name if ch.isalnum() or ch in keep]
    return ''.join(cleaned)
def useColor(self):
    '''Determine if color should be added to a print: honor the user's
    explicit preference when set, otherwise require both output streams
    to be terminals with ascii support.
    '''
    preference = get_user_color_preference()
    if preference is not None:
        return preference

    # No explicit preference: both streams must look like real ttys
    for stream in (self.errorStream, self.outputStream):
        isatty = getattr(stream, 'isatty', None)
        if isatty is None or not isatty():
            return False
    return True
def emitError(self, level):
    '''Return True when a level should print to stderr: every level
    except INFO and QUIET.
    '''
    stderr_levels = (ABORT,
                     ERROR,
                     WARNING,
                     VERBOSE,
                     VERBOSE1,
                     VERBOSE2,
                     VERBOSE3,
                     DEBUG)
    return level in stderr_levels
def write(self, stream, message):
    '''Write a message to a stream, decoding bytes to utf-8 first.'''
    text = message.decode('utf-8') if isinstance(message, bytes) else message
    stream.write(text)
def table(self, rows, col_width=2):
    '''Print a table of entries. Dict keys become the row labels;
    list rows get numbered labels starting at 1.
    '''
    if isinstance(rows, dict):
        labels = list(rows.keys())
        entries = list(rows.values())
    else:
        labels = [str(i) for i in range(1, len(rows) + 1)]
        entries = rows
    for label, row in zip(labels, entries):
        self.custom(prefix=label.ljust(col_width),
                    message="\t".join(row))
def push(self, path, name, tag=None):
    '''push an image to Globus endpoint. In this case, the name is the
    globus endpoint id and path:

        --name <endpointid>:/path/for/image

    Parameters
    ==========
    path: the local path of the image to transfer
    name: the endpoint id and remote path (colon separated)
    tag: optional image tag

    Returns
    =======
    the Globus transfer result
    '''
    # Split the name into endpoint and rest
    # NOTE(review): "remote" is parsed but never used below
    endpoint, remote = self._parse_endpoint_name(name)
    path = os.path.abspath(path)
    image = os.path.basename(path)
    bot.debug("PUSH %s" % path)

    # Flatten image uri into image name
    q = parse_image_name(image)

    if not os.path.exists(path):
        bot.error('%s does not exist.' %path)
        sys.exit(1)

    # Ensure we have a transfer client
    if not hasattr(self, 'transfer_client'):
        self._init_transfer_client()

    # The user must have a personal endpoint
    endpoints = self._get_endpoints()
    if len(endpoints['my-endpoints']) == 0:
        bot.error('You must have a personal endpoint to transfer the container')
        sys.exit(1)

    # Take the first endpoint that is active
    source_endpoint = None
    for eid,contender in endpoints['my-endpoints'].items():
        if contender['gcp_connected'] is True:
            source_endpoint = contender
            break

    # Exit if none are active, required!
    if source_endpoint is None:
        bot.error('No activated local endpoints online! Go online to transfer')
        sys.exit(1)

    # The destination endpoint should have an .singularity/shub folder set
    self._create_endpoint_cache(endpoint)

    # SREGISTRY_STORAGE must be an endpoint
    # if the image isn't already there, add it first
    added = self.add(image_path=path,
                     image_uri=q['uri'],
                     copy=True)

    # Build the transfer request, syncing by checksum
    label = "Singularity Registry Transfer for %s" %added.name
    tdata = globus_sdk.TransferData(self.transfer_client,
                                    source_endpoint['id'],
                                    endpoint,
                                    label=label,
                                    sync_level="checksum")

    # Destination path under the endpoint's shub cache
    image = ".singularity/shub/%s" %image
    tdata.add_item(added.image, image)
    bot.info('Requesting transfer from local %s to %s:%s' %(SREGISTRY_STORAGE,
                                                            endpoint, image))
    transfer_result = self.transfer_client.submit_transfer(tdata)
    bot.info(transfer_result['message'])
    return transfer_result
def get_template(name):
    '''Return a default template for some function in sregistry, or None
    (with a warning) if there is no template with that name.

    Parameters
    ==========
    name: the name of the template to retrieve
    '''
    templates = {
        'tarinfo': {"gid": 0,
                    "uid": 0,
                    "uname": "root",
                    "gname": "root",
                    "mode": 493},
    }
    key = name.lower()
    if key not in templates:
        bot.warning("Cannot find template %s" % (key))
        return
    bot.debug("Found template for %s" % (key))
    return templates[key]
def get_manifest(self, repo_name, tag):
    '''Return the image manifest via the aws client, saved in self.manifest.

    Parameters
    ==========
    repo_name: the ECR repository name
    tag: the image tag to look up

    Returns
    =======
    the parsed image manifest (dict)
    '''
    image = None
    repo = self.aws.describe_images(repositoryName=repo_name)

    # Find the image whose tag list includes the requested tag
    if 'imageDetails' in repo:
        for contender in repo.get('imageDetails'):
            if tag in contender['imageTags']:
                image = contender
                break

    # if the image isn't found, we need to exit
    if image is None:
        # Fix: the original referenced "digest" here before it was
        # defined (NameError); report the tag that was searched for.
        bot.exit('Cannot find %s:%s, is the uri correct?' % (repo_name, tag))

    digest = image['imageDigest']
    digests = self.aws.batch_get_image(repositoryName=repo_name,
                                       imageIds=[{"imageDigest": digest,
                                                  "imageTag": tag}])
    self.manifest = json.loads(digests['images'][0]['imageManifest'])
    return self.manifest
def get_build_template(name=None, manager='apt'):
    '''get a particular build template, by default we return templates
    that are based on package managers. Returns the template contents as
    a single string, or None (with a warning) if the file is missing.

    Parameters
    ==========
    name: the full path of the template file to use.
    manager: the package manager to use in the template (yum or apt)
    '''
    base = get_installdir()

    # Default to the bundled template for the chosen package manager
    if name is None:
        name = "%s/main/templates/build/singularity-builder-%s.sh" % (base,
                                                                      manager)

    if not os.path.exists(name):
        bot.warning("Template %s not found." %name)
        return

    bot.debug("Found template %s" %name)
    return ''.join(read_file(name))
def _update_secrets(self):
    '''update secrets will take a secrets credential file
    either located at .sregistry or the environment variable
    SREGISTRY_CLIENT_SECRETS and update the current client
    secrets as well as the associated API base. This is where you
    should do any customization of the secrets file, or using
    it to update your client, if needed.

    (This body is example/template code demonstrating the settings
    helpers available to client implementations.)
    '''
    # Get a setting for client myclient and some variable name VAR.
    # returns None if not set
    setting = self._get_setting('SREGISTRY_MYCLIENT_VAR')

    # Get (and if found in environment (1) settings (2) update the variable
    # It will still return None if not set
    setting = self._get_and_update_setting('SREGISTRY_MYCLIENT_VAR')

    # If you have a setting that is required and not found, you should exit.

    # Here is how to read all client secrets
    self.secrets = read_client_secrets()

    # If you don't want to use the shared settings file, you have your own.
    # Here is how to get if the user has a cache for you enabled, this
    # returns a path (enabled) or None (disabled) that you should honor
    # You can use this as a file path or folder and for both cases, you
    # need to create the file or folder
    if self._credential_cache is not None:
        bot.info("credential cache set to %s" %self._credential_cache)
def _make_repr(class_name, *args, **kwargs):
    """
    Generate a repr string.

    Positional arguments should be the positional arguments used to
    construct the class. Keyword arguments should consist of tuples of
    the attribute value and default. If the value is the default, then
    it won't be rendered in the output.

    Here's an example::

        def __repr__(self):
            return make_repr('MyClass', 'foo', name=(self.name, None))

    The output of this would be something like ``MyClass('foo',
    name='Will')``.
    """
    parts = [repr(value) for value in args]
    # Keyword arguments are rendered sorted by name, skipping defaults
    for name, (value, default) in sorted(kwargs.items()):
        if value != default:
            parts.append("{}={!r}".format(name, value))
    return "{}({})".format(class_name, ", ".join(parts))
q272538 | s3errors | test | def s3errors(path):
"""Translate S3 errors to FSErrors."""
try:
yield
except ClientError as error:
_error = error.response.get("Error", {})
error_code = _error.get("Code", None)
response_meta = error.response.get("ResponseMetadata", {})
http_status = response_meta.get("HTTPStatusCode", 200)
error_msg = _error.get("Message", None)
if error_code == "NoSuchBucket":
raise errors.ResourceError(path, exc=error, msg=error_msg)
if http_status == 404:
raise errors.ResourceNotFound(path)
elif http_status == 403:
raise errors.PermissionDenied(path=path, msg=error_msg)
else:
raise errors.OperationFailed(path=path, exc=error)
except SSLError as error:
raise errors.OperationFailed(path, exc=error)
except EndpointConnectionError as error:
raise errors.RemoteConnectionError(path, exc=error, msg="{}".format(error)) | python | {
"resource": ""
} |
q272539 | S3File.factory | test | def factory(cls, filename, mode, on_close):
"""Create a S3File backed with a temporary file."""
_temp_file = tempfile.TemporaryFile()
proxy = cls(_temp_file, filename, mode, on_close=on_close)
return proxy | python | {
"resource": ""
} |
q272540 | gravatar_url | test | def gravatar_url(user_or_email, size=GRAVATAR_DEFAULT_SIZE):
""" Builds a gravatar url from an user or email """
if hasattr(user_or_email, 'email'):
email = user_or_email.email
else:
email = user_or_email
try:
return escape(get_gravatar_url(email=email, size=size))
except:
return '' | python | {
"resource": ""
} |
q272541 | get_gravatar_url | test | def get_gravatar_url(email, size=GRAVATAR_DEFAULT_SIZE, default=GRAVATAR_DEFAULT_IMAGE,
rating=GRAVATAR_DEFAULT_RATING, secure=GRAVATAR_DEFAULT_SECURE):
"""
Builds a url to a gravatar from an email address.
:param email: The email to fetch the gravatar for
:param size: The size (in pixels) of the gravatar to fetch
:param default: What type of default image to use if the gravatar does not exist
:param rating: Used to filter the allowed gravatar ratings
:param secure: If True use https, otherwise plain http
"""
if secure:
url_base = GRAVATAR_SECURE_URL
else:
url_base = GRAVATAR_URL
# Calculate the email hash
email_hash = calculate_gravatar_hash(email)
# Build querystring
query_string = urlencode({
's': str(size),
'd': default,
'r': rating,
})
# Build url
url = '{base}avatar/{hash}.jpg?{qs}'.format(base=url_base,
hash=email_hash, qs=query_string)
return url | python | {
"resource": ""
} |
q272542 | has_gravatar | test | def has_gravatar(email):
"""
Returns True if the user has a gravatar, False if otherwise
"""
# Request a 404 response if the gravatar does not exist
url = get_gravatar_url(email, default=GRAVATAR_DEFAULT_IMAGE_404)
# Verify an OK response was received
try:
request = Request(url)
request.get_method = lambda: 'HEAD'
return 200 == urlopen(request).code
except (HTTPError, URLError):
return False | python | {
"resource": ""
} |
q272543 | get_gravatar_profile_url | test | def get_gravatar_profile_url(email, secure=GRAVATAR_DEFAULT_SECURE):
"""
Builds a url to a gravatar profile from an email address.
:param email: The email to fetch the gravatar for
:param secure: If True use https, otherwise plain http
"""
if secure:
url_base = GRAVATAR_SECURE_URL
else:
url_base = GRAVATAR_URL
# Calculate the email hash
email_hash = calculate_gravatar_hash(email)
# Build url
url = '{base}{hash}'.format(base=url_base, hash=email_hash)
return url | python | {
"resource": ""
} |
q272544 | chimera_blocks | test | def chimera_blocks(M=16, N=16, L=4):
"""
Generator for blocks for a chimera block quotient
"""
for x in xrange(M):
for y in xrange(N):
for u in (0, 1):
yield tuple((x, y, u, k) for k in xrange(L)) | python | {
"resource": ""
} |
q272545 | chimera_block_quotient | test | def chimera_block_quotient(G, blocks):
"""
Extract the blocks from a graph, and returns a
block-quotient graph according to the acceptability
functions block_good and eblock_good
Inputs:
G: a networkx graph
blocks: a tuple of tuples
"""
from networkx import Graph
from itertools import product
BG = Graph()
blockid = {}
for i, b in enumerate(blocks):
BG.add_node(i)
if not b or not all(G.has_node(x) for x in b):
continue
for q in b:
if q in blockid:
raise(RuntimeError, "two blocks overlap")
blockid[q] = i
for q, u in blockid.items():
ublock = blocks[u]
for p in G[q]:
if p not in blockid:
continue
v = blockid[p]
if BG.has_edge(u, v) or u == v:
continue
vblock = blocks[v]
if ublock[0][2] == vblock[0][2]:
block_edges = zip(ublock, vblock)
else:
block_edges = product(ublock, vblock)
if all(G.has_edge(x, y) for x, y in block_edges):
BG.add_edge(u, v)
return BG | python | {
"resource": ""
} |
q272546 | enumerate_resonance_smiles | test | def enumerate_resonance_smiles(smiles):
"""Return a set of resonance forms as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible resonance form.
:rtype: set of strings.
"""
mol = Chem.MolFromSmiles(smiles)
#Chem.SanitizeMol(mol) # MolFromSmiles does Sanitize by default
mesomers = ResonanceEnumerator().enumerate(mol)
return {Chem.MolToSmiles(m, isomericSmiles=True) for m in mesomers} | python | {
"resource": ""
} |
q272547 | ResonanceEnumerator.enumerate | test | def enumerate(self, mol):
"""Enumerate all possible resonance forms and return them as a list.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: A list of all possible resonance forms of the molecule.
:rtype: list of rdkit.Chem.rdchem.Mol
"""
flags = 0
if self.kekule_all:
flags = flags | Chem.KEKULE_ALL
if self.allow_incomplete_octets:
flags = flags | Chem.ALLOW_INCOMPLETE_OCTETS
if self.allow_charge_separation:
flags = flags | Chem.ALLOW_CHARGE_SEPARATION
if self.unconstrained_anions:
flags = flags | Chem.UNCONSTRAINED_ANIONS
if self.unconstrained_cations:
flags = flags | Chem.UNCONSTRAINED_CATIONS
results = []
for result in Chem.ResonanceMolSupplier(mol, flags=flags, maxStructs=self.max_structures):
# This seems necessary? ResonanceMolSupplier only does a partial sanitization
Chem.SanitizeMol(result)
results.append(result)
return results | python | {
"resource": ""
} |
q272548 | Normalizer.normalize | test | def normalize(self, mol):
"""Apply a series of Normalization transforms to correct functional groups and recombine charges.
A series of transforms are applied to the molecule. For each Normalization, the transform is applied repeatedly
until no further changes occur. If any changes occurred, we go back and start from the first Normalization
again, in case the changes mean an earlier transform is now applicable. The molecule is returned once the entire
series of Normalizations cause no further changes or if max_restarts (default 200) is reached.
:param mol: The molecule to normalize.
:type mol: rdkit.Chem.rdchem.Mol
:return: The normalized fragment.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running Normalizer')
# Normalize each fragment separately to get around quirky RunReactants behaviour
fragments = []
for fragment in Chem.GetMolFrags(mol, asMols=True):
fragments.append(self._normalize_fragment(fragment))
# Join normalized fragments into a single molecule again
outmol = fragments.pop()
for fragment in fragments:
outmol = Chem.CombineMols(outmol, fragment)
Chem.SanitizeMol(outmol)
return outmol | python | {
"resource": ""
} |
q272549 | Normalizer._apply_transform | test | def _apply_transform(self, mol, rule):
"""Repeatedly apply normalization transform to molecule until no changes occur.
It is possible for multiple products to be produced when a rule is applied. The rule is applied repeatedly to
each of the products, until no further changes occur or after 20 attempts. If there are multiple unique products
after the final application, the first product (sorted alphabetically by SMILES) is chosen.
"""
mols = [mol]
for n in six.moves.range(20):
products = {}
for mol in mols:
for product in [x[0] for x in rule.RunReactants((mol,))]:
if Chem.SanitizeMol(product, catchErrors=True) == 0:
products[Chem.MolToSmiles(product, isomericSmiles=True)] = product
if products:
mols = [products[s] for s in sorted(products)]
else:
# If n == 0, the rule was not applicable and we return None
return mols[0] if n > 0 else None | python | {
"resource": ""
} |
q272550 | TautomerCanonicalizer.canonicalize | test | def canonicalize(self, mol):
"""Return a canonical tautomer by enumerating and scoring all possible tautomers.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The canonical tautomer.
:rtype: rdkit.Chem.rdchem.Mol
"""
# TODO: Overload the mol parameter to pass a list of pre-enumerated tautomers
tautomers = self._enumerate_tautomers(mol)
if len(tautomers) == 1:
return tautomers[0]
# Calculate score for each tautomer
highest = None
for t in tautomers:
smiles = Chem.MolToSmiles(t, isomericSmiles=True)
log.debug('Tautomer: %s', smiles)
score = 0
# Add aromatic ring scores
ssr = Chem.GetSymmSSSR(t)
for ring in ssr:
btypes = {t.GetBondBetweenAtoms(*pair).GetBondType() for pair in pairwise(ring)}
elements = {t.GetAtomWithIdx(idx).GetAtomicNum() for idx in ring}
if btypes == {BondType.AROMATIC}:
log.debug('Score +100 (aromatic ring)')
score += 100
if elements == {6}:
log.debug('Score +150 (carbocyclic aromatic ring)')
score += 150
# Add SMARTS scores
for tscore in self.scores:
for match in t.GetSubstructMatches(tscore.smarts):
log.debug('Score %+d (%s)', tscore.score, tscore.name)
score += tscore.score
# Add (P,S,Se,Te)-H scores
for atom in t.GetAtoms():
if atom.GetAtomicNum() in {15, 16, 34, 52}:
hs = atom.GetTotalNumHs()
if hs:
log.debug('Score %+d (%s-H bonds)', -hs, atom.GetSymbol())
score -= hs
# Set as highest if score higher or if score equal and smiles comes first alphabetically
if not highest or highest['score'] < score or (highest['score'] == score and smiles < highest['smiles']):
log.debug('New highest tautomer: %s (%s)', smiles, score)
highest = {'smiles': smiles, 'tautomer': t, 'score': score}
return highest['tautomer'] | python | {
"resource": ""
} |
q272551 | validate_smiles | test | def validate_smiles(smiles):
"""Return log messages for a given SMILES string using the default validations.
Note: This is a convenience function for quickly validating a single SMILES string. It is more efficient to use
the :class:`~molvs.validate.Validator` class directly when working with many molecules or when custom options
are needed.
:param string smiles: The SMILES for the molecule.
:returns: A list of log messages.
:rtype: list of strings.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles)
logs = Validator().validate(mol)
return logs | python | {
"resource": ""
} |
q272552 | MetalDisconnector.disconnect | test | def disconnect(self, mol):
"""Break covalent bonds between metals and organic atoms under certain conditions.
The algorithm works as follows:
- Disconnect N, O, F from any metal.
- Disconnect other non-metals from transition metals + Al (but not Hg, Ga, Ge, In, Sn, As, Tl, Pb, Bi, Po).
- For every bond broken, adjust the charges of the begin and end atoms accordingly.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with metals disconnected.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running MetalDisconnector')
# Remove bonds that match SMARTS
for smarts in [self._metal_nof, self._metal_non]:
pairs = mol.GetSubstructMatches(smarts)
rwmol = Chem.RWMol(mol)
orders = []
for i, j in pairs:
# TODO: Could get the valence contributions of the bond instead of GetBondTypeAsDouble?
orders.append(int(mol.GetBondBetweenAtoms(i, j).GetBondTypeAsDouble()))
rwmol.RemoveBond(i, j)
# Adjust neighbouring charges accordingly
mol = rwmol.GetMol()
for n, (i, j) in enumerate(pairs):
chg = orders[n]
atom1 = mol.GetAtomWithIdx(i)
atom1.SetFormalCharge(atom1.GetFormalCharge() + chg)
atom2 = mol.GetAtomWithIdx(j)
atom2.SetFormalCharge(atom2.GetFormalCharge() - chg)
log.info('Removed covalent bond between %s and %s', atom1.GetSymbol(), atom2.GetSymbol())
Chem.SanitizeMol(mol)
return mol | python | {
"resource": ""
} |
q272553 | standardize_smiles | test | def standardize_smiles(smiles):
"""Return a standardized canonical SMILES string given a SMILES string.
Note: This is a convenience function for quickly standardizing a single SMILES string. It is more efficient to use
the :class:`~molvs.standardize.Standardizer` class directly when working with many molecules or when custom options
are needed.
:param string smiles: The SMILES for the molecule.
:returns: The SMILES for the standardized molecule.
:rtype: string.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles, sanitize=False)
mol = Standardizer().standardize(mol)
return Chem.MolToSmiles(mol, isomericSmiles=True) | python | {
"resource": ""
} |
q272554 | enumerate_tautomers_smiles | test | def enumerate_tautomers_smiles(smiles):
"""Return a set of tautomers as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible tautomer.
:rtype: set of strings.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles, sanitize=False)
mol = Standardizer().standardize(mol)
tautomers = TautomerEnumerator().enumerate(mol)
return {Chem.MolToSmiles(m, isomericSmiles=True) for m in tautomers} | python | {
"resource": ""
} |
q272555 | canonicalize_tautomer_smiles | test | def canonicalize_tautomer_smiles(smiles):
"""Return a standardized canonical tautomer SMILES string given a SMILES string.
Note: This is a convenience function for quickly standardizing and finding the canonical tautomer for a single
SMILES string. It is more efficient to use the :class:`~molvs.standardize.Standardizer` class directly when working
with many molecules or when custom options are needed.
:param string smiles: The SMILES for the molecule.
:returns: The SMILES for the standardize canonical tautomer.
:rtype: string.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles, sanitize=False)
mol = Standardizer().standardize(mol)
tautomer = TautomerCanonicalizer().canonicalize(mol)
return Chem.MolToSmiles(tautomer, isomericSmiles=True) | python | {
"resource": ""
} |
q272556 | Standardizer.standardize | test | def standardize(self, mol):
"""Return a standardized version the given molecule.
The standardization process consists of the following stages: RDKit
:py:func:`~rdkit.Chem.rdmolops.RemoveHs`, RDKit :py:func:`~rdkit.Chem.rdmolops.SanitizeMol`,
:class:`~molvs.metal.MetalDisconnector`, :class:`~molvs.normalize.Normalizer`,
:class:`~molvs.charge.Reionizer`, RDKit :py:func:`~rdkit.Chem.rdmolops.AssignStereochemistry`.
:param mol: The molecule to standardize.
:type mol: rdkit.Chem.rdchem.Mol
:returns: The standardized molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
mol = copy.deepcopy(mol)
Chem.SanitizeMol(mol)
mol = Chem.RemoveHs(mol)
mol = self.disconnect_metals(mol)
mol = self.normalize(mol)
mol = self.reionize(mol)
Chem.AssignStereochemistry(mol, force=True, cleanIt=True)
# TODO: Check this removes symmetric stereocenters
return mol | python | {
"resource": ""
} |
q272557 | Standardizer.tautomer_parent | test | def tautomer_parent(self, mol, skip_standardize=False):
"""Return the tautomer parent of a given molecule.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The tautomer parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
tautomer = self.canonicalize_tautomer(mol)
tautomer = self.standardize(tautomer)
return tautomer | python | {
"resource": ""
} |
q272558 | Standardizer.fragment_parent | test | def fragment_parent(self, mol, skip_standardize=False):
"""Return the fragment parent of a given molecule.
The fragment parent is the largest organic covalent unit in the molecule.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The fragment parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
# TODO: Consider applying FragmentRemover first to remove salts, solvents?
fragment = self.largest_fragment(mol)
return fragment | python | {
"resource": ""
} |
q272559 | Standardizer.stereo_parent | test | def stereo_parent(self, mol, skip_standardize=False):
"""Return the stereo parent of a given molecule.
The stereo parent has all stereochemistry information removed from tetrahedral centers and double bonds.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The stereo parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
else:
mol = copy.deepcopy(mol)
Chem.RemoveStereochemistry(mol)
return mol | python | {
"resource": ""
} |
q272560 | Standardizer.isotope_parent | test | def isotope_parent(self, mol, skip_standardize=False):
"""Return the isotope parent of a given molecule.
The isotope parent has all atoms replaced with the most abundant isotope for that element.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The isotope parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
else:
mol = copy.deepcopy(mol)
# Replace isotopes with common weight
for atom in mol.GetAtoms():
atom.SetIsotope(0)
return mol | python | {
"resource": ""
} |
q272561 | Standardizer.charge_parent | test | def charge_parent(self, mol, skip_standardize=False):
"""Return the charge parent of a given molecule.
The charge parent is the uncharged version of the fragment parent.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The charge parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
# TODO: All ionized acids and bases should be neutralised.
if not skip_standardize:
mol = self.standardize(mol)
fragment = self.fragment_parent(mol, skip_standardize=True)
if fragment:
uncharged = self.uncharge(fragment)
# During final standardization, the Reionizer ensures any remaining charges are in the right places
uncharged = self.standardize(uncharged)
return uncharged | python | {
"resource": ""
} |
q272562 | Standardizer.super_parent | test | def super_parent(self, mol, skip_standardize=False):
"""Return the super parent of a given molecule.
THe super parent is fragment, charge, isotope, stereochemistry and tautomer insensitive. From the input
molecule, the largest fragment is taken. This is uncharged and then isotope and stereochemistry information is
discarded. Finally, the canonical tautomer is determined and returned.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The super parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
# We don't need to get fragment parent, because the charge parent is the largest fragment
mol = self.charge_parent(mol, skip_standardize=True)
mol = self.isotope_parent(mol, skip_standardize=True)
mol = self.stereo_parent(mol, skip_standardize=True)
mol = self.tautomer_parent(mol, skip_standardize=True)
mol = self.standardize(mol)
return mol | python | {
"resource": ""
} |
q272563 | main | test | def main():
"""Main function for molvs command line interface."""
# Root options
parser = MolvsParser(epilog='use "molvs <command> -h" to show help for a specific command')
subparsers = parser.add_subparsers(title='Available commands')
# Options common to all commands
common_parser = MolvsParser(add_help=False)
common_parser.add_argument('infile', nargs='?', help='input filename', type=argparse.FileType('r'), default=sys.stdin)
common_parser.add_argument('-i', '--intype', help='input filetype', choices=FILETYPES)
common_parser.add_argument('-:', '--smiles', help='input SMILES instead of file', metavar='<smiles>')
common_parser.add_argument('-O', '--outfile', help='output filename', type=argparse.FileType('w'), default=sys.stdout, metavar='<outfile>')
# Standardize options
standardize_parser = subparsers.add_parser('standardize', help='standardize a molecule', parents=[common_parser])
standardize_parser.add_argument('-o', '--outtype', help='output filetype', choices=FILETYPES)
standardize_parser.set_defaults(func=standardize_main)
# Validate options
validate_parser = subparsers.add_parser('validate', help='validate a molecule', parents=[common_parser])
validate_parser.set_defaults(func=validate_main)
args = parser.parse_args()
try:
args.func(args)
except Exception as e:
sys.stderr.write('Error: %s\n\n'.encode() % e.message)
parser.print_help()
sys.exit(2) | python | {
"resource": ""
} |
q272564 | FragmentRemover.remove | test | def remove(self, mol):
"""Return the molecule with specified fragments removed.
:param mol: The molecule to remove fragments from.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with fragments removed.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running FragmentRemover')
# Iterate FragmentPatterns and remove matching fragments
for frag in self.fragments:
# If nothing is left or leave_last and only one fragment, end here
if mol.GetNumAtoms() == 0 or (self.leave_last and len(Chem.GetMolFrags(mol)) <= 1):
break
# Apply removal for this FragmentPattern
removed = Chem.DeleteSubstructs(mol, frag.smarts, onlyFrags=True)
if not mol.GetNumAtoms() == removed.GetNumAtoms():
log.info('Removed fragment: %s', frag.name)
if self.leave_last and removed.GetNumAtoms() == 0:
# All the remaining fragments match this pattern - leave them all
break
mol = removed
return mol | python | {
"resource": ""
} |
q272565 | LargestFragmentChooser.choose | test | def choose(self, mol):
"""Return the largest covalent unit.
The largest fragment is determined by number of atoms (including hydrogens). Ties are broken by taking the
fragment with the higher molecular weight, and then by taking the first alphabetically by SMILES if needed.
:param mol: The molecule to choose the largest fragment from.
:type mol: rdkit.Chem.rdchem.Mol
:return: The largest fragment.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running LargestFragmentChooser')
# TODO: Alternatively allow a list of fragments to be passed as the mol parameter
fragments = Chem.GetMolFrags(mol, asMols=True)
largest = None
for f in fragments:
smiles = Chem.MolToSmiles(f, isomericSmiles=True)
log.debug('Fragment: %s', smiles)
organic = is_organic(f)
if self.prefer_organic:
# Skip this fragment if not organic and we already have an organic fragment as the largest so far
if largest and largest['organic'] and not organic:
continue
# Reset largest if it wasn't organic and this fragment is organic
if largest and organic and not largest['organic']:
largest = None
# Count atoms
atoms = 0
for a in f.GetAtoms():
atoms += 1 + a.GetTotalNumHs()
# Skip this fragment if fewer atoms than the largest
if largest and atoms < largest['atoms']:
continue
# Skip this fragment if equal number of atoms but weight is lower
weight = rdMolDescriptors.CalcExactMolWt(f)
if largest and atoms == largest['atoms'] and weight < largest['weight']:
continue
# Skip this fragment if equal atoms and equal weight but smiles comes last alphabetically
if largest and atoms == largest['atoms'] and weight == largest['weight'] and smiles > largest['smiles']:
continue
# Otherwise this is the largest so far
log.debug('New largest fragment: %s (%s)', smiles, atoms)
largest = {'smiles': smiles, 'fragment': f, 'atoms': atoms, 'weight': weight, 'organic': organic}
return largest['fragment'] | python | {
"resource": ""
} |
q272566 | integrate_ivp | test | def integrate_ivp(u0=1.0, v0=0.0, mu=1.0, tend=10.0, dt0=1e-8, nt=0,
nsteps=600, t0=0.0, atol=1e-8, rtol=1e-8, plot=False,
savefig='None', method='bdf', dpi=100, verbose=False):
"""
Example program integrating an IVP problem of van der Pol oscillator
"""
f, j = get_f_and_j(mu)
if nt > 1:
tout = np.linspace(t0, tend, nt)
yout, nfo = integrate_predefined(
f, j, [u0, v0], tout, dt0, atol, rtol, nsteps=nsteps,
check_indexing=False, method=method)
else:
tout, yout, nfo = integrate_adaptive(
f, j, [u0, v0], t0, tend, dt0, atol, rtol, nsteps=nsteps,
check_indexing=False, method=method) # dfdt[:] also for len == 1
if verbose:
print(nfo)
if plot:
import matplotlib.pyplot as plt
plt.plot(tout, yout[:, 1], 'g--')
plt.plot(tout, yout[:, 0], 'k-', linewidth=2)
if savefig == 'None':
plt.show()
else:
plt.savefig(savefig, dpi=dpi) | python | {
"resource": ""
} |
q272567 | GitHub_LLNL_Stats.get_stats | test | def get_stats(self, username='', password='', organization='llnl',
force=True, repo_type='public'):
"""
Retrieves the statistics from the given organization with the given
credentials. Will not retreive data if file exists and force hasn't been
set to True. This is to save GH API requests.
"""
date = str(datetime.date.today())
file_path = ('../github_stats_output/' + date[:4] + '/' + date[:7] + '/'
+ date + '.csv')
if force or not os.path.isfile(file_path):
my_github.login(username, password)
calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
print 'Rate Limit: ' + str(calls_beginning)
my_github.get_org(organization)
count_members = my_github.get_mems_of_org()
count_teams = my_github.get_teams_of_org()
my_github.repos(repo_type=repo_type, organization=organization)
#Write JSON
my_github.write_org_json(dict_to_write=self.members_json,
path_ending_type='members', is_list=True)
my_github.write_org_json(dict_to_write=
{'singleton': self.org_retrieved.to_json()},
path_ending_type='organization')
my_github.write_org_json(dict_to_write=self.teams_json,
path_ending_type='teams', is_list=True)
my_github.write_repo_json(dict_to_write=self.repos_json,
path_ending_type='repo')
my_github.write_repo_json(dict_to_write=self.contributors_json,
path_ending_type='contributors', is_list=True)
my_github.write_repo_json(dict_to_write=self.pull_requests_json,
path_ending_type='pull-requests', is_list=True)
my_github.write_repo_json(dict_to_write=self.issues_json,
path_ending_type='issues', is_list=True)
my_github.write_repo_json(dict_to_write=self.languages_json,
path_ending_type='languages', is_dict=True)
my_github.write_repo_json(dict_to_write=self.commits_json,
path_ending_type='commits', is_list=True)
#Write CSV
my_github.write_to_file(file_path,
date,
organization,
count_members,
count_teams)
calls_remaining = self.logged_in_gh.ratelimit_remaining
calls_used = calls_beginning - calls_remaining
print ('Rate Limit Remaining: ' + str(calls_remaining) + '\nUsed '
+ str(calls_used) + ' API calls.') | python | {
"resource": ""
} |
q272568 | GitHub_LLNL_Stats.get_mems_of_org | test | def get_mems_of_org(self):
"""
Retrieves the number of members of the organization.
"""
print 'Getting members.'
counter = 0
for member in self.org_retrieved.iter_members():
self.members_json[member.id] = member.to_json()
counter += 1
return counter | python | {
"resource": ""
} |
q272569 | GitHub_LLNL_Stats.get_teams_of_org | test | def get_teams_of_org(self):
"""
Retrieves the number of teams of the organization.
"""
print 'Getting teams.'
counter = 0
for team in self.org_retrieved.iter_teams():
self.teams_json[team.id] = team.to_json()
counter += 1
return counter | python | {
"resource": ""
} |
q272570 | GitHub_LLNL_Stats.repos | test | def repos(self, repo_type='public', organization='llnl'):
"""
Retrieves info about the repos of the current organization.
"""
print 'Getting repos.'
for repo in self.org_retrieved.iter_repos(type=repo_type):
#JSON
json = repo.to_json()
self.repos_json[repo.name] = json
#CSV
temp_repo = my_repo.My_Repo()
temp_repo.name = repo.full_name
self.total_repos += 1
temp_repo.contributors = my_github.get_total_contributors(repo)
self.total_contributors += temp_repo.contributors
temp_repo.forks = repo.forks_count
self.total_forks += temp_repo.forks
temp_repo.stargazers = repo.stargazers
self.total_stars += temp_repo.stargazers
temp_repo.pull_requests_open, temp_repo.pull_requests_closed = \
my_github.get_pull_reqs(repo)
temp_repo.pull_requests = (temp_repo.pull_requests_open
+ temp_repo.pull_requests_closed)
self.total_pull_reqs += temp_repo.pull_requests_open
self.total_pull_reqs += temp_repo.pull_requests_closed
self.total_pull_reqs_open += temp_repo.pull_requests_open
self.total_pull_reqs_closed += temp_repo.pull_requests_closed
temp_repo.open_issues = repo.open_issues_count
self.total_open_issues += temp_repo.open_issues
temp_repo.closed_issues = my_github.get_issues(repo, organization=organization)
temp_repo.issues = temp_repo.closed_issues + temp_repo.open_issues
self.total_closed_issues += temp_repo.closed_issues
self.total_issues += temp_repo.issues
my_github.get_languages(repo, temp_repo)
temp_repo.readme = my_github.get_readme(repo)
#temp_repo.license = my_github.get_license(repo)
temp_repo.commits = self.get_commits(repo=repo, organization=organization)
self.total_commits += temp_repo.commits
self.all_repos.append(temp_repo) | python | {
"resource": ""
} |
q272571 | GitHub_LLNL_Stats.get_total_contributors | test | def get_total_contributors(self, repo):
"""
Retrieves the number of contributors to a repo in the organization.
Also adds to unique contributor list.
"""
repo_contributors = 0
for contributor in repo.iter_contributors():
repo_contributors += 1
self.unique_contributors[contributor.id].append(repo.name)
self.contributors_json[repo.name].append(contributor.to_json())
return repo_contributors | python | {
"resource": ""
} |
q272572 | GitHub_LLNL_Stats.get_pull_reqs | test | def get_pull_reqs(self, repo):
"""
Retrieves the number of pull requests on a repo in the organization.
"""
pull_reqs_open = 0
pull_reqs_closed = 0
for pull_request in repo.iter_pulls(state='all'):
self.pull_requests_json[repo.name].append(pull_request.to_json())
if pull_request.closed_at is not None:
pull_reqs_closed += 1
else:
pull_reqs_open += 1
return pull_reqs_open, pull_reqs_closed | python | {
"resource": ""
} |
def get_issues(self, repo, organization='llnl'):
    """Fetch issues for *repo*, cache their JSON, and count closed issues.

    Issue JSON is appended to self.issues_json[repo.name]. Fetching is
    incremental: when dated snapshot files already exist on disk under
    ../github-data/<organization>/<repo>/issues, only issues updated since
    the most recent prior snapshot date are retrieved; otherwise every
    issue is fetched.

    Parameters
    ==========
    repo: repository object exposing iter_issues()
    organization: directory name under ../github-data/ (default 'llnl')

    Returns
    =======
    int: number of closed issues on the repo.
    """
    #JSON
    path = ('../github-data/' + organization + '/' + repo.name + '/issues')
    is_only_today = False
    if not os.path.exists(path):  # no previous snapshots: fetch everything
        all_issues = repo.iter_issues(state='all')
        is_only_today = True
    else:
        # os.listdir() returns entries in arbitrary order, so sort so the
        # last element really is the newest YYYY-MM-DD.json snapshot.
        files = sorted(os.listdir(path))
        if not files:
            # Directory exists but is empty; fetch everything.
            all_issues = repo.iter_issues(state='all')
            is_only_today = True
        else:
            date = str(files[-1][:-5])  # strip the '.json' suffix
            if date == str(datetime.date.today()):
                # Newest snapshot is today's; use the previous one if any
                # (>= 2 covers the two-file case the old '> 2' missed).
                if len(files) >= 2:
                    date = str(files[-2][:-5])
                else:
                    # Only today's file exists; retrieve every issue.
                    all_issues = repo.iter_issues(state='all')
                    is_only_today = True
    if not is_only_today:  # fetch only issues updated since that snapshot
        all_issues = repo.iter_issues(since=date, state='all')
    for issue in all_issues:
        self.issues_json[repo.name].append(issue.to_json())
    #CSV
    closed_issues = 0
    for issue in repo.iter_issues(state='closed'):
        if issue is not None:
            closed_issues += 1
    return closed_issues
"resource": ""
} |
def get_readme(self, repo):
    """Return how *repo* provides a README.

    Returns 'MD' when GitHub itself recognizes a README, otherwise the
    top-level file path found via code search, or 'MISS' when none is
    found. Increments self.total_readmes on a hit.
    """
    readme_contents = repo.readme()
    if readme_contents is not None:
        self.total_readmes += 1
        return 'MD'
    # Throttle: the code-search API allows roughly 30 requests/minute.
    if self.search_limit >= 28:
        print('Hit search limit. Sleeping for 60 sec.')
        time.sleep(60)
        self.search_limit = 0
    self.search_limit += 1
    # Note the space after 'readme' -- without it the two literals fused
    # into the single useless token 'readmein:path...'.
    search_results = self.logged_in_gh.search_code('readme '
        + 'in:path repo:' + repo.full_name)
    try:
        for result in search_results:
            path = result.path[1:]  # drop the leading '/'
            # Accept only top-level files whose name mentions 'readme'.
            if '/' not in path and 'readme' in path.lower():
                self.total_readmes += 1
                return path
        return 'MISS'
    except (github3.models.GitHubError, StopIteration) as e:
        return 'MISS'
"resource": ""
} |
def get_license(self, repo):
    """Return the path of *repo*'s top-level license file, or 'MISS'.

    Uses GitHub code search; increments self.total_licenses on a hit.
    """
    # Throttle: the code-search API allows roughly 30 requests/minute.
    if self.search_limit >= 28:
        print('Hit search limit. Sleeping for 60 sec.')
        time.sleep(60)
        self.search_limit = 0
    self.search_limit += 1
    # Note the space after 'license' -- without it the two literals fused
    # into the single useless token 'licensein:path...'.
    search_results = self.logged_in_gh.search_code('license '
        + 'in:path repo:' + repo.full_name)
    try:
        for result in search_results:
            path = result.path[1:]  # drop the leading '/'
            # Accept only top-level files whose name mentions 'license'.
            if '/' not in path and 'license' in path.lower():
                self.total_licenses += 1
                return path
        return 'MISS'
    # Catch GitHubError too, matching get_readme's handling of search
    # failures (previously only StopIteration was caught here).
    except (github3.models.GitHubError, StopIteration) as e:
        return 'MISS'
"resource": ""
} |
def get_commits(self, repo, organization='llnl'):
    """Fetch commits for *repo*, cache their JSON, and return the total count.

    Commit JSON is appended to self.commits_json[repo.name]. Fetching is
    incremental: when dated snapshot files already exist on disk under
    ../github-data/<organization>/<repo>/commits, only commits made since
    the most recent prior snapshot date are retrieved; otherwise every
    commit is fetched.

    Parameters
    ==========
    repo: repository object exposing iter_commits()
    organization: directory name under ../github-data/ (default 'llnl')

    Returns
    =======
    int: total number of commits on the repo.
    """
    #JSON
    path = ('../github-data/' + organization + '/' + repo.name + '/commits')
    is_only_today = False
    if not os.path.exists(path):  # no previous snapshots: fetch everything
        all_commits = repo.iter_commits()
        is_only_today = True
    else:
        # os.listdir() returns entries in arbitrary order, so sort so the
        # last element really is the newest YYYY-MM-DD.json snapshot.
        files = sorted(os.listdir(path))
        if not files:
            # Directory exists but is empty; fetch everything.
            all_commits = repo.iter_commits()
            is_only_today = True
        else:
            date = str(files[-1][:-5])  # strip the '.json' suffix
            if date == str(datetime.date.today()):
                # Newest snapshot is today's; use the previous one if any
                # (>= 2 covers the two-file case the old '> 2' missed).
                if len(files) >= 2:
                    date = str(files[-2][:-5])
                else:
                    # Only today's file exists; retrieve every commit.
                    all_commits = repo.iter_commits()
                    is_only_today = True
    if not is_only_today:  # fetch only commits made since that snapshot
        all_commits = repo.iter_commits(since=date)
    for commit in all_commits:
        self.commits_json[repo.name].append(commit.to_json())
    #for csv
    count = 0
    for commit in repo.iter_commits():
        count += 1
    return count
"resource": ""
} |
def write_org_json(self, date=(datetime.date.today()),
        organization='llnl', dict_to_write={}, path_ending_type='',
        is_list=False):
    """Write organization-level stats to a dated JSON file.

    Each value of *dict_to_write* is serialized and the results are
    joined with commas; with is_list=True the payload is wrapped in
    [...] so the file holds a JSON array.

    Building the payload in memory replaces the old "write a trailing
    comma, then seek(-1)/truncate()" trick, which raised on empty input
    and is unsupported on text-mode files under Python 3.

    Parameters
    ==========
    date: date (or string) used as the file name stem
    organization: organization directory name (default 'llnl')
    dict_to_write: mapping whose values are serialized (read-only here)
    path_ending_type: stats category subdirectory name
    is_list: wrap output in square brackets when True
    """
    path = ('../github-data/' + organization + '-org/'
        + path_ending_type + '/' + str(date) + '.json')
    self.checkDir(path)
    chunks = [json.dumps(dict_to_write[item], sort_keys=True,
                         indent=4, separators=(',', ': '))
              for item in dict_to_write]
    payload = ','.join(chunks)
    if is_list:
        payload = '[' + payload + ']'
    with open(path, 'w') as out:  # 'w' truncates any previous contents
        out.write(payload)
"resource": ""
} |
def write_totals(self, file_path='', date=str(datetime.date.today()),
    organization='N/A', members=0, teams=0):
    """
    Updates the total.csv file with current data.

    Appends one row of organization-wide totals (repos, members, teams,
    contributors, forks, stars, pull requests, issues, commits, ...) to
    *file_path*, writing the CSV header first when the file does not yet
    exist. The row's ``id`` column is the current number of data rows.

    NOTE(review): the ``date`` default is evaluated once at import time,
    so a long-running process keeps writing its start-up date.
    NOTE(review): self.delete_last_line is defined elsewhere; presumably
    it drops an earlier row written for the same date -- confirm against
    its definition.
    """
    total_exists = os.path.isfile(file_path)
    with open(file_path, 'a') as out_total:
        if not total_exists:
            # First write: emit the CSV header.
            out_total.write('date,organization,repos,members,teams,'
                + 'unique_contributors,total_contributors,forks,'
                + 'stargazers,pull_requests,open_issues,has_readme,'
                + 'has_license,pull_requests_open,pull_requests_closed,'
                + 'commits,id,closed_issues,issues\n')
        self.delete_last_line(date=date, file_path=file_path)
        out_total.close()
    # Count existing data rows (total lines minus the header) to use as
    # this row's id value.
    with open(file_path, 'r') as file_read:
        row_count = sum(1 for row in file_read) - 1
        file_read.close()
    with open(file_path, 'a') as out_total:
        out_total.write(date + ',' + organization + ','
            + str(self.total_repos) + ',' + str(members) + ',' + str(teams)
            + ',' + str(len(self.unique_contributors)) + ','
            + str(self.total_contributors) + ',' + str(self.total_forks)
            + ',' + str(self.total_stars) + ',' + str(self.total_pull_reqs)
            + ',' + str(self.total_open_issues) + ','
            + str(self.total_readmes) + ',' + str(self.total_licenses) + ','
            + str(self.total_pull_reqs_open) + ','
            + str(self.total_pull_reqs_closed) + ','
            + str(self.total_commits) + ',' + str(row_count) + ','
            + str(self.total_closed_issues) + ',' + str(self.total_issues)
            + '\n')
        out_total.close()
"resource": ""
} |
def write_languages(self, file_path='',date=str(datetime.date.today())):
    """
    Updates languages.csv file with current data.

    Rows previously recorded for *date* are removed first (remove_date),
    then one row per language is appended: date, language, repo count,
    total byte size, and log10 of the size. The TypeError/KeyError
    fallback writes a count of 0 for languages that appear in
    self.languages_size but are missing (or None) in self.languages.

    NOTE(review): a language whose size is 0 would make math.log10 raise
    ValueError, which is NOT caught here -- confirm sizes are always > 0.
    NOTE(review): the ``date`` default is evaluated once at import time.
    """
    self.remove_date(file_path=file_path, date=date)
    languages_exists = os.path.isfile(file_path)
    with open(file_path, 'a') as out_languages:
        if not languages_exists:
            # First write: emit the CSV header.
            out_languages.write('date,language,count,size,size_log\n')
        languages_sorted = sorted(self.languages_size)
        #self.delete_last_line(date=date, file_path=file_path)
        for language in languages_sorted:
            try:
                out_languages.write(date + ',' + language + ','
                    + str(self.languages[language]) + ','
                    + str(self.languages_size[language]) + ','
                    + str(math.log10(int(self.languages_size[language])))
                    + '\n')
            except (TypeError, KeyError) as e:
                # No usable repo count for this language: record 0.
                out_languages.write(date + ',' + language + ','
                    + str(0) + ','
                    + str(self.languages_size[language]) + ','
                    + str(math.log10(int(self.languages_size[language])))
                    + '\n')
"resource": ""
} |
def checkDir(self, file_path=''):
    """Ensure the parent directory of *file_path* exists.

    Creates intermediate directories as needed; a concurrent creation of
    the same directory (EEXIST) is tolerated, any other OSError is
    re-raised.
    """
    parent = os.path.dirname(file_path)
    if os.path.exists(parent):
        return
    try:
        os.makedirs(parent)
    except OSError as err:
        # Another process may have created it between the check and here.
        if err.errno != errno.EEXIST:
            raise
"resource": ""
} |
def remove_date(self, file_path='', date=None):
    """
    Removes all rows of the associated date from the given csv file.
    Defaults to today; does nothing when the file is missing.

    The date default is now resolved at call time -- the previous
    ``date=str(datetime.date.today())`` signature default was evaluated
    once at import and went stale in long-running processes.
    """
    if date is None:
        date = str(datetime.date.today())
    if not os.path.isfile(file_path):
        return
    # Stream rows into a scratch file, dropping those whose first column
    # matches *date*, then swap the scratch file into place.
    with open(file_path, 'rb') as inp, open('temp.csv', 'wb') as out:
        writer = csv.writer(out)
        for row in csv.reader(inp):
            if row[0] != date:
                writer.writerow(row)
    os.remove(file_path)
    os.rename("temp.csv", file_path)
"resource": ""
} |
def gov_orgs():
    """
    Returns a list of the names of US Government GitHub organizations

    Based on: https://government.github.com/community/
    Exmample return:
        {'llnl', '18f', 'gsa', 'dhs-ncats', 'spack', ...}
    """
    catalog = requests.get('https://government.github.com/organizations.json').json()
    groups = (
        catalog['governments']['U.S. Federal'],
        catalog['governments']['U.S. Military and Intelligence'],
        catalog['research']['U.S. Research Labs'],
    )
    combined = set()
    for group in groups:
        combined.update(group)
    return list(combined)
"resource": ""
} |
def create_enterprise_session(url, token=None):
    """
    Create a github3.py session for a GitHub Enterprise instance

    If token is not provided, will attempt to use the GITHUB_API_TOKEN
    environment variable if present.
    """
    session = github3.enterprise_login(url=url, token=token)
    if session is not None:
        return session
    msg = 'Unable to connect to GitHub Enterprise (%s) with provided token.'
    raise RuntimeError(msg, url)
"resource": ""
} |
def _check_api_limits(gh_session, api_required=250, sleep_time=15):
    """
    Simplified check for API limits

    If fewer than *api_required* requests remain, spin in place sleeping
    *sleep_time* seconds at a time until the advertised reset time has
    passed. See: https://developer.github.com/v3/#rate-limiting
    """
    api_rates = gh_session.rate_limit()
    api_remaining = api_rates['rate']['remaining']
    api_reset = api_rates['rate']['reset']
    logger.debug('Rate Limit - %d requests remaining', api_remaining)
    if api_remaining > api_required:
        return
    now_time = time.time()
    time_to_reset = int(api_reset - now_time)
    # logger.warn is a deprecated alias; use warning().
    logger.warning('Rate Limit Depleted - Sleeping for %d seconds', time_to_reset)
    while now_time < api_reset:
        # Honor the sleep_time parameter -- it was previously ignored in
        # favor of a hard-coded 10-second interval.
        time.sleep(sleep_time)
        now_time = time.time()
    return
"resource": ""
} |
def connect(url='https://github.com', token=None):
    """
    Create a GitHub session for making requests

    Dispatches to create_session for github.com and to
    create_enterprise_session for any other (Enterprise) URL; raises
    RuntimeError when no session could be established.
    """
    if url == 'https://github.com':
        session = create_session(token)
    else:
        session = create_enterprise_session(url, token)
    if session is None:
        msg = 'Unable to connect to (%s) with provided token.'
        raise RuntimeError(msg, url)
    logger.info('Connected to: %s', url)
    return session
"resource": ""
} |
def query_repos(gh_session, orgs=None, repos=None, public_only=True):
    """
    Yields GitHub3.py repo objects for provided orgs and repo names

    If orgs and repos are BOTH empty, execute special mode of getting ALL
    repositories from the GitHub Server.

    If public_only is True, only repositories marked public are yielded
    from organizations; set it to False to yield every repository the
    session can access. API rate limits are re-checked before each fetch.
    """
    orgs = [] if orgs is None else orgs
    repos = [] if repos is None else repos
    privacy = 'public' if public_only else 'all'
    _check_api_limits(gh_session, 10)
    for org_name in orgs:
        org = gh_session.organization(org_name)
        num_repos = org.public_repos_count
        _check_api_limits(gh_session, _num_requests_needed(num_repos))
        for repo in org.repositories(type=privacy):
            _check_api_limits(gh_session, 10)
            yield repo
    for full_name in repos:
        _check_api_limits(gh_session, 10)
        owner, name = full_name.split('/')
        yield gh_session.repository(owner, name)
    if not (orgs or repos):
        for repo in gh_session.all_repositories():
            yield repo
"resource": ""
} |
def get_org(self, organization_name=''):
    """
    Retrieves an organization via given org name. If given
    empty string, prompts user for an org name.

    Stores the name on self.organization_name and the fetched org on
    self.org_retrieved.
    """
    if organization_name == '':
        organization_name = raw_input('Organization: ')
    # Record and use the same (possibly prompted) name: previously the
    # prompt result was stored but the original empty string was still
    # passed to the API lookup.
    self.organization_name = organization_name
    print('Getting organization.')
    self.org_retrieved = self.logged_in_gh.organization(organization_name)
"resource": ""
} |
def write_to_file(self, file_path='', date=(datetime.date.today()),
    organization='llnl'):
    """
    Writes stargazers data to file.

    Overwrites *file_path* with a header line followed by one
    '<key>,<count>' line per entry of self.stargazers, in sorted key
    order.

    NOTE(review): the header names three columns (date, organization,
    stargazers) but each data line carries only two values, and the
    ``date``/``organization`` parameters are never used -- confirm
    whether the keys already embed that information or the header is
    stale.
    """
    with open(file_path, 'w+') as out:
        out.write('date,organization,stargazers\n')
        # Plain lexicographic sort (NOTE: not case-insensitive, despite
        # the original comment claiming a lowercase sort).
        sorted_stargazers = sorted(self.stargazers)
        for star in sorted_stargazers:
            out.write(star + ',' + str(self.stargazers[star]) + '\n')
        out.close()
"resource": ""
} |
def from_gitlab(klass, repository, labor_hours=True):
    """
    Create CodeGovProject object from GitLab Repository

    Maps a python-gitlab v4 Project onto the code.gov metadata schema:
    required fields first (name, repositoryURL, description, permissions,
    laborHours, tags, contact), then the optional ones.

    Parameters
    ----------
    klass : the project class; invoked as an alternate constructor.
    repository : gitlab.v4.objects.Project to convert.
    labor_hours : when True, estimate laborHours via
        labor_hours_from_url; otherwise record 0.

    Raises
    ------
    TypeError : if *repository* is not a gitlab Project.
    """
    if not isinstance(repository, gitlab.v4.objects.Project):
        raise TypeError('Repository must be a gitlab Repository object')
    project = klass()
    logger.debug(
        'GitLab: repository_id=%d path_with_namespace=%s',
        repository.id,
        repository.path_with_namespace,
    )
    # -- REQUIRED FIELDS --
    project['name'] = repository.name
    project['repositoryURL'] = repository.http_url_to_repo
    project['description'] = repository.description
    # TODO: Update licenses from GitLab API
    project['permissions']['licenses'] = None
    web_url = repository.web_url
    # Only repos hosted on the public gitlab.com server can be openSource.
    public_server = web_url.startswith('https://gitlab.com')
    # NOTE(review): ('public') is a plain string, so `in` is a substring
    # test here; it only works because visibility is exactly 'public' --
    # should likely be `== 'public'` or `in ('public',)`.
    if repository.visibility in ('public') and public_server:
        project['permissions']['usageType'] = 'openSource'
    elif date_parse(repository.created_at) < POLICY_START_DATE:
        project['permissions']['usageType'] = 'exemptByPolicyDate'
    if labor_hours:
        project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
    else:
        project['laborHours'] = 0
    project['tags'] = ['gitlab'] + repository.tag_list
    project['contact'] = {
        'email': '',
        'URL': web_url,
    }
    # -- OPTIONAL FIELDS --
    # project['version'] = ''
    project['organization'] = repository.namespace['name']
    # TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
    project['status'] = 'Development'
    project['vcs'] = 'git'
    project['homepageURL'] = repository.web_url
    # Build the archive download URL from the private API base URL.
    api_url = repository.manager.gitlab._url
    archive_suffix = '/projects/%s/repository/archive' % repository.get_id()
    project['downloadURL'] = api_url + archive_suffix
    # project['languages'] = [l for l, _ in repository.languages()]
    # project['partners'] = []
    # project['relatedCode'] = []
    # project['reusedCode'] = []
    project['date'] = {
        'created': date_parse(repository.created_at).date().isoformat(),
        'lastModified': date_parse(repository.last_activity_at).date().isoformat(),
        'metadataLastUpdated': '',
    }
    # Strip null/empty-string values from the assembled record.
    _prune_dict_null_str(project)
    return project
"resource": ""
} |
def from_doecode(klass, record):
    """
    Create CodeGovProject object from DOE CODE record

    Handles crafting Code.gov Project

    Maps one DOE CODE JSON record onto the code.gov metadata schema:
    required fields first (name, repositoryURL, description, permissions,
    laborHours, tags, contact), then the optional ones present in the
    record.

    Parameters
    ----------
    klass : the project class; invoked as an alternate constructor.
    record : dict holding a single DOE CODE record.

    Raises
    ------
    TypeError : if *record* is not a dict.
    ValueError : if the record lacks an "ever_announced" flag.
    """
    if not isinstance(record, dict):
        raise TypeError('`record` must be a dict')
    project = klass()
    # -- REQUIRED FIELDS --
    project['name'] = record['software_title']
    logger.debug('DOE CODE: software_title="%s"', record['software_title'])
    # Prefer the repository link; fall back to the landing page.
    link = record.get('repository_link', '')
    if not link:
        link = record.get('landing_page')
        logger.warning('DOE CODE: No repositoryURL, using landing_page: %s', link)
    project['repositoryURL'] = link
    project['description'] = record['description']
    # Licenses may contain None entries; drop them before processing.
    licenses = set(record['licenses'])
    licenses.discard(None)
    logger.debug('DOE CODE: licenses=%s', licenses)
    license_objects = []
    if 'Other' in licenses:
        licenses.remove('Other')
        # 'Other' licenses point at the record's own proprietary URL.
        license_objects = [{
            'name': 'Other',
            'URL': record['proprietary_url']
        }]
    if licenses:
        license_objects.extend([_license_obj(license) for license in licenses])
    project['permissions']['licenses'] = license_objects
    if record['open_source']:
        usage_type = 'openSource'
    else:
        usage_type = 'exemptByLaw'
        project['permissions']['exemptionText'] = 'This source code is restricted by patent and / or intellectual property law.'
    project['permissions']['usageType'] = usage_type
    # TODO: Compute from git repo
    project['laborHours'] = 0
    project['tags'] = ['DOE CODE']
    lab_name = record.get('lab_display_name')
    if lab_name is not None:
        project['tags'].append(lab_name)
    project['contact']['email'] = record['owner']
    # project['contact']['URL'] = ''
    # project['contact']['name'] = ''
    # project['contact']['phone'] = ''
    # -- OPTIONAL FIELDS --
    if 'version_number' in record and record['version_number']:
        project['version'] = record['version_number']
    if lab_name is not None:
        project['organization'] = lab_name
    # Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
    status = record.get('ever_announced')
    if status is None:
        raise ValueError('DOE CODE: Unable to determine "ever_announced" value!')
    elif status:
        status = 'Production'
    else:
        status = 'Development'
    project['status'] = status
    # Infer the version-control system from the repository URL.
    vcs = None
    link = project['repositoryURL']
    if 'github.com' in link:
        vcs = 'git'
    if vcs is None:
        logger.debug('DOE CODE: Unable to determine vcs for: name="%s", repositoryURL=%s', project['name'], link)
        vcs = ''
    if vcs:
        project['vcs'] = vcs
    url = record.get('landing_page', '')
    if url:
        project['homepageURL'] = url
    # record['downloadURL'] = ''
    # self['disclaimerText'] = ''
    # self['disclaimerURL'] = ''
    if 'programming_languages' in record:
        project['languages'] = record['programming_languages']
    # self['partners'] = []
    # TODO: Look into using record['contributing_organizations']
    # self['relatedCode'] = []
    # self['reusedCode'] = []
    # date: [object] A date object describing the release.
    # created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.
    # lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.
    # metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.
    if 'date_record_added' in record and 'date_record_updated' in record:
        project['date'] = {
            'created': record['date_record_added'],
            # 'lastModified': '',
            'metadataLastUpdated': record['date_record_updated']
        }
    return project
"resource": ""
} |
q272591 | _license_obj | test | def _license_obj(license):
"""
A helper function to look up license object information
Use names from: https://api.github.com/licenses
"""
obj = None
if license in ('MIT', 'MIT License'):
obj = {
'URL': 'https://api.github.com/licenses/mit',
'name': 'MIT'
}
elif license in ('BSD 2-clause "Simplified" License'):
obj = {
'URL': 'https://api.github.com/licenses/bsd-2-clause',
'name': 'BSD-2-Clause'
}
elif license in ('BSD 3-clause "New" or "Revised" License'):
obj = {
'URL': 'https://api.github.com/licenses/bsd-3-clause',
'name': 'BSD-3-Clause'
}
elif license in ('Apache License 2.0'):
obj = {
'URL': 'https://api.github.com/licenses/apache-2.0',
'name': 'Apache-2.0'
}
elif license in ('GNU General Public License v2.1'):
obj = {
'URL': 'https://api.github.com/licenses/gpl-2.1',
'name': 'GPL-2.1'
}
elif license in ('GNU General Public License v2.0'):
obj = {
'URL': 'https://api.github.com/licenses/gpl-2.0',
'name': 'GPL-2.0'
}
elif license in ('GNU Lesser General Public License v2.1'):
obj = {
'URL': 'https://api.github.com/licenses/lgpl-2.1',
'name': 'LGPL-2.1'
}
elif license in ('GNU General Public License v3.0'):
obj = {
'URL': 'https://api.github.com/licenses/gpl-3.0',
'name': 'GPL-3.0'
}
elif license in ('GNU Lesser General Public License v3.0'):
obj = {
'URL': 'https://api.github.com/licenses/lgpl-3.0',
'name': 'LGPL-3.0'
}
elif license in ('Eclipse Public License 1.0'):
obj = {
'URL': 'https://api.github.com/licenses/epl-1.0',
'name': 'EPL-1.0',
}
elif license in ('Mozilla Public License 2.0'):
obj = {
'URL': 'https://api.github.com/licenses/mpl-2.0',
'name': 'MPL-2.0',
}
elif license in ('The Unlicense'):
obj = {
'URL': 'https://api.github.com/licenses/unlicense',
'name': 'Unlicense',
}
elif license in ('GNU Affero General Public License v3.0'):
obj = {
'URL': 'https://api.github.com/licenses/agpl-3.0',
'name': 'AGPL-3.0',
}
elif license in ('Eclipse Public License 2.0'):
obj = {
'URL': 'https://api.github.com/licenses/epl-2.0',
'name': 'EPL-2.0',
}
if obj is None:
logger.warn('I dont understand the license: %s', license)
raise ValueError('Aborting!')
return obj | python | {
"resource": ""
} |
def get_traffic(self):
    """
    Retrieves the traffic for the repositories of the given organization.

    For every public repo of self.org_retrieved, pulls referrers, popular
    paths, daily views, daily clones, and releases from the GitHub REST
    API via the per-category helper methods, which accumulate results on
    self.
    """
    print 'Getting traffic.'
    #Uses the developer (preview) API. Note this could change.
    headers = {'Accept': 'application/vnd.github.spiderman-preview', 'Authorization': 'token ' + self.token}
    # Releases use the stable API, so no preview Accept header is needed.
    headers_release = {'Authorization': 'token ' + self.token}
    for repo in self.org_retrieved.iter_repos(type='public'):
        url = ('https://api.github.com/repos/' + self.organization_name
            + '/' + repo.name)
        self.get_referrers(url=url, headers=headers, repo_name=repo.name)
        self.get_paths(url=url, headers=headers)
        self.get_data(url=url, headers=headers, dict_to_store=self.views,
            type='views', repo_name=repo.name)
        self.get_data(url=url, headers=headers, dict_to_store=self.clones,
            type='clones', repo_name=repo.name)
        self.get_releases(url=url, headers=headers_release, repo_name=repo.name)
"resource": ""
} |
def get_releases(self, url='', headers={}, repo_name=''):
    """
    Retrieves the releases for the given repo in JSON.

    The decoded response is cached in self.releases_json[repo_name].
    """
    response = requests.get(url + '/releases', headers=headers)
    self.releases_json[repo_name] = response.json()
"resource": ""
} |
def get_referrers(self, url='', headers={}, repo_name=''):
    """
    Retrieves the total referrers and unique referrers of all repos in
    json and then accumulates them in self.referrers.

    The raw JSON is cached in self.referrers_json[repo_name]; per-referrer
    (count, uniques) tuples are summed across repos, and a lowercase ->
    original-case name map is maintained in self.referrers_lower.
    """
    #JSON
    response = requests.get(url + '/traffic/popular/referrers', headers=headers)
    data = response.json()
    self.referrers_json[repo_name] = data
    #CSV
    for entry in data:
        name = entry['referrer']
        counts = (entry['count'], entry['uniques'])
        try:
            # Accumulate onto any previously recorded totals.
            prior = self.referrers[name]
            self.referrers[name] = (prior[0] + counts[0],
                prior[1] + counts[1])
        except KeyError:
            # First sighting of this referrer: record its raw numbers.
            self.referrers[name] = counts
        self.referrers_lower[name.lower()] = name
"resource": ""
} |
def get_data(self, url='',headers={}, date=str(datetime.date.today()),
    dict_to_store={}, type='', repo_name=''):
    """
    Retrieves data from json and stores it in the supplied dict. Accepts
    'clones' or 'views' as type.

    Fetches /traffic/<type> for the repo, caches the raw JSON in
    self.views_json or self.clones_json, then accumulates per-day
    (count, uniques) tuples into *dict_to_store*, keyed by the UTC
    timestamp in seconds. Today's partial numbers are skipped.

    NOTE(review): the ``date`` and ``dict_to_store`` defaults are
    evaluated once at import time; callers always pass dict_to_store
    explicitly. Locals named ``json``/``type``/``tuple`` shadow builtins.
    """
    #JSON
    url = (url + '/traffic/' + type)
    r3 = requests.get(url, headers=headers)
    json = r3.json()
    if type == 'views':
        self.views_json[repo_name] = json
    elif type == 'clones':
        self.clones_json[repo_name] = json
    #CSV
    for day in json[type]:
        # GitHub reports timestamps in milliseconds; convert to seconds.
        timestamp_seconds = day['timestamp']/1000
        try:
            date_timestamp = datetime.datetime.utcfromtimestamp(
                timestamp_seconds).strftime('%Y-%m-%d')
            #do not add todays date, some views might not be recorded yet
            if date_timestamp != date:
                tuple_in = (day['count'], day['uniques'])
                tuple = (dict_to_store[timestamp_seconds][0] + tuple_in[0],
                    dict_to_store[timestamp_seconds][1] + tuple_in[1])
                dict_to_store[timestamp_seconds] = tuple
        except KeyError:
            # First sighting of this day (KeyError from the dict lookup
            # above): record the day's raw numbers.
            tuple = dict_to_store[timestamp_seconds] = (day['count'],
                day['uniques'])
"resource": ""
} |
def write_json(self, date=(datetime.date.today()),
        organization='llnl', dict_to_write={}, path_ending_type=''):
    """
    Writes all traffic data to file in JSON form.

    One dated file per repository is written under
    ../github-data/<organization>/<repo>/<path_ending_type>/; repos with
    empty payloads are skipped.
    """
    for repo_name, payload in dict_to_write.items():
        if len(payload) == 0:  # nothing recorded for this repo
            continue
        path = ('../github-data/' + organization + '/' + repo_name + '/'
            + path_ending_type + '/' + str(date) + '.json')
        self.checkDir(path)
        with open(path, 'w') as handle:
            handle.write(json.dumps(payload, sort_keys=True,
                indent=4, separators=(',', ': ')))
"resource": ""
} |
def write_to_file(self, referrers_file_path='', views_file_path='',
        clones_file_path='', date=(datetime.date.today()), organization='llnl',
        views_row_count=0, clones_row_count=0):
    """
    Writes all traffic data to file.

    Delegates to write_referrers_to_file for referrers, then to
    write_data_to_file once each for views and clones.
    """
    self.write_referrers_to_file(file_path=referrers_file_path)
    jobs = (
        (views_file_path, self.views, 'views', views_row_count),
        (clones_file_path, self.clones, 'clones', clones_row_count),
    )
    for path, data, label, rows in jobs:
        self.write_data_to_file(file_path=path, dict_to_write=data,
            name=label, row_count=rows)
"resource": ""
} |
def check_data_redundancy(self, file_path='', dict_to_check={}):
    """
    Checks the given csv file against the json data scraped for the given
    dict, removing every entry whose date is already recorded so no
    redundant rows are written later.

    Returns the number of data rows (excluding the header) already in the
    file, or 0 when the file does not exist.
    """
    if not os.path.isfile(file_path):
        return 0
    rows_seen = 0
    with open(file_path, 'r') as source:
        source.readline()  # skip the header row
        for record in csv.reader(source):
            # Dates in column 0 are YYYY-MM-DD; convert to a UTC epoch
            # timestamp to match the keys of dict_to_check.
            stamp = calendar.timegm(time.strptime(record[0], '%Y-%m-%d'))
            if stamp in dict_to_check:  # this date is already recorded
                del dict_to_check[stamp]
            rows_seen += 1
    return rows_seen
"resource": ""
} |
def write_data_to_file(self, file_path='', date=str(datetime.date.today()),
        organization='llnl', dict_to_write={}, name='', row_count=0):
    """
    Writes given dict to file.

    Appends one CSV row per recorded day (sorted by timestamp) to
    *file_path*, emitting the header first when the file does not yet
    exist. *name* labels the count columns ('views' or 'clones');
    *row_count* seeds the incrementing id column.
    """
    already_there = os.path.isfile(file_path)
    with open(file_path, 'a') as sink:
        if not already_there:
            sink.write('date,organization,' + name + ',unique_' + name
                + ',id\n')
        for stamp in sorted(dict_to_write):
            # Keys are UTC epoch seconds; render them as YYYY-MM-DD.
            day = datetime.datetime.utcfromtimestamp(
                stamp).strftime('%Y-%m-%d')
            total, uniques = dict_to_write[stamp]
            sink.write(day + ',' + organization + ',' + str(total) + ','
                + str(uniques) + ',' + str(row_count) + '\n')
            row_count += 1
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.