text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_config(self):
    """Read the JSON configuration from ``self.config_file``.

    Returns:
        bool: True if the file was opened and parsed, False on I/O error.

    Raises:
        ValueError: if the file exists but does not contain valid JSON.
    """
    try:
        # The 'with' block closes the file automatically; the original also
        # called f.close() inside it, which was redundant.
        with open(self.config_file, 'r') as f:
            self.config = json.load(f)
    except IOError:
        return False
    return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post_note(self):
    """Post a note and return its id (or None if the server set none)."""
    # An empty/missing title becomes None so the note is untitled.
    title = self.args.note_title or None
    mynote = self.pump.Note(display_name=title,
                            content=self.args.note_content)
    # Address the note to our followers and CC the public collection.
    mynote.to = self.pump.me.followers
    mynote.cc = self.pump.Public
    mynote.send()
    return mynote.id or None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_obj_id(self, item):
    """Return the id for *item*, an id string or a PumpObject."""
    if item is None:
        return None
    # A plain string already is the id.
    if isinstance(item, six.string_types):
        return item
    if hasattr(item, 'id'):
        return item.id
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_page(self, url):
    """ Get a page of items from API.

    :param url: feed page URL to fetch; a falsy url means no more pages.
    :returns: iterable of raw item dicts; reversed when paging with 'since'.
    """
    if url:
        data = self.feed._request(url, offset=self._offset, since=self._since, before=self._before)
        # set values to False to avoid using them for next request
        # (False is deliberately distinct from None: None means the option was
        # never given, False means it was consumed on the first page)
        self._before = False if self._before is not None else None
        self._since = False if self._since is not None else None
        if getattr(self.feed, 'issue65', False):
            # offset-based paging workaround; stop sending offset next time
            self._offset = False
        if self._since is not None:
            # we want oldest items first when using 'since'
            # (after page one _since is False — still not None — so the
            # reversal keeps applying on subsequent pages)
            return reversed(data['items'])
        else:
            return data['items']
    else:
        return []
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def done(self):
    """Return True once we should stop yielding objects."""
    if not self._done:
        # No limit means we never stop on count alone.
        if self._limit is not None and self.itemcount >= self._limit:
            self._done = True
        elif self._limit is None:
            self._done = False
    return self._done
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_cache(self):
    """ Build a list of objects from feed's cached items or API page.

    Fills ``self.cache`` with PumpObjects and advances the paging state
    (``self.url`` / ``self._offset``) for the next call. Sets ``self._done``
    when the feed has run out of items.
    """
    self.cache = []
    if self.done:
        return
    for i in (self.get_cached() if self._cached else self.get_page(self.url)):
        if not self._cached:
            # some objects don't have objectType set (inbox activities)
            if not i.get("objectType"):
                i["objectType"] = self.feed.object_types[0]
            obj = Mapper(pypump=self.feed._pump).get_object(i)
        else:
            # cached items are already unserialized objects
            obj = i
        self.cache.append(obj)
    # ran out of items
    if len(self.cache) <= 0:
        self._done = True
    # check what to do next time
    if getattr(self.feed, 'issue65', False):
        # work around API bug for favorites feed, see https://github.com/xray7224/PyPump/issues/65
        if self._offset is None:
            self._offset = 0
        # page size is fixed at 20 for this workaround
        self._offset += 20
    elif self._since is not None:
        # paging towards newer items: follow the 'prev' link
        if self.feed.links.get('prev'):
            self.url = self.feed.links['prev']
            del self.feed.links['prev']  # avoid using it again
    else:
        # default paging towards older items: follow the 'next' link
        if self.feed.links.get('next'):
            self.url = self.feed.links['next']
            del self.feed.links['next']  # avoid using it again
        else:
            self.url = None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def items(self, offset=None, limit=20, since=None, before=None, *args, **kwargs):
    """Get a feed's items.

    :param offset: amount of items to skip before returning data.
    :param limit: amount of items to return.
    :param since: return items added after this id (ordered old -> new).
    :param before: return items added before this id (ordered new -> old).
    """
    item_list = ItemList(self, offset=offset, limit=limit, since=since,
                         before=before, cached=self.is_cached)
    return item_list
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def direct(self):
    """Direct inbox feed: activities addressed directly to the inbox owner."""
    url = self._subfeed("direct")
    # Already on a subfeed — there is no deeper feed to switch to.
    if any(part in self.url for part in ("direct", "major", "minor")):
        return self
    if self._direct is None:
        self._direct = self.__class__(url, pypump=self._pump)
    return self._direct
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def major(self):
    """Major inbox feed: major activities such as notes and images."""
    url = self._subfeed("major")
    # Already on a major/minor subfeed — nothing further to descend into.
    if any(part in self.url for part in ("major", "minor")):
        return self
    if self._major is None:
        self._major = self.__class__(url, pypump=self._pump)
    return self._major
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def minor(self):
    """Minor inbox feed: minor activities such as likes, shares and follows."""
    url = self._subfeed("minor")
    # Already on a minor/major subfeed — nothing further to descend into.
    if any(part in self.url for part in ("minor", "major")):
        return self
    if self._minor is None:
        self._minor = self.__class__(url, pypump=self._pump)
    return self._minor
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(self):
    """Convert the post into a structure compatible with ``json.dumps``."""
    # Build the inner activity object first.
    obj = {
        "objectType": self.object_type,
        "content": self.content,
    }
    if self.display_name:
        obj["displayName"] = self.display_name
    data = super(Note, self).serialize()
    data.update({"verb": "post", "object": obj})
    return data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def context(self):
    """Build the request body for OAuth dynamic client registration.

    Returns:
        str: JSON-encoded registration ("client_associate") or update
        ("client_update") payload for the server.
    """
    # Renamed from 'type' so we don't shadow the builtin.
    request_type = "client_associate" if self.key is None else "client_update"
    data = {
        "type": request_type,
        "application_type": self.type,
    }
    # An existing key means this is an update of a registered client.
    if self.key:
        data["client_id"] = self.key
        data["client_secret"] = self.secret
    # Add optional params
    if self.name:
        data["application_name"] = self.name
    if self.logo:
        data["logo_url"] = self.logo
    if self.contacts:
        # space separated list
        data["contacts"] = " ".join(self.contacts)
    if self.redirect:
        data["redirect_uri"] = " ".join(self.redirect)
    # Convert to JSON and send
    return json.dumps(data)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request(self, server=None):
    """ Sends the client registration/update request to the server.

    :param server: hostname to talk to; defaults to ``self.server``.
    :returns: dict of server response data (client_id, client_secret,
        expires_at, ...).
    :raises ClientException: if the response is not JSON or reports an error.
    """
    request = {
        "headers": {"Content-Type": "application/json"},
        "timeout": self._pump.timeout,
        "data": self.context,
    }
    url = "{proto}://{server}/{endpoint}".format(
        proto=self._pump.protocol,
        server=server or self.server,
        endpoint=self.ENDPOINT,
    )
    response = self._pump._requester(requests.post, url, **request)
    try:
        server_data = response.json()
    except ValueError:
        # body wasn't JSON — surface the raw content for debugging
        raise ClientException(response.content)
    if "error" in server_data:
        raise ClientException(server_data["error"], self.context)
    _log.debug("Client registration recieved: %(id)s %(secret)s %(expire)s", {
        "id": server_data["client_id"],
        "secret": server_data["client_secret"],
        "expire": server_data["expires_at"],
    })
    return server_data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, server=None):
    """Register the client with the Pump API, storing its id and secret."""
    # A client that already has credentials is updated instead.
    if self.key or self.secret:
        return self.update()
    server_data = self.request(server)
    self.key = server_data["client_id"]
    self.secret = server_data["client_secret"]
    # NOTE(review): attribute spelling 'expirey' kept as-is — callers may rely on it.
    self.expirey = server_data["expires_at"]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self):
    """Update the information the Pump server holds about the client."""
    # Secret is checked first so, as before, a missing secret wins
    # when both credentials are absent.
    if self.secret is None:
        raise ClientException("To update a client you need to provide the secret")
    if self.key is None:
        raise ClientException("To update a client you need to provide a key")
    self.request()
    return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile_extensions(macros, compat=False):
    """ Compiler subroutine to test whether some functions are available
    on the target system.

    Since the rrdtool headers shipped with most packages do not disclose any
    versioning information, we cannot test whether a given function is
    available that way. Instead, use this to manually try to compile code and
    see if it works.

    Taken from http://stackoverflow.com/questions/28843765/setup-py-check-if-non-python-library-dependency-exists.

    :param macros: list of (name, value) define macros for the compiler.
    :param compat: when True, skip the link test (rrdtool < 1.5 compat mode).
    :returns: list of Extension objects to build.
    """
    import distutils.sysconfig
    import distutils.ccompiler
    import tempfile
    import shutil
    from textwrap import dedent

    # common vars
    libraries = ['rrd']
    include_dirs = [package_dir, '/usr/local/include']
    library_dirs = ['/usr/local/lib']
    compiler_args = dict(
        libraries=libraries,
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        define_macros=macros)
    exts = [Extension('rrdtool', sources=['rrdtoolmodule.c'], **compiler_args)]
    if compat:
        return exts

    # in non-compat mode, try to link to check if the new symbols are present in librrd
    c_code = dedent('''
    #include <rrd.h>
    #include "rrdtoolmodule.h"

    int main(int argc, char *argv[]) {
        rrd_fetch_cb_register(NULL);  /* exists in rrdtool >= 1.5.0 */
        return 0;
    }
    ''')
    tmp_dir = tempfile.mkdtemp(prefix='tmp_python_rrdtool')
    try:
        bin_file_name = os.path.join(tmp_dir, 'test_rrdtool')
        file_name = bin_file_name + '.c'
        with open(file_name, 'w') as fp:
            fp.write(c_code)

        # try to compile and link it
        compiler = distutils.ccompiler.new_compiler()
        assert isinstance(compiler, distutils.ccompiler.CCompiler)
        for s in include_dirs:
            compiler.add_include_dir(s)
        for s in library_dirs:
            compiler.add_library_dir(s)
        for s in libraries:
            compiler.add_library(s)
        for s in macros:
            compiler.define_macro(*s)
        distutils.sysconfig.customize_compiler(compiler)
        try:
            compiler.link_executable(
                compiler.compile([file_name]),
                bin_file_name,
                libraries=libraries)
        except CompileError:
            sys.exit('Error: Unable to compile the binary module. Do you have the rrdtool header and libraries installed?')
        # LinkError propagates to the caller as before; the finally below
        # now cleans up the scratch directory on every path (the original
        # leaked it on success and on CompileError).
        return exts  # symbol seems to be available, compile in regular way
    finally:
        shutil.rmtree(tmp_dir)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, obj):
    """Add a member to the collection.

    :param obj: object to add.
    """
    self._post_activity({
        "verb": "add",
        "object": {"objectType": obj.object_type, "id": obj.id},
        "target": {"objectType": self.object_type, "id": self.id},
    })
    # Invalidate the member cache so it is rebuilt on next access.
    self._members = None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove(self, obj):
    """Remove a member from the collection.

    :param obj: object to remove.
    """
    self._post_activity({
        "verb": "remove",
        "object": {"objectType": obj.object_type, "id": obj.id},
        "target": {"objectType": self.object_type, "id": self.id},
    })
    # Invalidate the member cache so it is rebuilt on next access.
    self._members = None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _post_activity(self, activity, unserialize=True):
    """ Posts a activity to feed.

    :param activity: activity dict to POST to the user's outbox feed.
    :param unserialize: when True, update this object from the response.
    :returns: True on success, False if the server returned no data.
    :raises PumpException: if the server reports an error.
    """
    # I think we always want to post to feed
    feed_url = "{proto}://{server}/api/user/{username}/feed".format(
        proto=self._pump.protocol,
        server=self._pump.client.server,
        username=self._pump.client.nickname
    )
    data = self._pump.request(feed_url, method="POST", data=activity)
    if not data:
        return False
    if "error" in data:
        raise PumpException(data["error"])
    if unserialize:
        if "target" in data:
            # we probably want to unserialize target if it's there
            # true for collection.{add,remove}
            self.unserialize(data["target"])
        else:
            # copy activity attributes into object
            if "author" not in data["object"]:
                data["object"]["author"] = data["actor"]
            for key in ["to", "cc", "bto", "bcc"]:
                if key not in data["object"] and key in data:
                    data["object"][key] = data[key]
            self.unserialize(data["object"])
    return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_links(self, links, key="href", proxy_key="proxyURL", endpoints=None):
    """ Parses and adds block of links.

    :param links: dict of link data, either under a 'links' key or as
        per-endpoint entries.
    :param key: dict key holding the URL inside an endpoint entry.
    :param proxy_key: key holding the proxy URL under 'pump_io'.
    :param endpoints: endpoint names to scan; defaults to the known feeds.
    :returns: the updated ``self.links`` mapping.
    """
    if endpoints is None:
        endpoints = ["likes", "replies", "shares", "self", "followers",
                     "following", "lists", "favorites", "members"]
    if links.get("links"):
        for endpoint in links['links']:
            # It would seem occasionally the links["links"][endpoint] is
            # just a string (what would be the href value). I don't know
            # why, it's likely a bug in pump.io but for now we'll support
            # this too.
            if isinstance(links['links'][endpoint], dict):
                self._add_link(endpoint, links['links'][endpoint]["href"])
            else:
                self._add_link(endpoint, links["links"][endpoint])
    for endpoint in endpoints:
        if links.get(endpoint, None) is None:
            continue
        # prefer the proxy URL when present, then 'url', then `key`
        if "pump_io" in links[endpoint]:
            self._add_link(endpoint, links[endpoint]["pump_io"][proxy_key])
        elif "url" in links[endpoint]:
            self._add_link(endpoint, links[endpoint]["url"])
        else:
            self._add_link(endpoint, links[endpoint][key])
    return self.links
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_people(self, people):
""" Sets who the object is sent to """
|
if hasattr(people, "object_type"):
people = [people]
elif hasattr(people, "__iter__"):
people = list(people)
return people
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_file(self, filename):
    """Upload a file from a filename on your system.

    :param filename: path to the file on your system.
    :returns: self, updated from the posted activity.
    """
    # Fixed fallback MIME type: the registered generic type is
    # "application/octet-stream" (the original said "octal-stream",
    # which is not a valid media type).
    mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
    headers = {
        "Content-Type": mimetype,
        "Content-Length": str(os.path.getsize(filename)),
    }
    # upload file — context manager guarantees the handle is closed
    with open(filename, "rb") as fh:
        file_data = self._pump.request(
            "/api/user/{0}/uploads".format(self._pump.client.nickname),
            method="POST",
            data=fh.read(),
            headers=headers,
        )
    # now post it to the feed
    data = {
        "verb": "post",
        "object": file_data,
    }
    data.update(self.serialize())
    if not self.content and not self.display_name and not self.license:
        self._post_activity(data)
    else:
        self._post_activity(data, unserialize=False)
        # update the post with display_name, content and license
        if self.content:
            file_data['content'] = self.content
        if self.display_name:
            file_data['displayName'] = self.display_name
        if self.license:
            file_data['license'] = self.license
        data = {
            "verb": "update",
            "object": file_data,
        }
        self._post_activity(data)
    return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unserialize(self, data):
    """Populate this Activity object from a JSON activity dict."""
    obj = data["object"]
    # Activities carry addressing/author info at the top level; copy any
    # of it that the inner object is missing.
    if "author" not in obj:
        obj["author"] = data["actor"]
    for key in ("to", "cc", "bto", "bcc"):
        if key in data and key not in obj:
            obj[key] = data[key]
    Mapper(pypump=self._pump).parse_map(self, data=data)
    self._add_links(data)
    return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_store(self):
    """Create and return the store object backing this PyPump instance."""
    # Guard clause: without a store class there is nothing we can build.
    if self.store_class is None:
        raise NotImplementedError("You need to specify PyPump.store_class or override PyPump.create_store method.")
    return self.store_class.load(self.client.webfinger, self)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_url(self, endpoint):
""" Returns a fully qualified URL """
|
server = None
if "://" in endpoint:
# looks like an url, let's break it down
server, endpoint = self._deconstruct_url(endpoint)
endpoint = endpoint.lstrip("/")
url = "{proto}://{server}/{endpoint}".format(
proto=self.protocol,
server=self.client.server if server is None else server,
endpoint=endpoint,
)
return url
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _deconstruct_url(self, url):
""" Breaks down URL and returns server and endpoint """
|
url = url.split("://", 1)[-1]
server, endpoint = url.split("/", 1)
return (server, endpoint)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_client(self, url, key=None, secret=None):
    """ Creates Client object with key and secret for server and adds it to
    _server_cache if it doesnt already exist.

    :param url: server hostname, or a full URL the hostname is taken from.
    :param key: optional pre-registered client key.
    :param secret: optional pre-registered client secret.
    """
    if "://" in url:
        server, endpoint = self._deconstruct_url(url)
    else:
        server = url
    if server not in self._server_cache:
        if not (key and secret):
            # no credentials supplied: register a brand new client
            client = Client(
                webfinger=self.client.webfinger,
                name=self.client.name,
                type=self.client.type,
            )
            client.set_pump(self)
            client.register(server)
        else:
            # reuse the provided credentials without re-registering
            client = Client(
                webfinger=self.client.webfinger,
                key=key,
                secret=secret,
                type=self.client.type,
                name=self.client.name,
            )
            client.set_pump(self)
        self._server_cache[server] = client
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request(self, endpoint, method="GET", data="", raw=False, params=None, retries=None, client=None, headers=None, timeout=None, **kwargs):
    """ Make request to endpoint with OAuth.

    :param endpoint: endpoint path, or a fully qualified URL if raw=True.
    :param method: GET (default), POST, PUT or DELETE.
    :param data: data to send in the request body (dicts are JSON-encoded).
    :param raw: use endpoint as entered without trying to modify it.
    :param params: dictionary of parameters to send in the query string.
    :param retries: number of times to retry if a request fails.
    :param client: OAuth client data; if False do request without OAuth.
    :param headers: dictionary of HTTP headers.
    :param timeout: the timeout for a request, in seconds.
    :returns: decoded JSON dict on HTTP 200, or the raw response object for
        any other 2xx status.
    :raises PyPumpException: on HTTP 400 or any other failure status.
    """
    retries = self.retries if retries is None else retries
    timeout = self.timeout if timeout is None else timeout
    # check client has been setup
    if client is None:
        client = self.setup_oauth_client(endpoint)
        c = client.client
        fnc = OAuth1Session(c.client_key,
                            client_secret=c.client_secret,
                            resource_owner_key=c.resource_owner_key,
                            resource_owner_secret=c.resource_owner_secret
                            )
    elif client is False:
        # explicit opt-out of OAuth: use the bare requests module
        fnc = requests
    params = {} if params is None else params
    if data and isinstance(data, dict):
        data = json.dumps(data)
    if not raw:
        url = self._build_url(endpoint)
    else:
        url = endpoint
    headers = headers or {"Content-Type": "application/json"}
    request = {
        "headers": headers,
        "params": params,
        "timeout": timeout,
    }
    request.update(kwargs)
    # pick the session method matching the verb; only write verbs get a body
    if method == "POST":
        fnc = fnc.post
        request.update({"data": data})
    elif method == "PUT":
        fnc = fnc.put
        request.update({"data": data})
    elif method == "GET":
        fnc = fnc.get
    elif method == "DELETE":
        fnc = fnc.delete
    # NOTE(review): every non-200 path below raises, so this loop only
    # retries if _requester itself handles transient failures — confirm.
    for attempt in range(1 + retries):
        response = self._requester(
            fnc=fnc,
            endpoint=endpoint,
            raw=raw,
            **request
        )
        if response.status_code == 200:
            # huray!
            return response.json()
        if response.status_code == 400:
            # can't do much — try to dig an error message out of the body
            try:
                try:
                    data = response.json()
                    error = data["error"]
                except ValueError:
                    # body wasn't JSON; fall back to the raw content
                    error = response.content
                if not error:
                    raise IndexError  # yesss i know.
            except IndexError:
                error = "400 - Bad request."
            raise PyPumpException(error)
        if response.ok:
            # some other 2xx: hand back the raw response object
            return response
        error = "Request Failed to {url} (response: {data} | status: {status})"
        error = error.format(
            url=url,
            data=response.content,
            status=response.status_code
        )
        raise PyPumpException(error)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def oauth_request(self):
    """Run the OAuth request-token leg and kick off user authorization."""
    # get request tokens from the server and persist them.
    tokens = self.request_token()
    self._server_tokens = tokens
    self.store["oauth-request-token"] = tokens["token"]
    self.store["oauth-request-secret"] = tokens["token_secret"]
    # the user must now authorize us to use their pump.io account
    result = self.verifier_callback(self.construct_oauth_url())
    if result is not None:
        self.verifier(result)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def construct_oauth_url(self):
    """Construct the URL the user must visit to authorize our request token."""
    root = "{0}://{1}/".format(self.protocol, self.client.server)
    # Probe the server root so a single redirect (e.g. to the canonical
    # hostname) is honoured without following it blindly.
    response = self._requester(requests.head, root, allow_redirects=False)
    if response.is_redirect:
        server = response.headers['location']
    else:
        server = response.url
    return "{server}{path}".format(
        server=server,
        path="oauth/authorize?oauth_token={token}".format(
            token=self.store["oauth-request-token"]
        ),
    )
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_oauth_client(self, url=None):
    """Set up and return the OAuth1 client used for requests to pump."""
    if url and "://" in url:
        server, _ = self._deconstruct_url(url)
    else:
        server = self.client.server
    if server not in self._server_cache:
        self._add_client(server)
    # Requests to foreign servers are signed with only that server's
    # client credentials; our own server also gets the user's tokens.
    if server != self.client.server:
        return OAuth1(
            client_key=self._server_cache[server].key,
            client_secret=self._server_cache[server].secret,
        )
    self.oauth = OAuth1(
        client_key=self.store["client-key"],
        client_secret=self.store["client-secret"],
        resource_owner_key=self.store["oauth-access-token"],
        resource_owner_secret=self.store["oauth-access-secret"],
    )
    return self.oauth
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request_token(self):
    """Fetch an OAuth request token and return it as a dict."""
    client = OAuth1(
        client_key=self._server_cache[self.client.server].key,
        client_secret=self._server_cache[self.client.server].secret,
        callback_uri=self.callback,
    )
    response = self._requester(
        requests.post,
        "oauth/request_token",
        auth=client,
    )
    # The token endpoint answers with a urlencoded body.
    parsed = parse.parse_qs(response.text)
    return {
        'token': parsed[self.PARAM_TOKEN][0],
        'token_secret': parsed[self.PARAM_TOKEN_SECRET][0]
    }
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request_access(self, verifier):
    """Exchange the verified request token for an OAuth access token."""
    client = OAuth1(
        client_key=self._server_cache[self.client.server].key,
        client_secret=self._server_cache[self.client.server].secret,
        resource_owner_key=self.store["oauth-request-token"],
        resource_owner_secret=self.store["oauth-request-secret"],
        verifier=verifier,
    )
    response = self._requester(
        requests.post,
        "oauth/access_token",
        auth=client,
    )
    # The token endpoint answers with a urlencoded body.
    parsed = parse.parse_qs(response.text)
    self.store["oauth-access-token"] = parsed[self.PARAM_TOKEN][0]
    self.store["oauth-access-secret"] = parsed[self.PARAM_TOKEN_SECRET][0]
    # request tokens are single-use; drop them
    self._server_tokens = {}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logged_in(self):
    """Return True if we hold a valid login for this account."""
    if "oauth-access-token" not in self.store:
        return False
    response = self.request("/api/whoami", allow_redirects=False)
    # A logged-in whoami request answers with a 302 redirect pointing at
    # the profile we already hold.
    return (response.status_code == 302
            and response.headers["location"] == self.me.links["self"])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnCreate():
    """ Initialize cuDNN and return a handle to the library context.

    Returns
    -------
    handle : cudnnHandle
        cuDNN context
    """
    ctx = ctypes.c_void_p()
    cudnnCheckStatus(_libcudnn.cudnnCreate(ctypes.byref(ctx)))
    return ctx.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnDestroy(handle):
    """ Release cuDNN resources.

    Release hardware resources used by cuDNN.

    Parameters
    ----------
    handle : cudnnHandle
        cuDNN context.
    """
    # handle is an opaque pointer value; wrap it back into a c_void_p
    status = _libcudnn.cudnnDestroy(ctypes.c_void_p(handle))
    cudnnCheckStatus(status)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetStream(handle, id):
    """ Set current cuDNN library stream.

    Parameters
    ----------
    handle : cudnnHandle
        cuDNN context.
    id : cudaStream
        Stream Id.
    """
    status = _libcudnn.cudnnSetStream(handle, id)
    cudnnCheckStatus(status)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetStream(handle):
    """ Get current cuDNN library stream.

    Parameters
    ----------
    handle : int
        cuDNN context.

    Returns
    -------
    id : int
        Stream ID.
    """
    # Out-parameter for the stream handle (renamed so it no longer
    # shadows the builtin `id`).
    stream_id = ctypes.c_void_p()
    status = _libcudnn.cudnnGetStream(handle, ctypes.byref(stream_id))
    cudnnCheckStatus(status)
    return stream_id.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnCreateTensorDescriptor():
    """ Create a Tensor descriptor object.

    Allocates a cudnnTensorDescriptor_t structure and returns a pointer to it.

    Returns
    -------
    tensor_descriptor : int
        Tensor descriptor.
    """
    desc = ctypes.c_void_p()
    cudnnCheckStatus(_libcudnn.cudnnCreateTensorDescriptor(ctypes.byref(desc)))
    return desc.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetTensor4dDescriptor(tensorDesc, format, dataType, n, c, h, w):
    """ Initialize a previously created Tensor 4D object.

    The strides of the four dimensions are inferred from the format parameter
    and set in such a way that the data is contiguous in memory with no
    padding between dimensions.

    Parameters
    ----------
    tensorDesc : cudnnTensorDescriptor
        Handle to a previously created tensor descriptor.
    format : cudnnTensorFormat
        Type of format.
    dataType : cudnnDataType
        Data type.
    n : int
        Number of images.
    c : int
        Number of feature maps per image.
    h : int
        Height of each feature map.
    w : int
        Width of each feature map.
    """
    status = _libcudnn.cudnnSetTensor4dDescriptor(tensorDesc, format, dataType,
                                                  n, c, h, w)
    cudnnCheckStatus(status)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetTensor4dDescriptorEx(tensorDesc, dataType, n, c, h, w, nStride, cStride, hStride, wStride):
    """ Initialize a Tensor descriptor object with strides.

    This function initializes a previously created generic Tensor descriptor
    object into a 4D tensor, similarly to cudnnSetTensor4dDescriptor but with
    the strides explicitly passed as parameters. This can be used to lay out
    the 4D tensor in any order or simply to define gaps between dimensions.

    Parameters
    ----------
    tensorDesc : cudnnTensorDescriptor_t
        Handle to a previously created tensor descriptor.
    dataType : cudnnDataType
        Data type.
    n : int
        Number of images.
    c : int
        Number of feature maps per image.
    h : int
        Height of each feature map.
    w : int
        Width of each feature map.
    nStride : int
        Stride between two consective images.
    cStride : int
        Stride between two consecutive feature maps.
    hStride : int
        Stride between two consecutive rows.
    wStride : int
        Stride between two consecutive columns.
    """
    status = _libcudnn.cudnnSetTensor4dDescriptorEx(tensorDesc, dataType, n, c, h, w,
                                                    nStride, cStride, hStride, wStride)
    cudnnCheckStatus(status)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetTensor4dDescriptor(tensorDesc):
    """ Get parameters of a Tensor descriptor object.

    Queries the parameters of a previously initialized Tensor4D descriptor.

    Parameters
    ----------
    tensorDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.

    Returns
    -------
    (dataType, n, c, h, w, nStride, cStride, hStride, wStride) : tuple of int
        Data type, dimensions (images, feature maps, height, width) and the
        stride between consecutive images / feature maps / rows / columns.
    """
    # One c_int out-parameter per returned field, in the API's order.
    outs = [ctypes.c_int() for _ in range(9)]
    status = _libcudnn.cudnnGetTensor4dDescriptor(
        tensorDesc, *(ctypes.byref(o) for o in outs))
    cudnnCheckStatus(status)
    return tuple(o.value for o in outs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnCreateFilterDescriptor():
    """Allocate a new cuDNN filter descriptor.

    Returns
    -------
    wDesc : cudnnFilterDescriptor
        Handle to the newly allocated filter descriptor.
    """
    handle = ctypes.c_void_p()
    status = _libcudnn.cudnnCreateFilterDescriptor(ctypes.byref(handle))
    cudnnCheckStatus(status)
    return handle.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w):
    """Initialize a previously created descriptor as a 4D filter.

    The filter layout must be contiguous in memory.

    Parameters
    ----------
    wDesc : cudnnFilterDescriptor
        Handle to a previously created filter descriptor.
    dataType : cudnnDataType
        Data type of the filter elements.
    format : cudnnTensorFormat
        Tensor format of the filter.
    k, c, h, w : int
        Number of output feature maps, number of input feature maps,
        filter height and filter width, respectively.
    """
    cudnnCheckStatus(
        _libcudnn.cudnnSetFilter4dDescriptor(
            wDesc, dataType, format, k, c, h, w))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetFilter4dDescriptor(wDesc):
    """Query the parameters of a 4D filter descriptor.

    Parameters
    ----------
    wDesc : cudnnFilterDescriptor
        Handle to a previously created filter descriptor.

    Returns
    -------
    tuple
        ``(dataType, format, k, c, h, w)``: the data type, tensor
        format, number of output feature maps, number of input feature
        maps, filter height and filter width.
    """
    dtype = ctypes.c_int()
    fmt = ctypes.c_int()
    dims = [ctypes.c_int() for _ in range(4)]  # k, c, h, w
    status = _libcudnn.cudnnGetFilter4dDescriptor(
        wDesc, ctypes.byref(dtype), ctypes.byref(fmt),
        *[ctypes.byref(d) for d in dims])
    cudnnCheckStatus(status)
    k, c, h, w = (d.value for d in dims)
    return dtype.value, fmt.value, k, c, h, w
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnCreateConvolutionDescriptor():
    """Allocate a new cuDNN convolution descriptor.

    Returns
    -------
    convDesc : cudnnConvolutionDescriptor
        Handle to the newly allocated convolution descriptor.
    """
    handle = ctypes.c_void_p()
    cudnnCheckStatus(
        _libcudnn.cudnnCreateConvolutionDescriptor(ctypes.byref(handle)))
    return handle.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetConvolution2dDescriptor(convDesc, pad_h, pad_w, u, v, dilation_h, dilation_w, mode, computeType):
    """Initialize a previously created descriptor as a 2D convolution.

    The same descriptor can be reused in the backward path provided it
    corresponds to the same layer.

    Parameters
    ----------
    convDesc : cudnnConvolutionDescriptor
        Handle to a previously created convolution descriptor.
    pad_h, pad_w : int
        Zero-padding height and width.
    u, v : int
        Vertical and horizontal filter strides.
    dilation_h, dilation_w : int
        Filter height and width dilation.
    mode : cudnnConvolutionMode
        CUDNN_CONVOLUTION or CUDNN_CROSS_CORRELATION.
    computeType : cudnnDataType
        Compute precision.
    """
    cudnnCheckStatus(
        _libcudnn.cudnnSetConvolution2dDescriptor(
            convDesc, pad_h, pad_w, u, v,
            dilation_h, dilation_w, mode, computeType))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetConvolution2dDescriptor(convDesc):
    """Query a previously initialized 2D convolution descriptor.

    Parameters
    ----------
    convDesc : cudnnConvolutionDescriptor
        Handle to a previously created convolution descriptor.

    Returns
    -------
    pad_h, pad_w : int
        Zero-padding height and width.
    u, v : int
        Vertical and horizontal filter strides.
    dilation_h, dilation_w : int
        Filter height and width dilation.
    mode : cudnnConvolutionMode
        CUDNN_CONVOLUTION or CUDNN_CROSS_CORRELATION.
    computeType : cudnnDataType
        Compute precision.
    """
    pad_h = ctypes.c_int()
    pad_w = ctypes.c_int()
    u = ctypes.c_int()
    v = ctypes.c_int()
    dilation_h = ctypes.c_int()
    dilation_w = ctypes.c_int()
    mode = ctypes.c_int()
    computeType = ctypes.c_int()
    status = _libcudnn.cudnnGetConvolution2dDescriptor(
        convDesc, ctypes.byref(pad_h), ctypes.byref(pad_w),
        ctypes.byref(u), ctypes.byref(v),
        ctypes.byref(dilation_h), ctypes.byref(dilation_w),
        ctypes.byref(mode), ctypes.byref(computeType))
    cudnnCheckStatus(status)
    # Bug fix: the return statement previously referenced the undefined
    # names `upscalex`/`upscaley` (raising NameError); return the
    # dilation values that were actually queried above.
    return (pad_h.value, pad_w.value, u.value, v.value,
            dilation_h.value, dilation_w.value, mode.value,
            computeType.value)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetConvolution2dForwardOutputDim(convDesc, inputTensorDesc, wDesc):
    """Compute the output dimensions of a 2D convolution.

    Useful for sizing and allocating the output tensor before launching
    the actual convolution.

    Parameters
    ----------
    convDesc : cudnnConvolutionDescriptor
        Handle to a previously created convolution descriptor.
    inputTensorDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    wDesc : cudnnFilterDescriptor
        Handle to a previously initialized filter descriptor.

    Returns
    -------
    n, c, h, w : int
        Number of output images, output feature maps per image, and the
        height and width of each output feature map.
    """
    dims = [ctypes.c_int() for _ in range(4)]  # n, c, h, w
    status = _libcudnn.cudnnGetConvolution2dForwardOutputDim(
        convDesc, inputTensorDesc, wDesc,
        *[ctypes.byref(d) for d in dims])
    cudnnCheckStatus(status)
    n, c, h, w = (d.value for d in dims)
    return n, c, h, w
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetConvolutionForwardAlgorithm(handle, srcDesc, wDesc, convDesc, destDesc, preference, memoryLimitInbytes):
    """Select the best forward-convolution algorithm for the given criteria.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Input tensor descriptor.
    wDesc : cudnnFilterDescriptor
        Filter descriptor.
    convDesc : cudnnConvolutionDescriptor
        Convolution descriptor.
    destDesc : cudnnTensorDescriptor
        Output tensor descriptor.
    preference : cudnnConvolutionFwdPreference
        Preference criteria (memory requirement vs. speed).
    memoryLimitInbytes : int
        Maximum workspace size when the preference is
        CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT.

    Returns
    -------
    algo : ctypes.c_int
        The chosen cudnnConvolutionFwdAlgo enumerant. Note: the raw
        ctypes object is returned (not ``.value``), as it is typically
        passed straight back into other cuDNN calls.
    """
    algo = ctypes.c_int()
    limit = ctypes.c_size_t(memoryLimitInbytes)
    status = _libcudnn.cudnnGetConvolutionForwardAlgorithm(
        handle, srcDesc, wDesc, convDesc, destDesc,
        preference, limit, ctypes.byref(algo))
    cudnnCheckStatus(status)
    return algo
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetConvolutionForwardWorkspaceSize(handle, srcDesc, wDesc, convDesc, destDesc, algo):
    """Query the GPU workspace size needed by cudnnConvolutionForward.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Input tensor descriptor.
    wDesc : cudnnFilterDescriptor
        Filter descriptor.
    convDesc : cudnnConvolutionDescriptor
        Convolution descriptor.
    destDesc : cudnnTensorDescriptor
        Output tensor descriptor.
    algo : cudnnConvolutionFwdAlgo
        The chosen convolution algorithm.

    Returns
    -------
    sizeInBytes : ctypes.c_size_t
        Workspace size in bytes required to run the forward convolution
        with the given algorithm (returned as the raw ctypes object).
    """
    sizeInBytes = ctypes.c_size_t()
    status = _libcudnn.cudnnGetConvolutionForwardWorkspaceSize(
        handle, srcDesc, wDesc, convDesc, destDesc, algo,
        ctypes.byref(sizeInBytes))
    cudnnCheckStatus(status)
    return sizeInBytes
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnSoftmaxForward(handle, algorithm, mode, alpha, srcDesc, srcData, beta, destDesc, destData):
    """Compute the softmax function on the input tensor.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    algorithm : cudnnSoftmaxAlgorithm
        Softmax algorithm.
    mode : cudnnSoftmaxMode
        Softmax mode.
    alpha, beta : float
        Scaling factors: every input element is multiplied by ``alpha``;
        ``beta`` scales the prior output before accumulation. If ``beta``
        is zero, the output is not read and may be uninitialized.
    srcDesc, srcData : descriptor / void_p
        Input tensor descriptor and GPU data pointer.
    destDesc, destData : descriptor / void_p
        Output tensor descriptor and GPU data pointer.
    """
    # alpha/beta must be passed by reference with a C type matching the
    # destination tensor's data type.
    dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
    scale_t = (ctypes.c_double
               if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']
               else ctypes.c_float)
    alphaRef = ctypes.byref(scale_t(alpha))
    betaRef = ctypes.byref(scale_t(beta))
    status = _libcudnn.cudnnSoftmaxForward(
        handle, algorithm, mode, alphaRef,
        srcDesc, srcData, betaRef, destDesc, destData)
    cudnnCheckStatus(status)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnCreatePoolingDescriptor():
    """Allocate a new cuDNN pooling descriptor.

    Returns
    -------
    poolingDesc : cudnnPoolingDescriptor
        Handle to the newly allocated pooling descriptor.
    """
    handle = ctypes.c_void_p()
    cudnnCheckStatus(
        _libcudnn.cudnnCreatePoolingDescriptor(ctypes.byref(handle)))
    return handle.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight, windowWidth, verticalPadding, horizontalPadding, verticalStride, horizontalStride):
    """Initialize a previously created 2D pooling descriptor.

    Parameters
    ----------
    poolingDesc : cudnnPoolingDescriptor
        Handle to a previously created pooling descriptor.
    mode : cudnnPoolingMode
        Pooling mode.
    windowHeight, windowWidth : int
        Size of the pooling window.
    verticalPadding, horizontalPadding : int
        Padding sizes.
    verticalStride, horizontalStride : int
        Pooling strides.
    """
    cudnnCheckStatus(
        _libcudnn.cudnnSetPooling2dDescriptor(
            poolingDesc, mode, windowHeight, windowWidth,
            verticalPadding, horizontalPadding,
            verticalStride, horizontalStride))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetPooling2dDescriptor(poolingDesc):
    """Query a previously created 2D pooling descriptor.

    Parameters
    ----------
    poolingDesc : cudnnPoolingDescriptor
        Handle to a previously created 2D pooling descriptor.

    Returns
    -------
    mode : cudnnPoolingMode
        Pooling mode.
    windowHeight : int
        Height of the pooling window.
    windowWidth : int
        Width of the pooling window.
    verticalPadding : int
        Size of vertical padding.
    horizontalPadding : int
        Size of horizontal padding.
    verticalStride : int
        Pooling vertical stride.
    horizontalStride : int
        Pooling horizontal stride.
    """
    mode = ctypes.c_int()
    windowHeight = ctypes.c_int()
    windowWidth = ctypes.c_int()
    verticalPadding = ctypes.c_int()
    horizontalPadding = ctypes.c_int()
    verticalStride = ctypes.c_int()
    horizontalStride = ctypes.c_int()
    status = _libcudnn.cudnnGetPooling2dDescriptor(
        poolingDesc, ctypes.byref(mode), ctypes.byref(windowHeight),
        ctypes.byref(windowWidth), ctypes.byref(verticalPadding),
        ctypes.byref(horizontalPadding), ctypes.byref(verticalStride),
        ctypes.byref(horizontalStride))
    cudnnCheckStatus(status)
    # Bug fix: the padding values were queried but previously dropped
    # from the returned tuple, so callers following the documented
    # return order received stride values in the padding positions.
    return (mode.value, windowHeight.value, windowWidth.value,
            verticalPadding.value, horizontalPadding.value,
            verticalStride.value, horizontalStride.value)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cudnnActivationBackward(handle, mode, alpha, srcDesc, srcData, srcDiffDesc, srcDiffData, destDesc, destData, beta, destDiffDesc, destDiffData):
    """Compute the gradient of a neuron activation function.

    In-place operation is allowed: ``srcData``/``destData`` and
    ``srcDiffData``/``destDiffData`` may alias, provided the
    corresponding tensor descriptors are identical (matching strides).

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    mode : cudnnActivationMode
        Activation function whose gradient is computed.
    alpha, beta : float
        Scaling factors: input elements are multiplied by ``alpha``;
        ``beta`` scales the prior output before the gradient is added.
        If ``beta`` is zero the output is not read.
    srcDesc, srcData : descriptor / void_p
        Input tensor descriptor and GPU data pointer.
    srcDiffDesc, srcDiffData : descriptor / void_p
        Input differential tensor descriptor and GPU data pointer.
    destDesc, destData : descriptor / void_p
        Output tensor descriptor and GPU data pointer.
    destDiffDesc, destDiffData : descriptor / void_p
        Output differential tensor descriptor and GPU data pointer.
    """
    # alpha/beta must be passed by reference with a C type matching the
    # destination tensor's data type.
    dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
    scale_t = (ctypes.c_double
               if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']
               else ctypes.c_float)
    alphaRef = ctypes.byref(scale_t(alpha))
    betaRef = ctypes.byref(scale_t(beta))
    status = _libcudnn.cudnnActivationBackward(
        handle, mode, alphaRef, srcDesc, srcData,
        srcDiffDesc, srcDiffData, destDesc, destData,
        betaRef, destDiffDesc, destDiffData)
    cudnnCheckStatus(status)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __prefix_key(self, key):
    """Return *key* with the store's prefix prepended.

    The key is returned unchanged when the store has no prefix, or when
    the key already carries the prefix.
    """
    if self.prefix is None:
        return key
    marker = self.prefix + "-"
    if key.startswith(marker):
        return key
    return "{0}-{1}".format(self.prefix, key)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def export(self):
    """Return the store's contents as a plain dictionary."""
    return {key: value for key, value in self.items()}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self):
    """Atomically write the store to disk as JSON.

    The data is first written to a uniquely named temporary file with
    owner-only permissions (0600) and then renamed over the real config
    file, so a crash mid-write cannot leave a partially written
    settings file (see bug #116).

    Raises
    ------
    StoreException
        If ``self.filename`` has not been set.
    """
    if self.filename is None:
        raise StoreException("Filename must be set to write store to disk")
    # We need an atomic way of re-writing the settings, we also need to
    # prevent only overwriting part of the settings file (see bug #116).
    # Create a temp file and only then re-name it to the config.
    # Bug fix: the template was missing the {filename} placeholder, so
    # the temp file was created in the current working directory and
    # os.rename could fail when that was on a different filesystem.
    filename = "{filename}.{date}.tmp".format(
        filename=self.filename,
        date=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H_%M_%S.%f')
    )
    # The `open` built-in doesn't allow us to set the mode
    mode = stat.S_IRUSR | stat.S_IWUSR  # 0600
    fd = os.open(filename, os.O_WRONLY | os.O_CREAT, mode)
    fout = os.fdopen(fd, "w")
    fout.write(json.dumps(self.export()))
    fout.close()
    # Remove the old config, then rename the temp file into place.
    if os.path.isfile(self.filename):
        os.remove(self.filename)
    os.rename(filename, self.filename)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_filename(cls):
    """Return the on-disk path of the credential store.

    Uses ``$XDG_CONFIG_HOME`` (defaulting to ``~/.config``), creating
    the ``PyPump`` directory if it does not yet exist.
    """
    config_home = os.path.expanduser(
        os.environ.get("XDG_CONFIG_HOME", "~/.config"))
    base_path = os.path.join(config_home, "PyPump")
    if not os.path.isdir(base_path):
        os.makedirs(base_path)
    return os.path.join(base_path, "credentials.json")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(cls, webfinger, pypump):
    """Load JSON from disk into a new store object.

    Parameters
    ----------
    webfinger :
        Webfinger ID used as the store's key prefix.
    pypump :
        PyPump instance (currently unused here).

    Returns
    -------
    Store instance populated from the on-disk JSON file if it exists,
    otherwise empty, with ``prefix`` set to *webfinger*.
    """
    filename = cls.get_filename()
    if os.path.isfile(filename):
        # Bug fix: the file handle returned by open() was never closed;
        # use a context manager so it is released deterministically.
        with open(filename) as fin:
            data = json.loads(fin.read())
        store = cls(data, filename=filename)
    else:
        store = cls(filename=filename)
    store.prefix = webfinger
    return store
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pause(message='Press any key to continue . . . '):
    """Print *message* (unless it is None) and block until a key is pressed."""
    if message is not None:
        # flush=True ensures the prompt is visible before we block on input
        print(message, end='', flush=True)
    getch()
    print()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def covalent_bonds(atoms, threshold=1.1):
    """Return `CovalentBond` objects for every bonded pair in *atoms*.

    A pair is bonded when its separation is within ``threshold`` times
    the sum of the two elements' atomic radii from `element_data`
    (the sum is divided by 100 — presumably converting picometres to
    Angstroms; confirm against `element_data` units).

    Parameters
    ----------
    atoms : [(`Atom`, `Atom`)]
        Pairs of `Atoms` to test.
    threshold : float, optional
        Fractional tolerance on the ideal bond length; 1.1 accepts
        interactions up to 10% longer than ideal.
    """
    found = []
    for atom_a, atom_b in atoms:
        radius_sum = (
            element_data[atom_a.element.title()]['atomic radius']
            + element_data[atom_b.element.title()]['atomic radius'])
        ideal_length = radius_sum / 100
        separation = distance(atom_a._vector, atom_b._vector)
        if separation <= ideal_length * threshold:
            found.append(CovalentBond(atom_a, atom_b, separation))
    return found
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_covalent_bonds(ampal, max_range=2.2, threshold=1.1, tag=True):
    """Find all covalent bonds in the AMPAL object.

    Atoms are first binned into spatial sectors so only nearby pairs
    are tested.

    Parameters
    ----------
    ampal : AMPAL Object
        Any AMPAL object with a `get_atoms` method.
    max_range : float, optional
        Defines the sector size; longer-range interactions will not be
        found.
    threshold : float, optional
        Fractional tolerance on the ideal covalent bond distance
        (1.1 = up to 10% further than ideal).
    tag : bool, optional
        If True, each `Atom` gets its bonded partner appended to its
        tags under the ``covalent_bonds`` key.
    """
    sectors = gen_sectors(ampal.get_atoms(), max_range * 1.1)
    candidate_bonds = []
    for sector_atoms in sectors.values():
        pairs = itertools.combinations(sector_atoms, 2)
        candidate_bonds.extend(covalent_bonds(pairs, threshold=threshold))
    # Sectors overlap, so the same bond can be found more than once.
    bond_set = list(set(candidate_bonds))
    if tag:
        for bond in bond_set:
            # Record the bond symmetrically on both participating atoms.
            for this_atom, other_atom in ((bond.a, bond.b), (bond.b, bond.a)):
                if 'covalent_bonds' not in this_atom.tags:
                    this_atom.tags['covalent_bonds'] = [other_atom]
                else:
                    this_atom.tags['covalent_bonds'].append(other_atom)
    return bond_set
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_covalent_bond_graph(covalent_bonds):
    """Build a graph of the covalent-bond network.

    Parameters
    ----------
    covalent_bonds : [CovalentBond]
        List of `CovalentBond`.

    Returns
    -------
    bond_graph : networkx.Graph
        Graph whose edges are the covalently bonded atom pairs.
    """
    bond_graph = networkx.Graph()
    bond_graph.add_edges_from(
        (bond.a, bond.b) for bond in covalent_bonds)
    return bond_graph
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_bond_subgraphs_from_break(bond_graph, atom1, atom2):
    """Split the bond graph by breaking the atom1-atom2 bond.

    Notes
    -----
    Breaking a bond that lies on a cycle will not split the graph.
    The removed edge is restored before returning, so *bond_graph* is
    left unmodified.

    Parameters
    ----------
    bond_graph : networkx.Graph
        Graph of the covalent bond network.
    atom1, atom2 : isambard.ampal.Atom
        The two atoms of the bond to break.

    Returns
    -------
    subgraphs : [networkx.Graph]
        Connected components produced by breaking the bond.
    """
    bond_graph.remove_edge(atom1, atom2)
    try:
        components = networkx.connected_component_subgraphs(
            bond_graph, copy=False)
        subgraphs = list(components)
    finally:
        # Restore the broken edge even if component extraction fails.
        bond_graph.add_edge(atom1, atom2)
    return subgraphs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cap(v, l):
    """Convert *v* to a string, keeping only the last *l* characters if longer."""
    text = str(v)
    if len(text) > l:
        return text[-l:]
    return text
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_atoms_within_distance(atoms, cutoff_distance, point):
    """Filter *atoms* to those within *cutoff_distance* of *point*.

    Parameters
    ----------
    atoms : [ampal.atom]
        A list of `ampal.atoms`.
    cutoff_distance : float
        Maximum distance from point.
    point : (float, float, float)
        Reference 3D coordinate.

    Returns
    -------
    filtered_atoms : [ampal.atoms]
        `atoms` list filtered by distance.
    """
    return list(
        filter(lambda atom: distance(atom, point) <= cutoff_distance, atoms))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def centre_of_atoms(atoms, mass_weighted=True):
    """Return the centre point of any list of atoms.

    Parameters
    ----------
    atoms : list
        List of AMPAL atom objects.
    mass_weighted : bool, optional
        If True returns the centre of mass, otherwise the plain
        geometric centre of the points.

    Returns
    -------
    centre_of_mass : numpy.array
        3D coordinate of the centre.
    """
    points = [atom._vector for atom in atoms]
    masses = [atom.mass for atom in atoms] if mass_weighted else []
    return centre_of_mass(points=points, masses=masses)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assign_force_field(self, ff, mol2=False):
    """Assigns force field parameters to Atoms in the AMPAL object.

    Each heavy atom receives a ``_ff_id`` tuple identifying the force
    field entry to use, or ``None`` (with a warning) if no entry fits.
    Lookup order: residue-specific entry, then the wild-card ``'WLD'``
    set, then (optionally) mol2-style labels. Hydrogens are skipped.

    Parameters
    ----------
    ff : BuffForceField
        The force field to be used for scoring.
    mol2 : bool, optional
        If True, mol2 style labels will also be used.
    """
    # Include ligand atoms when the object carries them.
    if hasattr(self, 'ligands'):
        atoms = self.get_atoms(ligands=True, inc_alt_states=True)
    else:
        atoms = self.get_atoms(inc_alt_states=True)
    for atom in atoms:
        w_str = None   # warning message, if no parameters were found
        a_ff_id = None  # (residue key, atom label) into the force field
        if atom.element == 'H':
            # Hydrogens are not parameterised; leave _ff_id unset.
            continue
        elif atom.ampal_parent.mol_code in ff:
            # Residue-specific parameters take precedence.
            if atom.res_label in ff[atom.ampal_parent.mol_code]:
                a_ff_id = (atom.ampal_parent.mol_code, atom.res_label)
            elif atom.res_label in ff['WLD']:
                # Fall back to the wild-card entry for this atom label.
                a_ff_id = ('WLD', atom.res_label)
            else:
                w_str = ('{} atom is not parameterised in the selected '
                         'force field for {} residues, this will be '
                         'ignored.').format(
                    atom.res_label, atom.ampal_parent.mol_code)
        elif atom.res_label in ff['WLD']:
            a_ff_id = ('WLD', atom.res_label)
        elif mol2 and (atom.ampal_parent.mol_code.capitalize() in ff['MOL2']):
            # mol2-style lookup uses capitalised codes and labels.
            a_ff_id = ('MOL2', atom.res_label.capitalize())
        else:
            if not mol2:
                w_str = ('{} ({}) atom is not parameterised in the selected'
                         ' residue force field. Try activating the heavy '
                         ' atom force field (haff).').format(
                    atom.element, atom.res_label)
            else:
                w_str = ('{} ({}) atom is not parameterised in the selected'
                         ' force field.').format(atom.element, atom.res_label)
        if w_str:
            warnings.warn(w_str, NotParameterisedWarning)
        atom._ff_id = a_ff_id
    # Record that parameters are assigned so update_ff can skip re-assignment.
    self.tags['assigned_ff'] = True
    return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_ff(self, ff, mol2=False, force_ff_assign=False):
    """Assign force-field parameters only when required.

    Avoids re-running the (relatively expensive) assignment when the
    cached parameters are still valid.

    Parameters
    ----------
    ff : BuffForceField
        The force field to be used for scoring.
    mol2 : bool, optional
        If True, mol2 style labels will also be used.
    force_ff_assign : bool, optional
        If True, the force field is completely reassigned, ignoring
        the cached parameters.
    """
    needs_assignment = (
        force_ff_assign
        or 'assigned_ff' not in self.tags
        or not self.tags['assigned_ff'])
    if needs_assignment:
        self.assign_force_field(ff, mol2=mol2)
    return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_internal_energy(self, assign_ff=True, ff=None, mol2=False, force_ff_assign=False):
    """Calculate the BUFF internal energy of the AMPAL object.

    This method backs the ``buff_internal_energy`` property (with
    default arguments).

    Parameters
    ----------
    assign_ff : bool, optional
        If True the force field is updated if required.
    ff : BuffForceField, optional
        Force field to score with; defaults to the global BUFF setting.
    mol2 : bool, optional
        If True, mol2 style labels will also be used.
    force_ff_assign : bool, optional
        If True, the force field is completely reassigned, ignoring
        cached parameters.

    Returns
    -------
    BUFF_score : BUFFScore
        Score with details of each interaction and the atoms involved.
    """
    ff = ff or global_settings['buff']['force_field']
    if assign_ff:
        self.update_ff(ff, mol2=mol2, force_ff_assign=force_ff_assign)
    interactions = find_intra_ampal(self, ff.distance_cutoff)
    return score_interactions(interactions, ff)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rotate(self, angle, axis, point=None, radians=False, inc_alt_states=True):
    """Rotate every atom in the AMPAL object about an axis.

    Parameters
    ----------
    angle : float
        Rotation angle.
    axis : 3D Vector (tuple, list, numpy.array)
        Axis about which to rotate.
    point : 3D Vector (tuple, list, numpy.array), optional
        Point the axis passes through; origin if None.
    radians : bool, optional
        True if `angle` is given in radians, False for degrees.
    inc_alt_states : bool, optional
        If True, atoms in alternate conformation states are rotated too.
    """
    rotation = Quaternion.angle_and_axis(
        angle=angle, axis=axis, radians=radians)
    for atom in self.get_atoms(inc_alt_states=inc_alt_states):
        atom._vector = rotation.rotate_vector(v=atom._vector, point=point)
    return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def translate(self, vector, inc_alt_states=True):
    """Translate every atom in the AMPAL object by *vector*.

    Parameters
    ----------
    vector : 3D Vector (tuple, list, numpy.array)
        Translation vector.
    inc_alt_states : bool, optional
        If True, atoms in alternate conformation states are moved too.
    """
    delta = numpy.array(vector)
    for atom in self.get_atoms(inc_alt_states=inc_alt_states):
        # In-place add keeps the existing coordinate array object.
        atom._vector += delta
    return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rmsd(self, other, backbone=False):
    """Calculate the RMSD between two AMPAL objects.

    Notes
    -----
    No fitting is performed; both objects must contain the same number
    of atoms, in corresponding order.

    Parameters
    ----------
    other : AMPAL Object
        Any AMPAL object with a `get_atoms` method.
    backbone : bool, optional
        If True, the RMSD is computed over backbone atoms only.
    """
    assert type(self) == type(other)
    use_backbone = backbone and hasattr(self, 'backbone')
    if use_backbone:
        atoms_a = self.backbone.get_atoms()
        atoms_b = other.backbone.get_atoms()
    else:
        atoms_a = self.get_atoms()
        atoms_b = other.get_atoms()
    coords_a = [atom._vector for atom in atoms_a]
    coords_b = [atom._vector for atom in atoms_b]
    # Delegates to the module-level rmsd(points1, points2) helper.
    return rmsd(points1=coords_a, points2=coords_b)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append(self, item):
"""Appends a `Monomer to the `Polymer`. Notes ----- Does not update labelling. """
|
if isinstance(item, Monomer):
self._monomers.append(item)
else:
raise TypeError(
'Only Monomer objects can be appended to an Polymer.')
return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extend(self, polymer):
"""Extends the `Polymer` with the contents of another `Polymer`. Notes ----- Does not update labelling. """
|
if isinstance(polymer, Polymer):
self._monomers.extend(polymer)
else:
raise TypeError(
'Only Polymer objects may be merged with a Polymer using unary operator "+".')
return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_monomers(self, ligands=True):
"""Retrieves all the `Monomers` from the AMPAL object. Parameters ligands : bool, optional If true, will include ligand `Monomers`. """
|
if ligands and self.ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
return iter(monomers)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_atoms(self, ligands=True, inc_alt_states=False):
"""Flat list of all the Atoms in the Polymer. Parameters inc_alt_states : bool If true atoms from alternate conformations are included rather than only the "active" states. Returns ------- atoms : itertools.chain Returns an iterator of all the atoms. Convert to list if you require indexing. """
|
if ligands and self.ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
atoms = itertools.chain(
*(list(m.get_atoms(inc_alt_states=inc_alt_states)) for m in monomers))
return atoms
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def relabel_monomers(self, labels=None):
"""Relabels the either in numerically or using a list of labels. Parameters labels : list, optional A list of new labels. Raises ------ ValueError Raised if the number of labels does not match the number of component Monoer objects. """
|
if labels:
if len(self._monomers) == len(labels):
for monomer, label in zip(self._monomers, labels):
monomer.id = str(label)
else:
error_string = (
'Number of Monomers ({}) and number of labels '
'({}) must be equal.')
raise ValueError(error_string.format(
len(self._monomers), len(labels)))
else:
for i, monomer in enumerate(self._monomers):
monomer.id = str(i + 1)
return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def relabel_atoms(self, start=1):
"""Relabels all `Atoms` in numerical order. Parameters start : int, optional Offset the labelling by `start` residues. """
|
counter = start
for atom in self.get_atoms():
atom.id = counter
counter += 1
return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_pdb(self, alt_states=False, inc_ligands=True):
"""Generates a PDB string for the `Polymer`. Parameters alt_states : bool, optional Include alternate conformations for `Monomers` in PDB. inc_ligands : bool, optional Includes `Ligands` in PDB. Returns ------- pdb_str : str String of the pdb for the `Polymer`. Generated using information from the component `Monomers`. """
|
if any([False if x.id else True for x in self._monomers]):
self.relabel_monomers()
if self.ligands and inc_ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
pdb_str = write_pdb(monomers, self.id, alt_states=alt_states)
return pdb_str
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rotate(self, angle, axis, point=None, radians=False):
"""Rotates `Atom` by `angle`. Parameters angle : float Angle that `Atom` will be rotated. axis : 3D Vector (tuple, list, numpy.array) Axis about which the `Atom` will be rotated. point : 3D Vector (tuple, list, numpy.array), optional Point that the `axis` lies upon. If `None` then the origin is used. radians : bool, optional True is `angle` is define in radians, False is degrees. """
|
q = Quaternion.angle_and_axis(angle=angle, axis=axis, radians=radians)
self._vector = q.rotate_vector(v=self._vector, point=point)
return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dict_from_mmcif(mmcif, path=True):
"""Parse mmcif file into a dictionary. Notes ----- Full list of keys/value types, and further information on them can be viewed here: http://mmcif.wwpdb.org/docs/pdb_to_pdbx_correspondences.html All values in the returned dict are str or list(str). This means that some of the data values are string representations of integers - parse these outside of this function if desired. An alternative approach to this can be found in Biopython (via the function Bio.PDB.MMCIF2Dict.MMCIF2Dict). mmcif files are subject to the usual "here be dragons" problems of the PDB and difficult file formats. As such, this function is likely to be in a permanent state of flux as more dragons are found. Parameters mmcif : str mmcif string or a path to an mmcif file. path : bool True if mmcif is a path. Returns ------- cif_data : dict Keys are cif data names, e.g. '_struct_keywords.text'. Values are str or list(str). """
|
# Read raw lines either from a file on disk or from the string itself.
if path:
    with open(mmcif, 'r') as foo:
        lines = foo.readlines()
else:
    lines = mmcif.splitlines()
# Normalise whitespace: collapse internal runs of spaces and strip the ends.
lines = [' '.join(x.strip().split()) for x in lines]
# Some of the data in a .cif files are stored between 'loop_' to initiate a loop, and '#' to terminate it.
# The variable 'loop' is a flag to keep track of this behaviour.
loop = False
# Set up the dictionary to populate as the lines of the .cif file are iterated over.
cif_data = {}
for i, line in enumerate(lines):
    if not line:
        continue
    # hash signifies end of a loop. Ensure loop flag is set to False.
    if line == '#':
        loop = False
        continue
    if not loop:
        # This line initiates a loop section, in which keys are listed first,
        # followed by lines of data in which the values are listed in the same order as the above keys.
        # The values in the loop section will be stored as lists - there are multiple values for one key.
        # An example of this type of data is the 'REVDAT' section, which stores details on the (potentially
        # numerous) various revisions made to the PDB file during its history.
        if line[:5] == 'loop_':
            loop = True
            key_list = []
            continue
        # Lines beginning with '_' start with data names, i.e. keys in the cif_data dictionary.
        elif line[0] == '_':
            # If line consists only of a key, then subsequent lines may contain the associated value.
            if len(line.split()) == 1:
                current_key = line
                count = 1
                while True:
                    # Look forward until a key is found, keeping count of the number of lines in between.
                    # Multi-line values are re-joined with single spaces.
                    try:
                        if lines[i + count][0] != '_':
                            count += 1
                        # prevent infinite loop.
                        elif i + count > len(lines):
                            break
                        else:
                            if count > 1:
                                try:
                                    cif_data[current_key] = ' '.join(lines[i + 1: i + count])
                                except IndexError:
                                    cif_data[current_key] = None
                            else:
                                cif_data[current_key] = None
                            break
                    except IndexError:
                        break
                continue
            # Simplest case. Line is a key-value pair, with the key identified by its first character, '_'.
            elif len(line.split()) > 1:
                line = line.split()
                try:
                    cif_data[line[0]] = ' '.join(line[1:])
                except IndexError:
                    cif_data[line[0]] = None
                continue
            # Line is one of multiple lines that are combined into a value in the while True: loop above.
            else:
                continue
    else:
        # Within a loop section, keys are identified by their first character '_'.
        # Add them to the list of keys in the loop.
        if line[0] == '_':
            if len(line.split()) == 1:
                key_list.append(line)
                if line not in cif_data.keys():
                    cif_data[line] = []
        # Within a loop section, the values are listed within a single space-separated line in the same order
        # that the keys were listed at the start of the loop.
        else:
            # Cannot do a simple split if any of the values themselves are strings containing at least one space.
            # NOTE(review): line_parts.index(part) returns the FIRST occurrence,
            # so duplicate quoted values on one line may be mis-classified - verify.
            if '\"' in line and line.count('\"') % 2 == 0:
                line_parts = [x.strip() for x in line.split('\"') if x]
                line = []
                for part in line_parts:
                    if line_parts.index(part) % 2 == 0:
                        for x in part.split():
                            line.append(x)
                    else:
                        line.append(part)
            elif '\'' in line and line.count('\'') % 2 == 0:
                line = [x.strip() for x in line.split('\'') if x]
            elif len(key_list) == len(line.split()):
                line = line.split()
            if len(key_list) == len(line):
                for j, v in enumerate(line):
                    cif_data[key_list[j]].append(line[j])
            else:
                # CURRENTLY THERE IS A PROBLEM WITH REALLY LONG LOOPS eg _pdbx_refine_tls*, _pdbx_struct_oper_list*
                # The values span multiple lines, and therefore do not satisfy
                # the condition of the above 'if' statement.
                # A correction for this needs to keep track of the value count on subsequent lines,
                # until the 'if' condition is met.
                continue
return cif_data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_protein_dict(cif_data):
""" Parse cif_data dict for a subset of its data. Notes ----- cif_data dict contains all the data from the .cif file, with values as strings. This function returns a more 'human readable' dictionary of key-value pairs. The keys have simpler (and still often more descriptive!) names, and the values are not restricted to being strings. To add more key-value pairs to the protein_dict, follow the patterns used in this function. Add the key and youre name for it to mmcif_data_names. Will it need further parsing, like with the dates in the function below? If the value is not a string, add it to a list of data-types at the end of the function. More information on what key-value pairs can be obtained can be gleaned by examining cif_data and/or by viewing the mmcif resource on the PDB website: http://mmcif.wwpdb.org/docs/pdb_to_pdbx_correspondences.html WARNING: Do not alter the keys of protein_dict without caution. The keys of protein_dict MUST match the column names of the Protein model in the protgraph database. Parameters cif_data : dict Key/value pairs taken directly from a .cif file. Output of the function dict_from_mmcif. Returns ------- protein_dict : dict A dictionary containing a parsed subset of the data in cif_data. The keys have the same name as fields in the Protein model. """
|
# Dictionary relating the keys of protein_dict (column names in Protein model) to the keys of cif_data.
mmcif_data_names = {
'keywords': '_struct_keywords.text',
'header': '_struct_keywords.pdbx_keywords',
'space_group': '_symmetry.space_group_name_H-M',
'experimental_method': '_exptl.method',
'crystal_growth': '_exptl_crystal_grow.pdbx_details',
'resolution': '_refine.ls_d_res_high',
'r_value_obs': '_refine.ls_R_factor_obs',
'atoms_protein': '_refine_hist.pdbx_number_atoms_protein',
'atoms_solvent': '_refine_hist.number_atoms_solvent',
'atoms_ligand': '_refine_hist.pdbx_number_atoms_ligand',
'atoms_nucleic_acid': '_refine_hist.pdbx_number_atoms_nucleic_acid',
'atoms_total': '_refine_hist.number_atoms_total',
'title': '_struct.title',
'pdb_descriptor': '_struct.pdbx_descriptor',
'model_details': '_struct.pdbx_model_details',
'casp_flag': '_struct.pdbx_CASP_flag',
'model_type_details': '_struct.pdbx_model_type_details',
'ncbi_taxonomy': '_entity_src_nat.pdbx_ncbi_taxonomy_id',
'ncbi_taxonomy_gene': '_entity_src_gen.pdbx_gene_src_ncbi_taxonomy_id',
'ncbi_taxonomy_host_org': '_entity_src_gen.pdbx_host_org_ncbi_taxonomy_id',
}
# Set up initial protein_dict.
protein_dict = {}
for column_name, cif_name in mmcif_data_names.items():
try:
data = cif_data[cif_name]
except IndexError:
data = None
except KeyError:
data = None
protein_dict[column_name] = data
# These entries are modified from the mmcif dictionary.
# There may be many revision dates in cif_data. We save the original deposition, release and last_modified dates.
# If there are many dates, they will be in a list in cif_data, otherwise it's one date in a string
# Is there a tidier way to do this?
if isinstance(cif_data['_database_PDB_rev.date_original'], str):
protein_dict['deposition_date'] = cif_data['_database_PDB_rev.date_original']
else:
protein_dict['deposition_date'] = cif_data['_database_PDB_rev.date_original'][0]
if isinstance(cif_data['_database_PDB_rev.date'], str):
protein_dict['release_date'] = cif_data['_database_PDB_rev.date']
protein_dict['last_modified_date'] = cif_data['_database_PDB_rev.date']
else:
protein_dict['release_date'] = cif_data['_database_PDB_rev.date'][0]
protein_dict['last_modified_date'] = cif_data['_database_PDB_rev.date'][-1]
# crystal_growth should be a string or None
crystal_growth = protein_dict['crystal_growth']
if type(crystal_growth) == list and len(crystal_growth) >= 1:
protein_dict['crystal_growth'] = crystal_growth[0]
else:
protein_dict['crystal_growth'] = None
# taxonomy data types should be ints, not lists
taxonomy_keys = ['ncbi_taxonomy', 'ncbi_taxonomy_gene', 'ncbi_taxonomy_host_org']
for taxonomy_key in taxonomy_keys:
if protein_dict[taxonomy_key]:
if type(protein_dict[taxonomy_key]) == list:
try:
protein_dict[taxonomy_key] = int(protein_dict[taxonomy_key][0])
except ValueError or IndexError:
protein_dict[taxonomy_key] = None
# Convert data types from strings to their correct data type.
ints = ['atoms_ligand', 'atoms_nucleic_acid', 'atoms_protein', 'atoms_solvent', 'atoms_total']
floats = ['r_value_obs', 'resolution']
dates = ['deposition_date', 'release_date', 'last_modified_date']
for k, v in protein_dict.items():
if v:
if v == '?' or v == 'None' or v == '.':
protein_dict[k] = None
elif k in ints:
protein_dict[k] = int(v)
elif k in floats:
protein_dict[k] = float(v)
elif k in dates:
protein_dict[k] = datetime.datetime.strptime(v, '%Y-%m-%d')
# Parse awkward strings from cif_data.
elif type(v) == str:
v = v.replace('loop_', '')
v = v.replace(' # ', '')
if v[0] == v[-1] == '\'':
protein_dict[k] = v[1:-1]
return protein_dict
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_PISCES_output(pisces_output, path=False):
""" Takes the output list of a PISCES cull and returns in a usable dictionary. Notes ----- Designed for outputs of protein sequence redundancy culls conducted using the PISCES server. http://dunbrack.fccc.edu/PISCES.php G. Wang and R. L. Dunbrack, Jr. PISCES: a protein sequence culling server. Bioinformatics, 19:1589-1591, 2003. Parameters pisces_output : str or path Output list of non-redundant protein chains from PISCES, or path to text file. path : bool True if path given rather than string. Returns ------- pisces_dict : dict Data output by PISCES in dictionary form. """
|
pisces_dict = {}
if path:
pisces_path = Path(pisces_output)
pisces_content = pisces_path.read_text().splitlines()[1:]
else:
pisces_content = pisces_output.splitlines()[1:]
for line in pisces_content:
pdb = line.split()[0][:4].lower()
chain = line.split()[0][-1]
pdb_dict = {'length': line.split()[1],
'method': line.split()[2],
'resolution': line.split()[3],
'R-factor': line.split()[4],
'R-free': line.split()[5]}
if pdb in pisces_dict:
pisces_dict[pdb]['chains'].append(chain)
else:
pdb_dict['chains'] = [chain]
pisces_dict[pdb] = pdb_dict
return pisces_dict
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_decode(URL, encoding='utf-8', verbose=True):
""" Downloads data from URL and returns decoded contents."""
|
if verbose:
print("Downloading data from " + URL)
req = Request(URL)
try:
with urlopen(req) as u:
decoded_file = u.read().decode(encoding)
except URLError as e:
if hasattr(e, 'reason'):
print('Server could not be reached.')
print('Reason: ', e.reason)
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request.')
print('Error code: ', e.code)
return None
return decoded_file
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def olderado_best_model(pdb_id):
""" Checks the Olderado web server and returns the most representative conformation for PDB NMR structures. Notes ----- Uses OLDERADO from the EBI. See http://www.ebi.ac.uk/pdbe/nmr/olderado/ and citations therein. Parameters pdb_id : str The 4-character PDB code for the NMR structure of interest. Returns ------- model_no : int The conformation number of the most-representative conformation. Raises ------ ValueError If the model number it finds is not an integer. This might indicate that the website format has changed. """
|
pdb_code = pdb_id[:4].lower()
olderado_url = 'http://www.ebi.ac.uk/pdbe/nmr/olderado/searchEntry?pdbCode=' + pdb_code
olderado_page = download_decode(olderado_url, verbose=False)
if olderado_page:
parsed_page = BeautifulSoup(olderado_page, 'html.parser')
else:
return None
try:
best_model = parsed_page.find_all('td')[1]
except IndexError:
print("No model info could be found for {0} - ensure that it's an NMR structure.".format(pdb_id))
return None
try:
model_no = int(best_model.string)
except ValueError as v:
print("Did not find a number for best model.")
raise v
return model_no
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def buff_eval(params):
"""Builds and evaluates BUFF energy of model in parallelization Parameters params: list Tuple containing the specification to be built, the sequence, and the parameters for model building. Returns ------- model.bude_score: float BUFF score for model to be assigned to particle fitness value. """
|
specification, sequence, parsed_ind = params
model = specification(*parsed_ind)
model.build()
model.pack_new_sequences(sequence)
return model.buff_interaction_energy.total_energy
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def buff_internal_eval(params):
"""Builds and evaluates BUFF internal energy of a model in parallelization Parameters params: list Tuple containing the specification to be built, the sequence and the parameters for model building. Returns ------- model.bude_score: float BUFF internal energy score to be assigned to particle fitness value. """
|
specification, sequence, parsed_ind = params
model = specification(*parsed_ind)
model.build()
model.pack_new_sequences(sequence)
return model.buff_internal_energy.total_energy
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rmsd_eval(rmsd_params):
"""Builds a model and runs profit against a reference model. Parameters rmsd_params Returns ------- rmsd: float rmsd against reference model as calculated by profit. """
|
specification, sequence, parsed_ind, reference_pdb = rmsd_params
model = specification(*parsed_ind)
model.pack_new_sequences(sequence)
ca, bb, aa = run_profit(model.pdb, reference_pdb, path1=False, path2=False)
return bb
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def comparator_eval(comparator_params):
"""Gets BUFF score for interaction between two AMPAL objects """
|
top1, top2, params1, params2, seq1, seq2, movements = comparator_params
xrot, yrot, zrot, xtrans, ytrans, ztrans = movements
obj1 = top1(*params1)
obj2 = top2(*params2)
obj2.rotate(xrot, [1, 0, 0])
obj2.rotate(yrot, [0, 1, 0])
obj2.rotate(zrot, [0, 0, 1])
obj2.translate([xtrans, ytrans, ztrans])
model = obj1 + obj2
model.relabel_all()
model.pack_new_sequences(seq1 + seq2)
return model.buff_interaction_energy.total_energy
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parameters(self, sequence, value_means, value_ranges, arrangement):
"""Relates the individual to be evolved to the full parameter string. Parameters sequence: str Full amino acid sequence for specification object to be optimized. Must be equal to the number of residues in the model. value_means: list List containing mean values for parameters to be optimized. value_ranges: list List containing ranges for parameters to be optimized. Values must be positive. arrangement: list Full list of fixed and variable parameters for model building. Fixed values are the appropriate value. Values to be varied should be listed as 'var0', 'var1' etc, and must be in ascending numerical order. Variables can be repeated if required. """
|
# Validate all inputs BEFORE mutating self._params, so a failed call does
# not leave the object in a partially-updated state.
if any(value <= 0 for value in value_ranges):
    raise ValueError("range values must be greater than zero")
if len(value_ranges) != len(value_means):
    raise ValueError("argument mismatch!")
# One 'varN' placeholder per optimisable value.
variable_parameters = ['var{}'.format(i) for i in range(len(value_means))]
# Every placeholder must actually appear in the arrangement.
if len(set(arrangement).intersection(
        variable_parameters)) != len(value_means):
    raise ValueError("argument mismatch!")
self._params['sequence'] = sequence
self._params['value_means'] = value_means
self._params['value_ranges'] = value_ranges
self._params['arrangement'] = arrangement
self._params['variable_parameters'] = variable_parameters
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_energy_funnel_data(self, cores=1):
"""Compares models created during the minimisation to the best model. Returns ------- energy_rmsd_gen: [(float, float, int)] A list of triples containing the BUFF score, RMSD to the top model and generation of a model generated during the minimisation. """
|
# Bail out early if the optimiser has not been run yet.
if not self.parameter_log:
    raise AttributeError(
        'No parameter log data to make funnel, have you ran the '
        'optimiser?')
model_cls = self._params['specification']
# Flatten the per-generation log into (parameters, score, generation) triples.
gen_tagged = []
for gen, models in enumerate(self.parameter_log):
    for model in models:
        gen_tagged.append((model[0], model[1], gen))
# Sort by score so the best-scoring entry comes first.
sorted_pps = sorted(gen_tagged, key=lambda x: x[1])
top_result = sorted_pps[0]
top_result_model = model_cls(*top_result[0])
# Serial path for a single core, and on Windows (presumably to avoid
# multiprocessing limitations there - confirm).
if (cores == 1) or (sys.platform == 'win32'):
    energy_rmsd_gen = map(
        self.funnel_rebuild,
        [(x, top_result_model,
          self._params['specification']) for x in sorted_pps[1:]])
else:
    # NOTE(review): max_workers uses self._params['processors'], not the
    # `cores` argument - confirm which value should control the pool size.
    with futures.ProcessPoolExecutor(
            max_workers=self._params['processors']) as executor:
        energy_rmsd_gen = executor.map(
            self.funnel_rebuild,
            [(x, top_result_model, self._params['specification'])
             for x in sorted_pps[1:]])
return list(energy_rmsd_gen)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def funnel_rebuild(psg_trm_spec):
"""Rebuilds a model and compares it to a reference model. Parameters psg_trm: (([float], float, int), AMPAL, specification) A tuple containing the parameters, score and generation for a model as well as a model of the best scoring parameters. Returns ------- energy_rmsd_gen: (float, float, int) A triple containing the BUFF score, RMSD to the top model and generation of a model generated during the minimisation. """
|
param_score_gen, top_result_model, specification = psg_trm_spec
params, score, gen = param_score_gen
model = specification(*params)
rmsd = top_result_model.rmsd(model)
return rmsd, score, gen
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_pop(self):
"""Updates the population according to crossover and fitness criteria. """
|
# Produce one challenger per current member via crossover.
challengers = [self.crossover(member) for member in self.population]
self._params['model_count'] += len(challengers)
self.assign_fitnesses(challengers)
# Greedy replacement: a challenger displaces its parent only if fitter.
for index, challenger in enumerate(challengers):
    if challenger.fitness > self.population[index].fitness:
        self.population[index] = challenger
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize_pop(self):
"""Generates initial population with random positions and speeds."""
|
# Create the swarm of particles.
self.population = self.toolbox.swarm(n=self._params['popsize'])
if self._params['neighbours']:
    # Ring topology: each particle's neighbourhood is itself plus the
    # `neighbours` particles on either side (indices wrap via modulo).
    for i in range(len(self.population)):
        self.population[i].ident = i
        self.population[i].neighbours = list(
            set(
                [(i - x) % len(self.population)
                 for x in range(1, self._params['neighbours'] + 1)] +
                [i] +
                [(i + x) % len(self.population)
                 for x in range(1, self._params['neighbours'] + 1)]
            ))
else:
    # Fully-connected topology: every particle sees every other particle.
    for i in range(len(self.population)):
        self.population[i].ident = i
        self.population[i].neighbours = [
            x for x in range(len(self.population))]
self.assign_fitnesses(self.population)
# Each particle starts with its own position as its personal best.
for part in self.population:
    part.best = creator.Particle(part)
    part.best.fitness.values = part.fitness.values
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize_pop(self):
"""Assigns initial fitnesses."""
|
# Register generators for individuals and whole populations with DEAP,
# then build the initial population and score it.
self.toolbox.register("individual", self.generate)
self.toolbox.register(
    "population", tools.initRepeat, list, self.toolbox.individual)
self.population = self.toolbox.population(n=self._params['popsize'])
self.assign_fitnesses(self.population)
# Track how many models have been evaluated so far.
self._params['model_count'] += len(self.population)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randomise_proposed_value(self):
"""Creates a randomly the proposed value. Raises ------ TypeError Raised if this method is called on a static value. TypeError Raised if the parameter type is unknown. """
|
if self.parameter_type is MMCParameterType.UNIFORM_DIST:
(a, b) = self.static_dist_or_list
self.proposed_value = random.uniform(a, b)
elif self.parameter_type is MMCParameterType.NORMAL_DIST:
(mu, sigma) = self.static_dist_or_list
self.proposed_value = random.normalvariate(mu, sigma)
elif self.parameter_type is MMCParameterType.DISCRETE_RANGE:
(min_v, max_v, step) = self.static_dist_or_list
self.proposed_value = random.choice(
numpy.arange(min_v, max_v, step))
elif self.parameter_type is MMCParameterType.LIST:
self.proposed_value = random.choice(self.static_dist_or_list)
elif self.parameter_type is MMCParameterType.STATIC_VALUE:
raise TypeError('This value is static, it cannot be mutated.')
else:
raise TypeError(
'Cannot randomise this parameter, unknown parameter type.')
return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def accept_proposed_value(self):
"""Changes the current value to the proposed value."""
|
if self.proposed_value is not None:
self.current_value = self.proposed_value
self.proposed_value = None
return
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.