column            type           min      max
id                int32          0        252k
repo              stringlengths  7        55
path              stringlengths  4        127
func_name         stringlengths  1        88
original_string   stringlengths  75       19.8k
language          stringclasses  1 value
code              stringlengths  51       19.8k
code_tokens       list
docstring         stringlengths  3        17.3k
docstring_tokens  list
sha               stringlengths  40       40
url               stringlengths  87       242
19,800
hfaran/piazza-api
piazza_api/network.py
Network.delete_post
def delete_post(self, post): """ Deletes post by cid :type post: dict|str|int :param post: Either the post dict returned by another API method, the post ID, or the `cid` field of that post. :rtype: dict :returns: Dictionary with information about the post cid. """ try: cid = post['id'] except KeyError: cid = post except TypeError: post = self.get_post(post) cid = post['id'] params = { "cid": cid, } return self._rpc.content_delete(params)
python
def delete_post(self, post): try: cid = post['id'] except KeyError: cid = post except TypeError: post = self.get_post(post) cid = post['id'] params = { "cid": cid, } return self._rpc.content_delete(params)
[ "def", "delete_post", "(", "self", ",", "post", ")", ":", "try", ":", "cid", "=", "post", "[", "'id'", "]", "except", "KeyError", ":", "cid", "=", "post", "except", "TypeError", ":", "post", "=", "self", ".", "get_post", "(", "post", ")", "cid", "=...
Deletes post by cid :type post: dict|str|int :param post: Either the post dict returned by another API method, the post ID, or the `cid` field of that post. :rtype: dict :returns: Dictionary with information about the post cid.
[ "Deletes", "post", "by", "cid" ]
26201d06e26bada9a838f6765c1bccedad05bd39
https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L378-L400
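A quick usage sketch for the delete_post row above; it assumes an authenticated piazza-api session (the login flow and the network id here are placeholders):

```python
from piazza_api import Piazza

p = Piazza()
p.user_login(email="user@example.com", password="...")  # placeholder credentials
network = p.network("abc123xyz")                        # placeholder network id

# Per the docstring, delete_post accepts a post dict, a post ID, or a cid.
post = network.get_post(142)
network.delete_post(post)  # pass the post dict returned by another API method...
network.delete_post(142)   # ...or the post number, resolved via get_post internally
```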
19,801
hfaran/piazza-api
piazza_api/network.py
Network.get_feed
def get_feed(self, limit=100, offset=0): """Get your feed for this network Pagination for this can be achieved by using the ``limit`` and ``offset`` params :type limit: int :param limit: Number of posts from feed to get, starting from ``offset`` :type offset: int :param offset: Offset starting from bottom of feed :rtype: dict :returns: Feed metadata, including list of posts in feed format; this means they are not the full posts but only in partial form as necessary to display them on the Piazza feed. For example, the returned dicts only have content snippets of posts rather than the full text. """ return self._rpc.get_my_feed(limit=limit, offset=offset)
python
def get_feed(self, limit=100, offset=0): return self._rpc.get_my_feed(limit=limit, offset=offset)
[ "def", "get_feed", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ")", ":", "return", "self", ".", "_rpc", ".", "get_my_feed", "(", "limit", "=", "limit", ",", "offset", "=", "offset", ")" ]
Get your feed for this network Pagination for this can be achieved by using the ``limit`` and ``offset`` params :type limit: int :param limit: Number of posts from feed to get, starting from ``offset`` :type offset: int :param offset: Offset starting from bottom of feed :rtype: dict :returns: Feed metadata, including list of posts in feed format; this means they are not the full posts but only in partial form as necessary to display them on the Piazza feed. For example, the returned dicts only have content snippets of posts rather than the full text.
[ "Get", "your", "feed", "for", "this", "network" ]
26201d06e26bada9a838f6765c1bccedad05bd39
https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L406-L423
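The ``limit``/``offset`` pair in the get_feed row above supports straightforward pagination; a hedged sketch, assuming the partial posts are returned under a "feed" key and reusing the authenticated ``network`` from the earlier sketch:

```python
offset, limit = 0, 100
while True:
    page = network.get_feed(limit=limit, offset=offset)
    posts = page.get("feed", [])  # assumption: snippet list lives under "feed"
    if not posts:
        break
    for snippet in posts:
        print(snippet.get("subject"))
    offset += limit
```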
19,802
hfaran/piazza-api
piazza_api/network.py
Network.get_filtered_feed
def get_filtered_feed(self, feed_filter): """Get your feed containing only posts filtered by ``feed_filter`` :type feed_filter: FeedFilter :param feed_filter: Must be an instance of either: UnreadFilter, FollowingFilter, or FolderFilter :rtype: dict """ assert isinstance(feed_filter, (UnreadFilter, FollowingFilter, FolderFilter)) return self._rpc.filter_feed(**feed_filter.to_kwargs())
python
def get_filtered_feed(self, feed_filter): assert isinstance(feed_filter, (UnreadFilter, FollowingFilter, FolderFilter)) return self._rpc.filter_feed(**feed_filter.to_kwargs())
[ "def", "get_filtered_feed", "(", "self", ",", "feed_filter", ")", ":", "assert", "isinstance", "(", "feed_filter", ",", "(", "UnreadFilter", ",", "FollowingFilter", ",", "FolderFilter", ")", ")", "return", "self", ".", "_rpc", ".", "filter_feed", "(", "*", "...
Get your feed containing only posts filtered by ``feed_filter`` :type feed_filter: FeedFilter :param feed_filter: Must be an instance of either: UnreadFilter, FollowingFilter, or FolderFilter :rtype: dict
[ "Get", "your", "feed", "containing", "only", "posts", "filtered", "by", "feed_filter" ]
26201d06e26bada9a838f6765c1bccedad05bd39
https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L425-L435
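A sketch of calling get_filtered_feed with the accepted filter types; the filter classes' exact import path and constructor arguments are assumptions here, not confirmed by this row:

```python
from piazza_api.network import UnreadFilter, FolderFilter  # assumed import path

unread = network.get_filtered_feed(UnreadFilter())      # assumed no-arg constructor
hw1 = network.get_filtered_feed(FolderFilter("hw1"))    # assumed folder-name argument
```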
19,803
lucaskjaero/PyCasia
pycasia/CASIA.py
CASIA.get_dataset
def get_dataset(self, dataset): """ Checks to see if the dataset is present. If not, it downloads and unzips it. """ # If the dataset is present, no need to download anything. success = True dataset_path = self.base_dataset_path + dataset if not isdir(dataset_path): # Try 5 times to download. The download page is unreliable, so we need a few tries. was_error = False for iteration in range(5): # Guard against trying again if successful if iteration == 0 or was_error is True: zip_path = dataset_path + ".zip" # Download zip files if they're not there if not isfile(zip_path): try: with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset) as pbar: urlretrieve(self.datasets[dataset]["url"], zip_path, pbar.hook) except Exception as ex: print("Error downloading %s: %s" % (dataset, ex)) was_error = True # Unzip the data files if not isdir(dataset_path): try: with zipfile.ZipFile(zip_path) as zip_archive: zip_archive.extractall(path=dataset_path) zip_archive.close() except Exception as ex: print("Error unzipping %s: %s" % (zip_path, ex)) # Usually the error is caused by a bad zip file. # Delete it so the program will try to download it again. try: remove(zip_path) except FileNotFoundError: pass was_error = True if was_error: print("\nThis recognizer is trained by the CASIA handwriting database.") print("If the download doesn't work, you can get the files at %s" % self.datasets[dataset]["url"]) print("If you have download problems, " "wget may be effective at downloading because of download resuming.") success = False return success
python
def get_dataset(self, dataset): # If the dataset is present, no need to download anything. success = True dataset_path = self.base_dataset_path + dataset if not isdir(dataset_path): # Try 5 times to download. The download page is unreliable, so we need a few tries. was_error = False for iteration in range(5): # Guard against trying again if successful if iteration == 0 or was_error is True: zip_path = dataset_path + ".zip" # Download zip files if they're not there if not isfile(zip_path): try: with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset) as pbar: urlretrieve(self.datasets[dataset]["url"], zip_path, pbar.hook) except Exception as ex: print("Error downloading %s: %s" % (dataset, ex)) was_error = True # Unzip the data files if not isdir(dataset_path): try: with zipfile.ZipFile(zip_path) as zip_archive: zip_archive.extractall(path=dataset_path) zip_archive.close() except Exception as ex: print("Error unzipping %s: %s" % (zip_path, ex)) # Usually the error is caused by a bad zip file. # Delete it so the program will try to download it again. try: remove(zip_path) except FileNotFoundError: pass was_error = True if was_error: print("\nThis recognizer is trained by the CASIA handwriting database.") print("If the download doesn't work, you can get the files at %s" % self.datasets[dataset]["url"]) print("If you have download problems, " "wget may be effective at downloading because of download resuming.") success = False return success
[ "def", "get_dataset", "(", "self", ",", "dataset", ")", ":", "# If the dataset is present, no need to download anything.", "success", "=", "True", "dataset_path", "=", "self", ".", "base_dataset_path", "+", "dataset", "if", "not", "isdir", "(", "dataset_path", ")", ...
Checks to see if the dataset is present. If not, it downloads and unzips it.
[ "Checks", "to", "see", "if", "the", "dataset", "is", "present", ".", "If", "not", "it", "downloads", "and", "unzips", "it", "." ]
511ddb7809d788fc2c7bc7c1e8600db60bac8152
https://github.com/lucaskjaero/PyCasia/blob/511ddb7809d788fc2c7bc7c1e8600db60bac8152/pycasia/CASIA.py#L72-L121
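Since get_dataset returns a success boolean rather than raising, a caller can sweep every configured dataset and report failures; a minimal sketch, assuming the ``datasets`` dict attribute referenced in the code above keys on dataset names:

```python
from pycasia.CASIA import CASIA

casia = CASIA()
for name in casia.datasets:          # assumption: keys are the dataset names
    if not casia.get_dataset(name):
        print("Could not fetch %s; see the URL hint printed above." % name)
```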
19,804
ashleysommer/sanicpluginsframework
spf/plugin.py
SanicPlugin.first_plugin_context
def first_plugin_context(self): """Returns the context that is associated with the first app this plugin was registered on""" # Note, because registrations are stored in a set, it's not _really_ # the first one, but whichever one it sees first in the set. first_spf_reg = next(iter(self.registrations)) return self.get_context_from_spf(first_spf_reg)
python
def first_plugin_context(self): # Note, because registrations are stored in a set, it's not _really_ # the first one, but whichever one it sees first in the set. first_spf_reg = next(iter(self.registrations)) return self.get_context_from_spf(first_spf_reg)
[ "def", "first_plugin_context", "(", "self", ")", ":", "# Note, because registrations are stored in a set, it's not _really_", "# the first one, but whichever one it sees first in the set.", "first_spf_reg", "=", "next", "(", "iter", "(", "self", ".", "registrations", ")", ")", "...
Returns the context that is associated with the first app this plugin was registered on
[ "Returns", "the", "context", "that", "is", "associated", "with", "the", "first", "app", "this", "plugin", "was", "registered", "on" ]
2cb1656d9334f04c30c738074784b0450c1b893e
https://github.com/ashleysommer/sanicpluginsframework/blob/2cb1656d9334f04c30c738074784b0450c1b893e/spf/plugin.py#L191-L197
19,805
ashleysommer/sanicpluginsframework
spf/plugin.py
SanicPlugin.route_wrapper
async def route_wrapper(self, route, request, context, request_args, request_kw, *decorator_args, with_context=None, **decorator_kw): """This is the function that is called when a route is decorated with your plugin decorator. Context will normally be None, but the user can pass with_context=True so the route will get the plugin context """ # by default, do nothing, just run the wrapped function if with_context: resp = route(request, context, *request_args, **request_kw) else: resp = route(request, *request_args, **request_kw) if isawaitable(resp): resp = await resp return resp
python
async def route_wrapper(self, route, request, context, request_args, request_kw, *decorator_args, with_context=None, **decorator_kw): # by default, do nothing, just run the wrapped function if with_context: resp = route(request, context, *request_args, **request_kw) else: resp = route(request, *request_args, **request_kw) if isawaitable(resp): resp = await resp return resp
[ "async", "def", "route_wrapper", "(", "self", ",", "route", ",", "request", ",", "context", ",", "request_args", ",", "request_kw", ",", "*", "decorator_args", ",", "with_context", "=", "None", ",", "*", "*", "decorator_kw", ")", ":", "# by default, do nothing...
This is the function that is called when a route is decorated with your plugin decorator. Context will normally be None, but the user can pass with_context=True so the route will get the plugin context
[ "This", "is", "the", "function", "that", "is", "called", "when", "a", "route", "is", "decorated", "with", "your", "plugin", "decorator", ".", "Context", "will", "normally", "be", "None", "but", "the", "user", "can", "pass", "with_context", "=", "True", "so"...
2cb1656d9334f04c30c738074784b0450c1b893e
https://github.com/ashleysommer/sanicpluginsframework/blob/2cb1656d9334f04c30c738074784b0450c1b893e/spf/plugin.py#L370-L385
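The row above is the default no-op wrapper; a plugin customizes behavior by overriding it with the same signature. A hedged sketch of a subclass that times each request (the class name and the print are illustrative):

```python
import time
from inspect import isawaitable

from spf.plugin import SanicPlugin  # import path as in this row


class TimerPlugin(SanicPlugin):
    async def route_wrapper(self, route, request, context, request_args,
                            request_kw, *decorator_args, with_context=None,
                            **decorator_kw):
        start = time.monotonic()
        # Same dispatch as the default wrapper, with timing around it.
        if with_context:
            resp = route(request, context, *request_args, **request_kw)
        else:
            resp = route(request, *request_args, **request_kw)
        if isawaitable(resp):
            resp = await resp
        print("handled in %.3fs" % (time.monotonic() - start))
        return resp
```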
19,806
ionelmc/python-manhole
src/manhole/__init__.py
check_credentials
def check_credentials(client): """ Checks credentials for given socket. """ pid, uid, gid = get_peercred(client) euid = os.geteuid() client_name = "PID:%s UID:%s GID:%s" % (pid, uid, gid) if uid not in (0, euid): raise SuspiciousClient("Can't accept client with %s. It doesn't match the current EUID:%s or ROOT." % ( client_name, euid )) _LOG("Accepted connection on fd:%s from %s" % (client.fileno(), client_name)) return pid, uid, gid
python
def check_credentials(client): pid, uid, gid = get_peercred(client) euid = os.geteuid() client_name = "PID:%s UID:%s GID:%s" % (pid, uid, gid) if uid not in (0, euid): raise SuspiciousClient("Can't accept client with %s. It doesn't match the current EUID:%s or ROOT." % ( client_name, euid )) _LOG("Accepted connection on fd:%s from %s" % (client.fileno(), client_name)) return pid, uid, gid
[ "def", "check_credentials", "(", "client", ")", ":", "pid", ",", "uid", ",", "gid", "=", "get_peercred", "(", "client", ")", "euid", "=", "os", ".", "geteuid", "(", ")", "client_name", "=", "\"PID:%s UID:%s GID:%s\"", "%", "(", "pid", ",", "uid", ",", ...
Checks credentials for given socket.
[ "Checks", "credentials", "for", "given", "socket", "." ]
6a519a1f25142b047e814c6d00f4ef404856a15d
https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L242-L256
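The get_peercred helper used above reads the peer's credentials off the Unix-domain socket; on Linux that is the SO_PEERCRED socket option. A standalone sketch of the idea (Linux-only):

```python
import socket
import struct

def get_peercred(sock):
    """Return (pid, uid, gid) of the peer on a connected AF_UNIX socket."""
    fmt = "3i"  # three native ints: pid, uid, gid
    data = sock.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED,
                           struct.calcsize(fmt))
    return struct.unpack(fmt, data)
```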
19,807
ionelmc/python-manhole
src/manhole/__init__.py
handle_connection_exec
def handle_connection_exec(client): """ Alternate connection handler. No output redirection. """ class ExitExecLoop(Exception): pass def exit(): raise ExitExecLoop() client.settimeout(None) fh = os.fdopen(client.detach() if hasattr(client, 'detach') else client.fileno()) with closing(client): with closing(fh): try: payload = fh.readline() while payload: _LOG("Running: %r." % payload) eval(compile(payload, '<manhole>', 'exec'), {'exit': exit}, _MANHOLE.locals) payload = fh.readline() except ExitExecLoop: _LOG("Exiting exec loop.")
python
def handle_connection_exec(client): class ExitExecLoop(Exception): pass def exit(): raise ExitExecLoop() client.settimeout(None) fh = os.fdopen(client.detach() if hasattr(client, 'detach') else client.fileno()) with closing(client): with closing(fh): try: payload = fh.readline() while payload: _LOG("Running: %r." % payload) eval(compile(payload, '<manhole>', 'exec'), {'exit': exit}, _MANHOLE.locals) payload = fh.readline() except ExitExecLoop: _LOG("Exiting exec loop.")
[ "def", "handle_connection_exec", "(", "client", ")", ":", "class", "ExitExecLoop", "(", "Exception", ")", ":", "pass", "def", "exit", "(", ")", ":", "raise", "ExitExecLoop", "(", ")", "client", ".", "settimeout", "(", "None", ")", "fh", "=", "os", ".", ...
Alternate connection handler. No output redirection.
[ "Alternate", "connection", "handler", ".", "No", "output", "redirection", "." ]
6a519a1f25142b047e814c6d00f4ef404856a15d
https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L259-L281
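Talking to this handler from the client side is line-oriented: each line received is compiled and exec'd, and calling ``exit()`` breaks the loop. A hedged sketch, assuming manhole was installed with ``connection_handler="exec"`` and the default socket path (the pid is a placeholder):

```python
import socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect("/tmp/manhole-%d" % 12345)       # placeholder pid
s.sendall(b"print('hello from manhole')\n")
s.sendall(b"exit()\n")                     # raises ExitExecLoop server-side
s.close()
```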
19,808
ionelmc/python-manhole
src/manhole/__init__.py
handle_connection_repl
def handle_connection_repl(client): """ Handles connection. """ client.settimeout(None) # # disable this till we have evidence that it's needed # client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 0) # # Note: setting SO_RCVBUF on UDS has no effect, see: http://man7.org/linux/man-pages/man7/unix.7.html backup = [] old_interval = getinterval() patches = [('r', ('stdin', '__stdin__')), ('w', ('stdout', '__stdout__'))] if _MANHOLE.redirect_stderr: patches.append(('w', ('stderr', '__stderr__'))) try: client_fd = client.fileno() for mode, names in patches: for name in names: backup.append((name, getattr(sys, name))) setattr(sys, name, _ORIGINAL_FDOPEN(client_fd, mode, 1 if PY3 else 0)) try: handle_repl(_MANHOLE.locals) except Exception as exc: _LOG("REPL failed with %r." % exc) _LOG("DONE.") finally: try: # Change the switch/check interval to something ridiculous. We don't want to have other thread try # to write to the redirected sys.__std*/sys.std* - it would fail horribly. setinterval(2147483647) try: client.close() # close before it's too late. it may already be dead except IOError: pass junk = [] # keep the old file objects alive for a bit for name, fh in backup: junk.append(getattr(sys, name)) setattr(sys, name, fh) del backup for fh in junk: try: if hasattr(fh, 'detach'): fh.detach() else: fh.close() except IOError: pass del fh del junk finally: setinterval(old_interval) _LOG("Cleaned up.")
python
def handle_connection_repl(client): client.settimeout(None) # # disable this till we have evidence that it's needed # client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 0) # # Note: setting SO_RCVBUF on UDS has no effect, see: http://man7.org/linux/man-pages/man7/unix.7.html backup = [] old_interval = getinterval() patches = [('r', ('stdin', '__stdin__')), ('w', ('stdout', '__stdout__'))] if _MANHOLE.redirect_stderr: patches.append(('w', ('stderr', '__stderr__'))) try: client_fd = client.fileno() for mode, names in patches: for name in names: backup.append((name, getattr(sys, name))) setattr(sys, name, _ORIGINAL_FDOPEN(client_fd, mode, 1 if PY3 else 0)) try: handle_repl(_MANHOLE.locals) except Exception as exc: _LOG("REPL failed with %r." % exc) _LOG("DONE.") finally: try: # Change the switch/check interval to something ridiculous. We don't want to have other thread try # to write to the redirected sys.__std*/sys.std* - it would fail horribly. setinterval(2147483647) try: client.close() # close before it's too late. it may already be dead except IOError: pass junk = [] # keep the old file objects alive for a bit for name, fh in backup: junk.append(getattr(sys, name)) setattr(sys, name, fh) del backup for fh in junk: try: if hasattr(fh, 'detach'): fh.detach() else: fh.close() except IOError: pass del fh del junk finally: setinterval(old_interval) _LOG("Cleaned up.")
[ "def", "handle_connection_repl", "(", "client", ")", ":", "client", ".", "settimeout", "(", "None", ")", "# # disable this till we have evidence that it's needed", "# client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 0)", "# # Note: setting SO_RCVBUF on UDS has no effect, see: http:...
Handles connection.
[ "Handles", "connection", "." ]
6a519a1f25142b047e814c6d00f4ef404856a15d
https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L284-L335
19,809
ionelmc/python-manhole
src/manhole/__init__.py
install
def install(verbose=True, verbose_destination=sys.__stderr__.fileno() if hasattr(sys.__stderr__, 'fileno') else sys.__stderr__, strict=True, **kwargs): """ Installs the manhole. Args: verbose (bool): Set it to ``False`` to squelch the logging. verbose_destination (file descriptor or handle): Destination for verbose messages. Default is unbuffered stderr (stderr ``2`` file descriptor). patch_fork (bool): Set it to ``False`` if you don't want your ``os.fork`` and ``os.forkpty`` monkeypatched activate_on (int or signal name): set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you want the Manhole thread to start when this signal is sent. This is desirable in case you don't want the thread active all the time. oneshot_on (int or signal name): Set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you want the Manhole to listen for connection in the signal handler. This is desirable in case you don't want threads at all. thread (bool): Start the always-on ManholeThread. Default: ``True``. Automatically switched to ``False`` if ``oneshot_on`` or ``activate_on`` are used. sigmask (list of ints or signal names): Will set the signal mask to the given list (using ``signalfd.sigprocmask``). No action is done if ``signalfd`` is not importable. **NOTE**: This is done so that the Manhole thread doesn't *steal* any signals; Normally that is fine because Python will force all the signal handling to be run in the main thread but signalfd doesn't. socket_path (str): Use a specific path for the unix domain socket (instead of ``/tmp/manhole-<pid>``). This disables ``patch_fork`` as children cannot reuse the same path. reinstall_delay (float): Delay the unix domain socket creation *reinstall_delay* seconds. This alleviates cleanup failures when using fork+exec patterns. locals (dict): Names to add to manhole interactive shell locals. daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``. redirect_stderr (bool): Redirect output from stderr to manhole console. Default: ``True``. connection_handler (function): Connection handler to use. Use ``"exec"`` for simple implementation without output redirection or your own function. (warning: this is for advanced users). Default: ``"repl"``. """ # pylint: disable=W0603 global _MANHOLE with _LOCK: if _MANHOLE is None: _MANHOLE = Manhole() else: if strict: raise AlreadyInstalled("Manhole already installed!") else: _LOG.release() _MANHOLE.release() # Threads might be started here _LOG.configure(verbose, verbose_destination) _MANHOLE.configure(**kwargs) # Threads might be started here return _MANHOLE
python
def install(verbose=True, verbose_destination=sys.__stderr__.fileno() if hasattr(sys.__stderr__, 'fileno') else sys.__stderr__, strict=True, **kwargs): # pylint: disable=W0603 global _MANHOLE with _LOCK: if _MANHOLE is None: _MANHOLE = Manhole() else: if strict: raise AlreadyInstalled("Manhole already installed!") else: _LOG.release() _MANHOLE.release() # Threads might be started here _LOG.configure(verbose, verbose_destination) _MANHOLE.configure(**kwargs) # Threads might be started here return _MANHOLE
[ "def", "install", "(", "verbose", "=", "True", ",", "verbose_destination", "=", "sys", ".", "__stderr__", ".", "fileno", "(", ")", "if", "hasattr", "(", "sys", ".", "__stderr__", ",", "'fileno'", ")", "else", "sys", ".", "__stderr__", ",", "strict", "=",...
Installs the manhole. Args: verbose (bool): Set it to ``False`` to squelch the logging. verbose_destination (file descriptor or handle): Destination for verbose messages. Default is unbuffered stderr (stderr ``2`` file descriptor). patch_fork (bool): Set it to ``False`` if you don't want your ``os.fork`` and ``os.forkpty`` monkeypatched activate_on (int or signal name): set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you want the Manhole thread to start when this signal is sent. This is desirable in case you don't want the thread active all the time. oneshot_on (int or signal name): Set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you want the Manhole to listen for connection in the signal handler. This is desirable in case you don't want threads at all. thread (bool): Start the always-on ManholeThread. Default: ``True``. Automatically switched to ``False`` if ``oneshot_on`` or ``activate_on`` are used. sigmask (list of ints or signal names): Will set the signal mask to the given list (using ``signalfd.sigprocmask``). No action is done if ``signalfd`` is not importable. **NOTE**: This is done so that the Manhole thread doesn't *steal* any signals; Normally that is fine because Python will force all the signal handling to be run in the main thread but signalfd doesn't. socket_path (str): Use a specific path for the unix domain socket (instead of ``/tmp/manhole-<pid>``). This disables ``patch_fork`` as children cannot reuse the same path. reinstall_delay (float): Delay the unix domain socket creation *reinstall_delay* seconds. This alleviates cleanup failures when using fork+exec patterns. locals (dict): Names to add to manhole interactive shell locals. daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``. redirect_stderr (bool): Redirect output from stderr to manhole console. Default: ``True``. connection_handler (function): Connection handler to use. Use ``"exec"`` for simple implementation without output redirection or your own function. (warning: this is for advanced users). Default: ``"repl"``.
[ "Installs", "the", "manhole", "." ]
6a519a1f25142b047e814c6d00f4ef404856a15d
https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L569-L618
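A typical call using only options documented in the row above; the ``locals`` contents are illustrative:

```python
import manhole

manhole.install(
    verbose=False,            # squelch logging
    daemon_connection=True,   # connection threads die on app exit
    locals={"answer": 42},    # extra names exposed in the manhole shell
)
# The manhole listens on a Unix socket; connect with e.g.:
#   nc -U /tmp/manhole-<pid>
```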
19,810
ionelmc/python-manhole
src/manhole/__init__.py
dump_stacktraces
def dump_stacktraces(): """ Dumps thread ids and tracebacks to stdout. """ lines = [] for thread_id, stack in sys._current_frames().items(): # pylint: disable=W0212 lines.append("\n######### ProcessID=%s, ThreadID=%s #########" % ( os.getpid(), thread_id )) for filename, lineno, name, line in traceback.extract_stack(stack): lines.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: lines.append(" %s" % (line.strip())) lines.append("#############################################\n\n") print('\n'.join(lines), file=sys.stderr if _MANHOLE.redirect_stderr else sys.stdout)
python
def dump_stacktraces(): lines = [] for thread_id, stack in sys._current_frames().items(): # pylint: disable=W0212 lines.append("\n######### ProcessID=%s, ThreadID=%s #########" % ( os.getpid(), thread_id )) for filename, lineno, name, line in traceback.extract_stack(stack): lines.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: lines.append(" %s" % (line.strip())) lines.append("#############################################\n\n") print('\n'.join(lines), file=sys.stderr if _MANHOLE.redirect_stderr else sys.stdout)
[ "def", "dump_stacktraces", "(", ")", ":", "lines", "=", "[", "]", "for", "thread_id", ",", "stack", "in", "sys", ".", "_current_frames", "(", ")", ".", "items", "(", ")", ":", "# pylint: disable=W0212", "lines", ".", "append", "(", "\"\\n######### ProcessID=...
Dumps thread ids and tracebacks to stdout.
[ "Dumps", "thread", "ids", "and", "tracebacks", "to", "stdout", "." ]
6a519a1f25142b047e814c6d00f4ef404856a15d
https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L621-L636
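The same thread-dump idea works standalone, outside manhole: ``sys._current_frames()`` maps thread ids to their current frames and ``traceback`` formats them.

```python
import os
import sys
import traceback

def dump_stacktraces():
    lines = []
    for thread_id, stack in sys._current_frames().items():
        lines.append("######### ProcessID=%s, ThreadID=%s #########\n"
                     % (os.getpid(), thread_id))
        lines.extend(traceback.format_stack(stack))
    print("".join(lines))  # format_stack entries already end with newlines
```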
19,811
ionelmc/python-manhole
src/manhole/__init__.py
ManholeThread.clone
def clone(self, **kwargs): """ Make a fresh thread with the same options. This is usually used on dead threads. """ return ManholeThread( self.get_socket, self.sigmask, self.start_timeout, connection_handler=self.connection_handler, daemon_connection=self.daemon_connection, **kwargs )
python
def clone(self, **kwargs): return ManholeThread( self.get_socket, self.sigmask, self.start_timeout, connection_handler=self.connection_handler, daemon_connection=self.daemon_connection, **kwargs )
[ "def", "clone", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "ManholeThread", "(", "self", ".", "get_socket", ",", "self", ".", "sigmask", ",", "self", ".", "start_timeout", ",", "connection_handler", "=", "self", ".", "connection_handler", ",...
Make a fresh thread with the same options. This is usually used on dead threads.
[ "Make", "a", "fresh", "thread", "with", "the", "same", "options", ".", "This", "is", "usually", "used", "on", "dead", "threads", "." ]
6a519a1f25142b047e814c6d00f4ef404856a15d
https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L167-L176
19,812
ionelmc/python-manhole
src/manhole/__init__.py
Manhole.reinstall
def reinstall(self): """ Reinstalls the manhole. Checks if the thread is running. If not, it starts it again. """ with _LOCK: if not (self.thread.is_alive() and self.thread in _ORIGINAL__ACTIVE): self.thread = self.thread.clone(bind_delay=self.reinstall_delay) if self.should_restart: self.thread.start()
python
def reinstall(self): with _LOCK: if not (self.thread.is_alive() and self.thread in _ORIGINAL__ACTIVE): self.thread = self.thread.clone(bind_delay=self.reinstall_delay) if self.should_restart: self.thread.start()
[ "def", "reinstall", "(", "self", ")", ":", "with", "_LOCK", ":", "if", "not", "(", "self", ".", "thread", ".", "is_alive", "(", ")", "and", "self", ".", "thread", "in", "_ORIGINAL__ACTIVE", ")", ":", "self", ".", "thread", "=", "self", ".", "thread",...
Reinstalls the manhole. Checks if the thread is running. If not, it starts it again.
[ "Reinstalls", "the", "manhole", ".", "Checks", "if", "the", "thread", "is", "running", ".", "If", "not", "it", "starts", "it", "again", "." ]
6a519a1f25142b047e814c6d00f4ef404856a15d
https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L502-L510
19,813
ionelmc/python-manhole
src/manhole/__init__.py
Manhole.patched_forkpty
def patched_forkpty(self): """Fork a new process with a new pseudo-terminal as controlling tty.""" pid, master_fd = self.original_os_forkpty() if not pid: _LOG('Fork detected. Reinstalling Manhole.') self.reinstall() return pid, master_fd
python
def patched_forkpty(self): pid, master_fd = self.original_os_forkpty() if not pid: _LOG('Fork detected. Reinstalling Manhole.') self.reinstall() return pid, master_fd
[ "def", "patched_forkpty", "(", "self", ")", ":", "pid", ",", "master_fd", "=", "self", ".", "original_os_forkpty", "(", ")", "if", "not", "pid", ":", "_LOG", "(", "'Fork detected. Reinstalling Manhole.'", ")", "self", ".", "reinstall", "(", ")", "return", "p...
Fork a new process with a new pseudo-terminal as controlling tty.
[ "Fork", "a", "new", "process", "with", "a", "new", "pseudo", "-", "terminal", "as", "controlling", "tty", "." ]
6a519a1f25142b047e814c6d00f4ef404856a15d
https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L546-L552
19,814
ambitioninc/newrelic-api
newrelic_api/alert_conditions_nrql.py
AlertConditionsNRQL.update
def update( # noqa: C901 self, alert_condition_nrql_id, policy_id, name=None, threshold_type=None, query=None, since_value=None, terms=None, expected_groups=None, value_function=None, runbook_url=None, ignore_overlap=None, enabled=True): """ Updates any of the optional parameters of the alert condition nrql :type alert_condition_nrql_id: int :param alert_condition_nrql_id: Alerts condition NRQL id to update :type policy_id: int :param policy_id: Alert policy id where target alert condition belongs to :type name: str :param name: The name of the alert :type threshold_type: str :param threshold_type: The threshold_type of the condition, can be static or outlier :type query: str :param query: nrql query for the alerts :type since_value: str :param since_value: since value for the alert :type terms: list[hash] :param terms: list of hashes containing threshold config for the alert :type expected_groups: int :param expected_groups: expected groups setting for outlier alerts :type value_function: str :param value_function: value function for static alerts :type runbook_url: str :param runbook_url: The url of the runbook :type ignore_overlap: bool :param ignore_overlap: Whether to ignore overlaps for outlier alerts :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :raises: This will raise a :class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>` if target alert condition is not included in target policy :raises: This will raise a :class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>` if metric is set as user_defined but user_defined config is not passed :: { "nrql_condition": { "name": "string", "runbook_url": "string", "enabled": "boolean", "expected_groups": "integer", "ignore_overlap": "boolean", "value_function": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "nrql": { "query": "string", "since_value": "string" } } } """ conditions_nrql_dict = self.list(policy_id) target_condition_nrql = None for condition in conditions_nrql_dict['nrql_conditions']: if int(condition['id']) == alert_condition_nrql_id: target_condition_nrql = condition break if target_condition_nrql is None: raise NoEntityException( 'Target alert condition nrql is not included in that policy.' 'policy_id: {}, alert_condition_nrql_id {}'.format( policy_id, alert_condition_nrql_id ) ) data = { 'nrql_condition': { 'type': threshold_type or target_condition_nrql['type'], 'enabled': target_condition_nrql['enabled'], 'name': name or target_condition_nrql['name'], 'terms': terms or target_condition_nrql['terms'], 'nrql': { 'query': query or target_condition_nrql['nrql']['query'], 'since_value': since_value or target_condition_nrql['nrql']['since_value'], } } } if enabled is not None: data['nrql_condition']['enabled'] = str(enabled).lower() if runbook_url is not None: data['nrql_condition']['runbook_url'] = runbook_url elif 'runbook_url' in target_condition_nrql: data['nrql_condition']['runbook_url'] = target_condition_nrql['runbook_url'] if expected_groups is not None: data['nrql_condition']['expected_groups'] = expected_groups elif 'expected_groups' in target_condition_nrql: data['nrql_condition']['expected_groups'] = target_condition_nrql['expected_groups'] if ignore_overlap is not None: data['nrql_condition']['ignore_overlap'] = ignore_overlap elif 'ignore_overlap' in target_condition_nrql: data['nrql_condition']['ignore_overlap'] = target_condition_nrql['ignore_overlap'] if value_function is not None: data['nrql_condition']['value_function'] = value_function elif 'value_function' in target_condition_nrql: data['nrql_condition']['value_function'] = target_condition_nrql['value_function'] if data['nrql_condition']['type'] == 'static': if 'value_function' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as static but no value_function config specified' ) data['nrql_condition'].pop('expected_groups', None) data['nrql_condition'].pop('ignore_overlap', None) elif data['nrql_condition']['type'] == 'outlier': if 'expected_groups' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as outlier but expected_groups config is not specified' ) if 'ignore_overlap' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as outlier but ignore_overlap config is not specified' ) data['nrql_condition'].pop('value_function', None) return self._put( url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id), headers=self.headers, data=data )
python
def update( # noqa: C901 self, alert_condition_nrql_id, policy_id, name=None, threshold_type=None, query=None, since_value=None, terms=None, expected_groups=None, value_function=None, runbook_url=None, ignore_overlap=None, enabled=True): conditions_nrql_dict = self.list(policy_id) target_condition_nrql = None for condition in conditions_nrql_dict['nrql_conditions']: if int(condition['id']) == alert_condition_nrql_id: target_condition_nrql = condition break if target_condition_nrql is None: raise NoEntityException( 'Target alert condition nrql is not included in that policy.' 'policy_id: {}, alert_condition_nrql_id {}'.format( policy_id, alert_condition_nrql_id ) ) data = { 'nrql_condition': { 'type': threshold_type or target_condition_nrql['type'], 'enabled': target_condition_nrql['enabled'], 'name': name or target_condition_nrql['name'], 'terms': terms or target_condition_nrql['terms'], 'nrql': { 'query': query or target_condition_nrql['nrql']['query'], 'since_value': since_value or target_condition_nrql['nrql']['since_value'], } } } if enabled is not None: data['nrql_condition']['enabled'] = str(enabled).lower() if runbook_url is not None: data['nrql_condition']['runbook_url'] = runbook_url elif 'runbook_url' in target_condition_nrql: data['nrql_condition']['runbook_url'] = target_condition_nrql['runbook_url'] if expected_groups is not None: data['nrql_condition']['expected_groups'] = expected_groups elif 'expected_groups' in target_condition_nrql: data['nrql_condition']['expected_groups'] = target_condition_nrql['expected_groups'] if ignore_overlap is not None: data['nrql_condition']['ignore_overlap'] = ignore_overlap elif 'ignore_overlap' in target_condition_nrql: data['nrql_condition']['ignore_overlap'] = target_condition_nrql['ignore_overlap'] if value_function is not None: data['nrql_condition']['value_function'] = value_function elif 'value_function' in target_condition_nrql: data['nrql_condition']['value_function'] = target_condition_nrql['value_function'] if data['nrql_condition']['type'] == 'static': if 'value_function' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as static but no value_function config specified' ) data['nrql_condition'].pop('expected_groups', None) data['nrql_condition'].pop('ignore_overlap', None) elif data['nrql_condition']['type'] == 'outlier': if 'expected_groups' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as outlier but expected_groups config is not specified' ) if 'ignore_overlap' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as outlier but ignore_overlap config is not specified' ) data['nrql_condition'].pop('value_function', None) return self._put( url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id), headers=self.headers, data=data )
[ "def", "update", "(", "# noqa: C901", "self", ",", "alert_condition_nrql_id", ",", "policy_id", ",", "name", "=", "None", ",", "threshold_type", "=", "None", ",", "query", "=", "None", ",", "since_value", "=", "None", ",", "terms", "=", "None", ",", "expec...
Updates any of the optional parameters of the alert condition nrql :type alert_condition_nrql_id: int :param alert_condition_nrql_id: Alerts condition NRQL id to update :type policy_id: int :param policy_id: Alert policy id where target alert condition belongs to :type name: str :param name: The name of the alert :type threshold_type: str :param threshold_type: The threshold_type of the condition, can be static or outlier :type query: str :param query: nrql query for the alerts :type since_value: str :param since_value: since value for the alert :type terms: list[hash] :param terms: list of hashes containing threshold config for the alert :type expected_groups: int :param expected_groups: expected groups setting for outlier alerts :type value_function: str :param value_function: value function for static alerts :type runbook_url: str :param runbook_url: The url of the runbook :type ignore_overlap: bool :param ignore_overlap: Whether to ignore overlaps for outlier alerts :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :raises: This will raise a :class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>` if target alert condition is not included in target policy :raises: This will raise a :class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>` if metric is set as user_defined but user_defined config is not passed :: { "nrql_condition": { "name": "string", "runbook_url": "string", "enabled": "boolean", "expected_groups": "integer", "ignore_overlap": "boolean", "value_function": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "nrql": { "query": "string", "since_value": "string" } } }
[ "Updates", "any", "of", "the", "optional", "parameters", "of", "the", "alert", "condition", "nrql" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_nrql.py#L67-L224
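A hedged usage sketch for the update row above: change one field of an existing NRQL condition and let the method back-fill the rest from the current condition, as the code does. The API key and ids are placeholders:

```python
from newrelic_api.alert_conditions_nrql import AlertConditionsNRQL

client = AlertConditionsNRQL(api_key="...")  # placeholder key
client.update(
    alert_condition_nrql_id=98765,
    policy_id=123456,
    query="SELECT count(*) FROM TransactionError WHERE appName = 'web'",
)
```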
19,815
ambitioninc/newrelic-api
newrelic_api/alert_conditions_nrql.py
AlertConditionsNRQL.create
def create( self, policy_id, name, threshold_type, query, since_value, terms, expected_groups=None, value_function=None, runbook_url=None, ignore_overlap=None, enabled=True): """ Creates an alert condition nrql :type policy_id: int :param policy_id: Alert policy id where target alert condition nrql belongs to :type name: str :param name: The name of the alert :type threshold_type: str :param threshold_type: The threshold_type of the condition, can be static or outlier :type query: str :param query: nrql query for the alerts :type since_value: str :param since_value: since value for the alert :type terms: list[hash] :param terms: list of hashes containing threshold config for the alert :type expected_groups: int :param expected_groups: expected groups setting for outlier alerts :type value_function: str :param value_function: value function for static alerts :type runbook_url: str :param runbook_url: The url of the runbook :type ignore_overlap: bool :param ignore_overlap: Whether to ignore overlaps for outlier alerts :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :raises: This will raise a :class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>` if target alert condition is not included in target policy :raises: This will raise a :class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>` if metric is set as user_defined but user_defined config is not passed :: { "nrql_condition": { "name": "string", "runbook_url": "string", "enabled": "boolean", "expected_groups": "integer", "ignore_overlap": "boolean", "value_function": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "nrql": { "query": "string", "since_value": "string" } } } """ data = { 'nrql_condition': { 'type': threshold_type, 'name': name, 'enabled': enabled, 'terms': terms, 'nrql': { 'query': query, 'since_value': since_value } } } if runbook_url is not None: data['nrql_condition']['runbook_url'] = runbook_url if expected_groups is not None: data['nrql_condition']['expected_groups'] = expected_groups if ignore_overlap is not None: data['nrql_condition']['ignore_overlap'] = ignore_overlap if value_function is not None: data['nrql_condition']['value_function'] = value_function if data['nrql_condition']['type'] == 'static': if 'value_function' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as static but no value_function config specified' ) data['nrql_condition'].pop('expected_groups', None) data['nrql_condition'].pop('ignore_overlap', None) elif data['nrql_condition']['type'] == 'outlier': if 'expected_groups' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as outlier but expected_groups config is not specified' ) if 'ignore_overlap' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as outlier but ignore_overlap config is not specified' ) data['nrql_condition'].pop('value_function', None) return self._post( url='{0}alerts_nrql_conditions/policies/{1}.json'.format(self.URL, policy_id), headers=self.headers, data=data )
python
def create( self, policy_id, name, threshold_type, query, since_value, terms, expected_groups=None, value_function=None, runbook_url=None, ignore_overlap=None, enabled=True): data = { 'nrql_condition': { 'type': threshold_type, 'name': name, 'enabled': enabled, 'terms': terms, 'nrql': { 'query': query, 'since_value': since_value } } } if runbook_url is not None: data['nrql_condition']['runbook_url'] = runbook_url if expected_groups is not None: data['nrql_condition']['expected_groups'] = expected_groups if ignore_overlap is not None: data['nrql_condition']['ignore_overlap'] = ignore_overlap if value_function is not None: data['nrql_condition']['value_function'] = value_function if data['nrql_condition']['type'] == 'static': if 'value_function' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as static but no value_function config specified' ) data['nrql_condition'].pop('expected_groups', None) data['nrql_condition'].pop('ignore_overlap', None) elif data['nrql_condition']['type'] == 'outlier': if 'expected_groups' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as outlier but expected_groups config is not specified' ) if 'ignore_overlap' not in data['nrql_condition']: raise ConfigurationException( 'Alert is set as outlier but ignore_overlap config is not specified' ) data['nrql_condition'].pop('value_function', None) return self._post( url='{0}alerts_nrql_conditions/policies/{1}.json'.format(self.URL, policy_id), headers=self.headers, data=data )
[ "def", "create", "(", "self", ",", "policy_id", ",", "name", ",", "threshold_type", ",", "query", ",", "since_value", ",", "terms", ",", "expected_groups", "=", "None", ",", "value_function", "=", "None", ",", "runbook_url", "=", "None", ",", "ignore_overlap...
Creates an alert condition nrql :type policy_id: int :param policy_id: Alert policy id where target alert condition nrql belongs to :type name: str :param name: The name of the alert :type threshold_type: str :param threshold_type: The threshold_type of the condition, can be static or outlier :type query: str :param query: nrql query for the alerts :type since_value: str :param since_value: since value for the alert :type terms: list[hash] :param terms: list of hashes containing threshold config for the alert :type expected_groups: int :param expected_groups: expected groups setting for outlier alerts :type value_function: str :param value_function: value function for static alerts :type runbook_url: str :param runbook_url: The url of the runbook :type ignore_overlap: bool :param ignore_overlap: Whether to ignore overlaps for outlier alerts :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :raises: This will raise a :class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>` if target alert condition is not included in target policy :raises: This will raise a :class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>` if metric is set as user_defined but user_defined config is not passed :: { "nrql_condition": { "name": "string", "runbook_url": "string", "enabled": "boolean", "expected_groups": "integer", "ignore_overlap": "boolean", "value_function": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "nrql": { "query": "string", "since_value": "string" } } }
[ "Creates", "an", "alert", "condition", "nrql" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_nrql.py#L226-L350
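And a matching create sketch; the ``terms`` hash follows the schema shown in the docstring, and a ``static`` condition must carry ``value_function``, which is exactly what the validation above enforces. Key and ids are placeholders:

```python
from newrelic_api.alert_conditions_nrql import AlertConditionsNRQL

client = AlertConditionsNRQL(api_key="...")  # placeholder key
client.create(
    policy_id=123456,
    name="High error rate",
    threshold_type="static",
    query="SELECT count(*) FROM TransactionError",
    since_value="3",
    terms=[{
        "duration": "5",
        "operator": "above",
        "priority": "critical",
        "threshold": "10",
        "time_function": "all",
    }],
    value_function="single_value",  # required for static conditions
)
```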
19,816
ambitioninc/newrelic-api
newrelic_api/alert_conditions_nrql.py
AlertConditionsNRQL.delete
def delete(self, alert_condition_nrql_id): """ This API endpoint allows you to delete an alert condition nrql :type alert_condition_nrql_id: integer :param alert_condition_nrql_id: Alert Condition ID :rtype: dict :return: The JSON response of the API :: { "nrql_condition": { "type": "string", "id": "integer", "name": "string", "runbook_url": "string", "enabled": "boolean", "expected_groups": "integer", "ignore_overlap": "boolean", "value_function": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "nrql": { "query": "string", "since_value": "string" } } } """ return self._delete( url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id), headers=self.headers )
python
def delete(self, alert_condition_nrql_id): return self._delete( url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id), headers=self.headers )
[ "def", "delete", "(", "self", ",", "alert_condition_nrql_id", ")", ":", "return", "self", ".", "_delete", "(", "url", "=", "'{0}alerts_nrql_conditions/{1}.json'", ".", "format", "(", "self", ".", "URL", ",", "alert_condition_nrql_id", ")", ",", "headers", "=", ...
This API endpoint allows you to delete an alert condition nrql :type alert_condition_nrql_id: integer :param alert_condition_nrql_id: Alert Condition ID :rtype: dict :return: The JSON response of the API :: { "nrql_condition": { "type": "string", "id": "integer", "name": "string", "runbook_url": "string", "enabled": "boolean", "expected_groups": "integer", "ignore_overlap": "boolean", "value_function": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "nrql": { "query": "string", "since_value": "string" } } }
[ "This", "API", "endpoint", "allows", "you", "to", "delete", "an", "alert", "condition", "nrql" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_nrql.py#L352-L394
19,817
ambitioninc/newrelic-api
newrelic_api/servers.py
Servers.list
def list(self, filter_name=None, filter_ids=None, filter_labels=None, page=None): """ This API endpoint returns a paginated list of the Servers associated with your New Relic account. Servers can be filtered by their name or by a list of server IDs. :type filter_name: str :param filter_name: Filter by server name :type filter_ids: list of ints :param filter_ids: Filter by server ids :type filter_labels: dict of label type: value pairs :param filter_labels: Filter by server labels :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "servers": [ { "id": "integer", "account_id": "integer", "name": "string", "host": "string", "reporting": "boolean", "last_reported_at": "time", "summary": { "cpu": "float", "cpu_stolen": "float", "disk_io": "float", "memory": "float", "memory_used": "integer", "memory_total": "integer", "fullest_disk": "float", "fullest_disk_free": "integer" } } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/servers.json?page=2", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/servers.json?page=2", "rel": "next" } } } """ label_param = '' if filter_labels: label_param = ';'.join(['{}:{}'.format(label, value) for label, value in filter_labels.items()]) filters = [ 'filter[name]={0}'.format(filter_name) if filter_name else None, 'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None, 'filter[labels]={0}'.format(label_param) if filter_labels else None, 'page={0}'.format(page) if page else None ] return self._get( url='{0}servers.json'.format(self.URL), headers=self.headers, params=self.build_param_string(filters) )
python
def list(self, filter_name=None, filter_ids=None, filter_labels=None, page=None): label_param = '' if filter_labels: label_param = ';'.join(['{}:{}'.format(label, value) for label, value in filter_labels.items()]) filters = [ 'filter[name]={0}'.format(filter_name) if filter_name else None, 'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None, 'filter[labels]={0}'.format(label_param) if filter_labels else None, 'page={0}'.format(page) if page else None ] return self._get( url='{0}servers.json'.format(self.URL), headers=self.headers, params=self.build_param_string(filters) )
[ "def", "list", "(", "self", ",", "filter_name", "=", "None", ",", "filter_ids", "=", "None", ",", "filter_labels", "=", "None", ",", "page", "=", "None", ")", ":", "label_param", "=", "''", "if", "filter_labels", ":", "label_param", "=", "';'", ".", "j...
This API endpoint returns a paginated list of the Servers associated with your New Relic account. Servers can be filtered by their name or by a list of server IDs. :type filter_name: str :param filter_name: Filter by server name :type filter_ids: list of ints :param filter_ids: Filter by server ids :type filter_labels: dict of label type: value pairs :param filter_labels: Filter by server labels :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "servers": [ { "id": "integer", "account_id": "integer", "name": "string", "host": "string", "reporting": "boolean", "last_reported_at": "time", "summary": { "cpu": "float", "cpu_stolen": "float", "disk_io": "float", "memory": "float", "memory_used": "integer", "memory_total": "integer", "fullest_disk": "float", "fullest_disk_free": "integer" } } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/servers.json?page=2", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/servers.json?page=2", "rel": "next" } } }
[ "This", "API", "endpoint", "returns", "a", "paginated", "list", "of", "the", "Servers", "associated", "with", "your", "New", "Relic", "account", ".", "Servers", "can", "be", "filtered", "by", "their", "name", "or", "by", "a", "list", "of", "server", "IDs",...
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/servers.py#L8-L82
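Note how ``filter_labels`` is flattened into ``Type:value`` pairs joined by semicolons before being sent; a hedged usage sketch (placeholder key):

```python
from newrelic_api.servers import Servers

servers = Servers(api_key="...")
servers.list(
    filter_labels={"Environment": "production", "Role": "web"},
    # sent as: filter[labels]=Environment:production;Role:web
    page=1,
)
```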
19,818
ambitioninc/newrelic-api
newrelic_api/servers.py
Servers.update
def update(self, id, name=None): """ Updates any of the optional parameters of the server :type id: int :param id: Server ID :type name: str :param name: The name of the server :rtype: dict :return: The JSON response of the API :: { "server": { "id": "integer", "account_id": "integer", "name": "string", "host": "string", "reporting": "boolean", "last_reported_at": "time", "summary": { "cpu": "float", "cpu_stolen": "float", "disk_io": "float", "memory": "float", "memory_used": "integer", "memory_total": "integer", "fullest_disk": "float", "fullest_disk_free": "integer" } } } """ nr_data = self.show(id)['server'] data = { 'server': { 'name': name or nr_data['name'], } } return self._put( url='{0}servers/{1}.json'.format(self.URL, id), headers=self.headers, data=data )
python
def update(self, id, name=None): nr_data = self.show(id)['server'] data = { 'server': { 'name': name or nr_data['name'], } } return self._put( url='{0}servers/{1}.json'.format(self.URL, id), headers=self.headers, data=data )
[ "def", "update", "(", "self", ",", "id", ",", "name", "=", "None", ")", ":", "nr_data", "=", "self", ".", "show", "(", "id", ")", "[", "'server'", "]", "data", "=", "{", "'server'", ":", "{", "'name'", ":", "name", "or", "nr_data", "[", "'name'",...
Updates any of the optional parameters of the server :type id: int :param id: Server ID :type name: str :param name: The name of the server :rtype: dict :return: The JSON response of the API :: { "server": { "id": "integer", "account_id": "integer", "name": "string", "host": "string", "reporting": "boolean", "last_reported_at": "time", "summary": { "cpu": "float", "cpu_stolen": "float", "disk_io": "float", "memory": "float", "memory_used": "integer", "memory_total": "integer", "fullest_disk": "float", "fullest_disk_free": "integer" } } }
[ "Updates", "any", "of", "the", "optional", "parameters", "of", "the", "server" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/servers.py#L123-L172
19,819
ambitioninc/newrelic-api
newrelic_api/alert_policies.py
AlertPolicies.create
def create(self, name, incident_preference): """ This API endpoint allows you to create an alert policy :type name: str :param name: The name of the policy :type incident_preference: str :param incident_preference: Can be PER_POLICY, PER_CONDITION or PER_CONDITION_AND_TARGET :rtype: dict :return: The JSON response of the API :: { "policy": { "created_at": "time", "id": "integer", "incident_preference": "string", "name": "string", "updated_at": "time" } } """ data = { "policy": { "name": name, "incident_preference": incident_preference } } return self._post( url='{0}alerts_policies.json'.format(self.URL), headers=self.headers, data=data )
python
def create(self, name, incident_preference): data = { "policy": { "name": name, "incident_preference": incident_preference } } return self._post( url='{0}alerts_policies.json'.format(self.URL), headers=self.headers, data=data )
[ "def", "create", "(", "self", ",", "name", ",", "incident_preference", ")", ":", "data", "=", "{", "\"policy\"", ":", "{", "\"name\"", ":", "name", ",", "\"incident_preference\"", ":", "incident_preference", "}", "}", "return", "self", ".", "_post", "(", "...
This API endpoint allows you to create an alert policy :type name: str :param name: The name of the policy :type incident_preference: str :param incident_preference: Can be PER_POLICY, PER_CONDITION or PER_CONDITION_AND_TARGET :rtype: dict :return: The JSON response of the API :: { "policy": { "created_at": "time", "id": "integer", "incident_preference": "string", "name": "string", "updated_at": "time" } }
[ "This", "API", "endpoint", "allows", "you", "to", "create", "an", "alert", "policy" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L49-L88
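A hedged usage sketch for AlertPolicies.create; the API key and policy name are placeholders.

from newrelic_api.alert_policies import AlertPolicies

policies = AlertPolicies(api_key='YOUR_API_KEY')  # hypothetical key
created = policies.create(name='CPU policy', incident_preference='PER_POLICY')
policy_id = created['policy']['id']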
19,820
ambitioninc/newrelic-api
newrelic_api/alert_policies.py
AlertPolicies.update
def update(self, id, name, incident_preference): """ This API endpoint allows you to update an alert policy :type id: integer :param id: The id of the policy :type name: str :param name: The name of the policy :type incident_preference: str :param incident_preference: Can be PER_POLICY, PER_CONDITION or PER_CONDITION_AND_TARGET :rtype: dict :return: The JSON response of the API :: { "policy": { "created_at": "time", "id": "integer", "incident_preference": "string", "name": "string", "updated_at": "time" } } """ data = { "policy": { "name": name, "incident_preference": incident_preference } } return self._put( url='{0}alerts_policies/{1}.json'.format(self.URL, id), headers=self.headers, data=data )
python
def update(self, id, name, incident_preference): data = { "policy": { "name": name, "incident_preference": incident_preference } } return self._put( url='{0}alerts_policies/{1}.json'.format(self.URL, id), headers=self.headers, data=data )
[ "def", "update", "(", "self", ",", "id", ",", "name", ",", "incident_preference", ")", ":", "data", "=", "{", "\"policy\"", ":", "{", "\"name\"", ":", "name", ",", "\"incident_preference\"", ":", "incident_preference", "}", "}", "return", "self", ".", "_pu...
This API endpoint allows you to update an alert policy :type id: integer :param id: The id of the policy :type name: str :param name: The name of the policy :type incident_preference: str :param incident_preference: Can be PER_POLICY, PER_CONDITION or PER_CONDITION_AND_TARGET :rtype: dict :return: The JSON response of the API :: { "policy": { "created_at": "time", "id": "integer", "incident_preference": "string", "name": "string", "updated_at": "time" } }
[ "This", "API", "endpoint", "allows", "you", "to", "update", "an", "alert", "policy" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L90-L132
19,821
ambitioninc/newrelic-api
newrelic_api/alert_policies.py
AlertPolicies.delete
def delete(self, id): """ This API endpoint allows you to delete an alert policy :type id: integer :param id: The id of the policy :rtype: dict :return: The JSON response of the API :: { "policy": { "created_at": "time", "id": "integer", "incident_preference": "string", "name": "string", "updated_at": "time" } } """ return self._delete( url='{0}alerts_policies/{1}.json'.format(self.URL, id), headers=self.headers )
python
def delete(self, id): return self._delete( url='{0}alerts_policies/{1}.json'.format(self.URL, id), headers=self.headers )
[ "def", "delete", "(", "self", ",", "id", ")", ":", "return", "self", ".", "_delete", "(", "url", "=", "'{0}alerts_policies/{1}.json'", ".", "format", "(", "self", ".", "URL", ",", "id", ")", ",", "headers", "=", "self", ".", "headers", ")" ]
This API endpoint allows you to delete an alert policy :type id: integer :param id: The id of the policy :rtype: dict :return: The JSON response of the API :: { "policy": { "created_at": "time", "id": "integer", "incident_preference": "string", "name": "string", "updated_at": "time" } }
[ "This", "API", "endpoint", "allows", "you", "to", "delete", "an", "alert", "policy" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L134-L161
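Continuing the sketch above, update() and delete() complete the policy lifecycle; note that update() requires both name and incident_preference, so unchanged values must be re-sent.

policies.update(id=policy_id, name='CPU policy', incident_preference='PER_CONDITION')
policies.delete(id=policy_id)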
19,822
ambitioninc/newrelic-api
newrelic_api/alert_policies.py
AlertPolicies.associate_with_notification_channel
def associate_with_notification_channel(self, id, channel_id): """ This API endpoint allows you to associate an alert policy with a notification channel :type id: integer :param id: The id of the policy :type channel_id: integer :param channel_id: The id of the notification channel :rtype: dict :return: The JSON response of the API :: { "policy": { "channel_ids": "list", "id": "integer" } } """ return self._put( url='{0}alerts_policy_channels.json?policy_id={1}&channel_ids={2}'.format( self.URL, id, channel_id ), headers=self.headers )
python
def associate_with_notification_channel(self, id, channel_id): return self._put( url='{0}alerts_policy_channels.json?policy_id={1}&channel_ids={2}'.format( self.URL, id, channel_id ), headers=self.headers )
[ "def", "associate_with_notification_channel", "(", "self", ",", "id", ",", "channel_id", ")", ":", "return", "self", ".", "_put", "(", "url", "=", "'{0}alerts_policy_channels.json?policy_id={1}&channel_ids={2}'", ".", "format", "(", "self", ".", "URL", ",", "id", ...
This API endpoint allows you to associate an alert policy with a notification channel :type id: integer :param id: The id of the policy :type channel_id: integer :param channel_id: The id of the notification channel :rtype: dict :return: The JSON response of the API :: { "policy": { "channel_ids": "list", "id": "integer" } }
[ "This", "API", "endpoint", "allows", "you", "to", "associate", "an", "alert", "policy", "with", "an", "notification", "channel" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L163-L195
19,823
ambitioninc/newrelic-api
newrelic_api/alert_policies.py
AlertPolicies.dissociate_from_notification_channel
def dissociate_from_notification_channel(self, id, channel_id): """ This API endpoint allows you to dissociate an alert policy from a notification channel :type id: integer :param id: The id of the policy :type channel_id: integer :param channel_id: The id of the notification channel :rtype: dict :return: The JSON response of the API :: { "channel":{ "configuration": "hash", "type": "string", "id": "integer", "links":{ "policy_ids": "list" }, "name": "string" } } """ return self._delete( url='{0}alerts_policy_channels.json?policy_id={1}&channel_id={2}'.format( self.URL, id, channel_id ), headers=self.headers )
python
def dissociate_from_notification_channel(self, id, channel_id): return self._delete( url='{0}alerts_policy_channels.json?policy_id={1}&channel_id={2}'.format( self.URL, id, channel_id ), headers=self.headers )
[ "def", "dissociate_from_notification_channel", "(", "self", ",", "id", ",", "channel_id", ")", ":", "return", "self", ".", "_delete", "(", "url", "=", "'{0}alerts_policy_channels.json?policy_id={1}&channel_id={2}'", ".", "format", "(", "self", ".", "URL", ",", "id",...
This API endpoint allows you to dissociate an alert policy from a notification channel :type id: integer :param id: The id of the policy :type channel_id: integer :param channel_id: The id of the notification channel :rtype: dict :return: The JSON response of the API :: { "channel":{ "configuration": "hash", "type": "string", "id": "integer", "links":{ "policy_ids": "list" }, "name": "string" } }
[ "This", "API", "endpoint", "allows", "you", "to", "dissociate", "an", "alert", "policy", "from", "an", "notification", "channel" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L197-L234
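A sketch of wiring a policy to a channel and back out, assuming both resources already exist; the key and IDs are hypothetical.

from newrelic_api.alert_policies import AlertPolicies

policies = AlertPolicies(api_key='YOUR_API_KEY')  # hypothetical key and IDs
policies.associate_with_notification_channel(id=112233, channel_id=445566)
policies.dissociate_from_notification_channel(id=112233, channel_id=445566)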
19,824
ambitioninc/newrelic-api
newrelic_api/alert_conditions.py
AlertConditions.list
def list(self, policy_id, page=None): """ This API endpoint returns a paginated list of alert conditions associated with the given policy_id. :type policy_id: int :param policy_id: Alert policy id :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "conditions": [ { "id": "integer", "type": "string", "condition_scope": "string", "name": "string", "enabled": "boolean", "entities": [ "integer" ], "metric": "string", "runbook_url": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "user_defined": { "metric": "string", "value_function": "string" } } ] } """ filters = [ 'policy_id={0}'.format(policy_id), 'page={0}'.format(page) if page else None ] return self._get( url='{0}alerts_conditions.json'.format(self.URL), headers=self.headers, params=self.build_param_string(filters) )
python
def list(self, policy_id, page=None): filters = [ 'policy_id={0}'.format(policy_id), 'page={0}'.format(page) if page else None ] return self._get( url='{0}alerts_conditions.json'.format(self.URL), headers=self.headers, params=self.build_param_string(filters) )
[ "def", "list", "(", "self", ",", "policy_id", ",", "page", "=", "None", ")", ":", "filters", "=", "[", "'policy_id={0}'", ".", "format", "(", "policy_id", ")", ",", "'page={0}'", ".", "format", "(", "page", ")", "if", "page", "else", "None", "]", "re...
This API endpoint returns a paginated list of alert conditions associated with the given policy_id. :type policy_id: int :param policy_id: Alert policy id :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "conditions": [ { "id": "integer", "type": "string", "condition_scope": "string", "name": "string", "enabled": "boolean", "entities": [ "integer" ], "metric": "string", "runbook_url": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "user_defined": { "metric": "string", "value_function": "string" } } ] }
[ "This", "API", "endpoint", "returns", "a", "paginated", "list", "of", "alert", "conditions", "associated", "with", "the", "given", "policy_id", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions.py#L9-L72
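A sketch of paging through AlertConditions.list; the key and policy ID are hypothetical.

from newrelic_api.alert_conditions import AlertConditions

conditions = AlertConditions(api_key='YOUR_API_KEY')  # hypothetical key
page_one = conditions.list(policy_id=112233, page=1)
for condition in page_one['conditions']:
    print(condition['id'], condition['name'], condition['enabled'])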
19,825
ambitioninc/newrelic-api
newrelic_api/alert_conditions.py
AlertConditions.update
def update( self, alert_condition_id, policy_id, type=None, condition_scope=None, name=None, entities=None, metric=None, runbook_url=None, terms=None, user_defined=None, enabled=None): """ Updates any of the optional parameters of the alert condition :type alert_condition_id: int :param alert_condition_id: Alerts condition id to update :type policy_id: int :param policy_id: Alert policy id where target alert condition belongs to :type type: str :param type: The type of the condition, can be apm_app_metric, apm_kt_metric, servers_metric, browser_metric, mobile_metric :type condition_scope: str :param condition_scope: The scope of the condition, can be instance or application :type name: str :param name: The name of the alert condition :type entities: list[str] :param entities: entity ids to which the alert condition is applied :type metric: str :param metric: The target metric :type runbook_url: str :param runbook_url: The url of the runbook :type terms: list[hash] :param terms: list of hashes containing threshold config for the alert :type user_defined: hash :param user_defined: hash containing threshold user_defined for the alert required if metric is set to user_defined :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :raises: This will raise a :class:`NoEntityException<newrelic_api.exceptions.NoEntityException>` if target alert condition is not included in target policy :raises: This will raise a :class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>` if metric is set as user_defined but user_defined config is not passed :: { "condition": { "id": "integer", "type": "string", "condition_scope": "string", "name": "string", "enabled": "boolean", "entities": [ "integer" ], "metric": "string", "runbook_url": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "user_defined": { "metric": "string", "value_function": "string" } } } """ conditions_dict = self.list(policy_id) target_condition = None for condition in conditions_dict['conditions']: if int(condition['id']) == alert_condition_id: target_condition = condition break if target_condition is None: raise NoEntityException( 'Target alert condition is not included in that policy.' 'policy_id: {}, alert_condition_id {}'.format(policy_id, alert_condition_id) ) data = { 'condition': { 'type': type or target_condition['type'], 'name': name or target_condition['name'], 'entities': entities or target_condition['entities'], 'condition_scope': condition_scope or target_condition['condition_scope'], 'terms': terms or target_condition['terms'], 'metric': metric or target_condition['metric'], 'runbook_url': runbook_url or target_condition['runbook_url'], } } if enabled is not None: data['condition']['enabled'] = str(enabled).lower() if data['condition']['metric'] == 'user_defined': if user_defined: data['condition']['user_defined'] = user_defined elif 'user_defined' in target_condition: data['condition']['user_defined'] = target_condition['user_defined'] else: raise ConfigurationException( 'Metric is set as user_defined but no user_defined config specified' ) return self._put( url='{0}alerts_conditions/{1}.json'.format(self.URL, alert_condition_id), headers=self.headers, data=data )
python
def update( self, alert_condition_id, policy_id, type=None, condition_scope=None, name=None, entities=None, metric=None, runbook_url=None, terms=None, user_defined=None, enabled=None): conditions_dict = self.list(policy_id) target_condition = None for condition in conditions_dict['conditions']: if int(condition['id']) == alert_condition_id: target_condition = condition break if target_condition is None: raise NoEntityException( 'Target alert condition is not included in that policy.' 'policy_id: {}, alert_condition_id {}'.format(policy_id, alert_condition_id) ) data = { 'condition': { 'type': type or target_condition['type'], 'name': name or target_condition['name'], 'entities': entities or target_condition['entities'], 'condition_scope': condition_scope or target_condition['condition_scope'], 'terms': terms or target_condition['terms'], 'metric': metric or target_condition['metric'], 'runbook_url': runbook_url or target_condition['runbook_url'], } } if enabled is not None: data['condition']['enabled'] = str(enabled).lower() if data['condition']['metric'] == 'user_defined': if user_defined: data['condition']['user_defined'] = user_defined elif 'user_defined' in target_condition: data['condition']['user_defined'] = target_condition['user_defined'] else: raise ConfigurationException( 'Metric is set as user_defined but no user_defined config specified' ) return self._put( url='{0}alerts_conditions/{1}.json'.format(self.URL, alert_condition_id), headers=self.headers, data=data )
[ "def", "update", "(", "self", ",", "alert_condition_id", ",", "policy_id", ",", "type", "=", "None", ",", "condition_scope", "=", "None", ",", "name", "=", "None", ",", "entities", "=", "None", ",", "metric", "=", "None", ",", "runbook_url", "=", "None",...
Updates any of the optional parameters of the alert condition :type alert_condition_id: int :param alert_condition_id: Alerts condition id to update :type policy_id: int :param policy_id: Alert policy id where target alert condition belongs to :type type: str :param type: The type of the condition, can be apm_app_metric, apm_kt_metric, servers_metric, browser_metric, mobile_metric :type condition_scope: str :param condition_scope: The scope of the condition, can be instance or application :type name: str :param name: The name of the alert condition :type entities: list[str] :param entities: entity ids to which the alert condition is applied :type metric: str :param metric: The target metric :type runbook_url: str :param runbook_url: The url of the runbook :type terms: list[hash] :param terms: list of hashes containing threshold config for the alert :type user_defined: hash :param user_defined: hash containing threshold user_defined for the alert required if metric is set to user_defined :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :raises: This will raise a :class:`NoEntityException<newrelic_api.exceptions.NoEntityException>` if target alert condition is not included in target policy :raises: This will raise a :class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>` if metric is set as user_defined but user_defined config is not passed :: { "condition": { "id": "integer", "type": "string", "condition_scope": "string", "name": "string", "enabled": "boolean", "entities": [ "integer" ], "metric": "string", "runbook_url": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "user_defined": { "metric": "string", "value_function": "string" } } }
[ "Updates", "any", "of", "the", "optional", "parameters", "of", "the", "alert", "condition" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions.py#L74-L207
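Because update() first lists the policy's conditions and merges in any unchanged fields, a partial update only needs the fields being changed. A sketch with hypothetical IDs, catching the exception documented above:

from newrelic_api.alert_conditions import AlertConditions
from newrelic_api.exceptions import NoEntityException

conditions = AlertConditions(api_key='YOUR_API_KEY')  # hypothetical key and IDs
try:
    # Disable the condition; all other fields are carried over unchanged
    conditions.update(alert_condition_id=987654, policy_id=112233, enabled=False)
except NoEntityException:
    # Raised when the condition does not belong to the given policy
    pass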
19,826
ambitioninc/newrelic-api
newrelic_api/alert_conditions.py
AlertConditions.create
def create( self, policy_id, type, condition_scope, name, entities, metric, terms, runbook_url=None, user_defined=None, enabled=True): """ Creates an alert condition :type policy_id: int :param policy_id: Alert policy id where target alert condition belongs to :type type: str :param type: The type of the condition, can be apm_app_metric, apm_kt_metric, servers_metric, browser_metric, mobile_metric :type condition_scope: str :param condition_scope: The scope of the condition, can be instance or application :type name: str :param name: The name of the alert condition :type entities: list[str] :param entities: entity ids to which the alert condition is applied :type metric: str :param metric: The target metric :type runbook_url: str :param runbook_url: The url of the runbook :type terms: list[hash] :param terms: list of hashes containing threshold config for the alert :type user_defined: hash :param user_defined: hash containing threshold user_defined for the alert required if metric is set to user_defined :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :: { "condition": { "id": "integer", "type": "string", "condition_scope": "string", "name": "string", "enabled": "boolean", "entities": [ "integer" ], "metric": "string", "runbook_url": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "user_defined": { "metric": "string", "value_function": "string" } } } """ data = { 'condition': { 'type': type, 'name': name, 'enabled': enabled, 'entities': entities, 'condition_scope': condition_scope, 'terms': terms, 'metric': metric, 'runbook_url': runbook_url, } } if metric == 'user_defined': if user_defined: data['condition']['user_defined'] = user_defined else: raise ConfigurationException( 'Metric is set as user_defined but no user_defined config specified' ) return self._post( url='{0}alerts_conditions/policies/{1}.json'.format(self.URL, policy_id), headers=self.headers, data=data )
python
def create( self, policy_id, type, condition_scope, name, entities, metric, terms, runbook_url=None, user_defined=None, enabled=True): data = { 'condition': { 'type': type, 'name': name, 'enabled': enabled, 'entities': entities, 'condition_scope': condition_scope, 'terms': terms, 'metric': metric, 'runbook_url': runbook_url, } } if metric == 'user_defined': if user_defined: data['condition']['user_defined'] = user_defined else: raise ConfigurationException( 'Metric is set as user_defined but no user_defined config specified' ) return self._post( url='{0}alerts_conditions/policies/{1}.json'.format(self.URL, policy_id), headers=self.headers, data=data )
[ "def", "create", "(", "self", ",", "policy_id", ",", "type", ",", "condition_scope", ",", "name", ",", "entities", ",", "metric", ",", "terms", ",", "runbook_url", "=", "None", ",", "user_defined", "=", "None", ",", "enabled", "=", "True", ")", ":", "d...
Creates an alert condition :type policy_id: int :param policy_id: Alert policy id where target alert condition belongs to :type type: str :param type: The type of the condition, can be apm_app_metric, apm_kt_metric, servers_metric, browser_metric, mobile_metric :type condition_scope: str :param condition_scope: The scope of the condition, can be instance or application :type name: str :param name: The name of the alert condition :type entities: list[str] :param entities: entity ids to which the alert condition is applied :type metric: str :param metric: The target metric :type runbook_url: str :param runbook_url: The url of the runbook :type terms: list[hash] :param terms: list of hashes containing threshold config for the alert :type user_defined: hash :param user_defined: hash containing threshold user_defined for the alert required if metric is set to user_defined :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :: { "condition": { "id": "integer", "type": "string", "condition_scope": "string", "name": "string", "enabled": "boolean", "entities": [ "integer" ], "metric": "string", "runbook_url": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "user_defined": { "metric": "string", "value_function": "string" } } }
[ "Creates", "an", "alert", "condition" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions.py#L209-L315
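Continuing with the same client, a sketch of create() with a single critical term; the IDs, entity, and metric name are illustrative only and must match a real application and metric in practice.

terms = [{
    'duration': '5',            # minutes
    'operator': 'above',
    'priority': 'critical',
    'threshold': '90',
    'time_function': 'all',
}]
conditions.create(
    policy_id=112233,           # hypothetical policy
    type='apm_app_metric',
    condition_scope='application',
    name='High error percentage',
    entities=['1234567'],       # hypothetical application id
    metric='error_percentage',  # illustrative metric name
    terms=terms,
)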
19,827
ambitioninc/newrelic-api
newrelic_api/alert_conditions.py
AlertConditions.delete
def delete(self, alert_condition_id): """ This API endpoint allows you to delete an alert condition :type alert_condition_id: integer :param alert_condition_id: Alert Condition ID :rtype: dict :return: The JSON response of the API :: { "condition": { "id": "integer", "type": "string", "condition_scope": "string", "name": "string", "enabled": "boolean", "entities": [ "integer" ], "metric": "string", "runbook_url": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "user_defined": { "metric": "string", "value_function": "string" } } } """ return self._delete( url='{0}alerts_conditions/{1}.json'.format(self.URL, alert_condition_id), headers=self.headers )
python
def delete(self, alert_condition_id): return self._delete( url='{0}alerts_conditions/{1}.json'.format(self.URL, alert_condition_id), headers=self.headers )
[ "def", "delete", "(", "self", ",", "alert_condition_id", ")", ":", "return", "self", ".", "_delete", "(", "url", "=", "'{0}alerts_conditions/{1}.json'", ".", "format", "(", "self", ".", "URL", ",", "alert_condition_id", ")", ",", "headers", "=", "self", ".",...
This API endpoint allows you to delete an alert condition :type alert_condition_id: integer :param alert_condition_id: Alert Condition ID :rtype: dict :return: The JSON response of the API :: { "condition": { "id": "integer", "type": "string", "condition_scope": "string", "name": "string", "enabled": "boolean", "entities": [ "integer" ], "metric": "string", "runbook_url": "string", "terms": [ { "duration": "string", "operator": "string", "priority": "string", "threshold": "string", "time_function": "string" } ], "user_defined": { "metric": "string", "value_function": "string" } } }
[ "This", "API", "endpoint", "allows", "you", "to", "delete", "an", "alert", "condition" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions.py#L317-L362
19,828
ambitioninc/newrelic-api
newrelic_api/alert_conditions_infra.py
AlertConditionsInfra.list
def list(self, policy_id, limit=None, offset=None): """ This API endpoint returns a paginated list of alert conditions for infrastructure metrics associated with the given policy_id. :type policy_id: int :param policy_id: Alert policy id :type limit: string :param limit: Max amount of results to return :type offset: string :param offset: Starting record to return :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "data": [ { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } ], "meta": { "limit": "integer", "offset": "integer", "total": "integer" } } """ filters = [ 'policy_id={0}'.format(policy_id), 'limit={0}'.format(limit) if limit else 'limit=50', 'offset={0}'.format(offset) if offset else 'offset=0' ] return self._get( url='{0}alerts/conditions'.format(self.URL), headers=self.headers, params=self.build_param_string(filters) )
python
def list(self, policy_id, limit=None, offset=None): filters = [ 'policy_id={0}'.format(policy_id), 'limit={0}'.format(limit) if limit else 'limit=50', 'offset={0}'.format(offset) if offset else 'offset=0' ] return self._get( url='{0}alerts/conditions'.format(self.URL), headers=self.headers, params=self.build_param_string(filters) )
[ "def", "list", "(", "self", ",", "policy_id", ",", "limit", "=", "None", ",", "offset", "=", "None", ")", ":", "filters", "=", "[", "'policy_id={0}'", ".", "format", "(", "policy_id", ")", ",", "'limit={0}'", ".", "format", "(", "limit", ")", "if", "...
This API endpoint returns a paginated list of alert conditions for infrastructure metrics associated with the given policy_id. :type policy_id: int :param policy_id: Alert policy id :type limit: string :param limit: Max amount of results to return :type offset: string :param offset: Starting record to return :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "data": [ { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } ], "meta": { "limit": "integer", "offset": "integer", "total": "integer" } }
[ "This", "API", "endpoint", "returns", "a", "paginated", "list", "of", "alert", "conditions", "for", "infrastucture", "metrics", "associated", "with", "the", "given", "policy_id", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L13-L69
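A sketch of listing infrastructure conditions with explicit paging; note that the wrapper falls back to limit=50 and offset=0 when the arguments are omitted. The key and policy ID are hypothetical.

from newrelic_api.alert_conditions_infra import AlertConditionsInfra

infra = AlertConditionsInfra(api_key='YOUR_API_KEY')  # hypothetical key
result = infra.list(policy_id=112233, limit=25, offset=0)
print(result['meta']['total'])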
19,829
ambitioninc/newrelic-api
newrelic_api/alert_conditions_infra.py
AlertConditionsInfra.show
def show(self, alert_condition_infra_id): """ This API endpoint returns an alert condition for infrastructure, identified by its ID. :type alert_condition_infra_id: int :param alert_condition_infra_id: Alert Condition Infra ID :rtype: dict :return: The JSON response of the API :: { "data": { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "event_type": "string", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } } """ return self._get( url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id), headers=self.headers, )
python
def show(self, alert_condition_infra_id): return self._get( url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id), headers=self.headers, )
[ "def", "show", "(", "self", ",", "alert_condition_infra_id", ")", ":", "return", "self", ".", "_get", "(", "url", "=", "'{0}alerts/conditions/{1}'", ".", "format", "(", "self", ".", "URL", ",", "alert_condition_infra_id", ")", ",", "headers", "=", "self", "....
This API endpoint returns an alert condition for infrastructure, identified by its ID. :type alert_condition_infra_id: int :param alert_condition_infra_id: Alert Condition Infra ID :rtype: dict :return: The JSON response of the API :: { "data": { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "event_type": "string", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } }
[ "This", "API", "endpoint", "returns", "an", "alert", "condition", "for", "infrastucture", "identified", "by", "its", "ID", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L71-L106
19,830
ambitioninc/newrelic-api
newrelic_api/alert_conditions_infra.py
AlertConditionsInfra.create
def create(self, policy_id, name, condition_type, alert_condition_configuration, enabled=True): """ This API endpoint allows you to create an alert condition for infrastructure :type policy_id: int :param policy_id: Alert policy id :type name: str :param name: The name of the alert condition :type condition_type: str :param condition_type: The type of the alert condition can be infra_process_running, infra_metric or infra_host_not_reporting :type alert_condition_configuration: hash :param alert_condition_configuration: hash containing config for the alert :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :: { "data": { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "event_type": "string", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } } """ data = { "data": alert_condition_configuration } data['data']['type'] = condition_type data['data']['policy_id'] = policy_id data['data']['name'] = name data['data']['enabled'] = enabled return self._post( url='{0}alerts/conditions'.format(self.URL), headers=self.headers, data=data )
python
def create(self, policy_id, name, condition_type, alert_condition_configuration, enabled=True): data = { "data": alert_condition_configuration } data['data']['type'] = condition_type data['data']['policy_id'] = policy_id data['data']['name'] = name data['data']['enabled'] = enabled return self._post( url='{0}alerts/conditions'.format(self.URL), headers=self.headers, data=data )
[ "def", "create", "(", "self", ",", "policy_id", ",", "name", ",", "condition_type", ",", "alert_condition_configuration", ",", "enabled", "=", "True", ")", ":", "data", "=", "{", "\"data\"", ":", "alert_condition_configuration", "}", "data", "[", "'data'", "]"...
This API endpoint allows you to create an alert condition for infrastructure :type policy_id: int :param policy_id: Alert policy id :type name: str :param name: The name of the alert condition :type condition_type: str :param condition_type: The type of the alert condition can be infra_process_running, infra_metric or infra_host_not_reporting :type alert_condition_configuration: hash :param alert_condition_configuration: hash containing config for the alert :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :: { "data": { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "event_type": "string", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } }
[ "This", "API", "endpoint", "allows", "you", "to", "create", "an", "alert", "condition", "for", "infrastucture" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L108-L166
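Continuing with the infra client above, a sketch of create() for an infra_metric condition. The configuration hash is passed through to the API unvalidated by this wrapper, so the keys below are illustrative only.

config = {
    'event_type': 'SystemSample',        # illustrative configuration keys
    'select_value': 'cpuPercent',
    'comparison': 'above',
    'critical_threshold': {'value': 90, 'duration_minutes': 5},
}
infra.create(
    policy_id=112233,                    # hypothetical policy
    name='High CPU',
    condition_type='infra_metric',
    alert_condition_configuration=config,
)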
19,831
ambitioninc/newrelic-api
newrelic_api/alert_conditions_infra.py
AlertConditionsInfra.update
def update(self, alert_condition_infra_id, policy_id, name, condition_type, alert_condition_configuration, enabled=True): """ This API endpoint allows you to update an alert condition for infrastructure :type alert_condition_infra_id: int :param alert_condition_infra_id: Alert Condition Infra ID :type policy_id: int :param policy_id: Alert policy id :type name: str :param name: The name of the alert condition :type condition_type: str :param condition_type: The type of the alert condition can be infra_process_running, infra_metric or infra_host_not_reporting :type alert_condition_configuration: hash :param alert_condition_configuration: hash containing config for the alert :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :: { "data": { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "event_type": "string", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } } """ data = { "data": alert_condition_configuration } data['data']['type'] = condition_type data['data']['policy_id'] = policy_id data['data']['name'] = name data['data']['enabled'] = enabled return self._put( url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id), headers=self.headers, data=data )
python
def update(self, alert_condition_infra_id, policy_id, name, condition_type, alert_condition_configuration, enabled=True): data = { "data": alert_condition_configuration } data['data']['type'] = condition_type data['data']['policy_id'] = policy_id data['data']['name'] = name data['data']['enabled'] = enabled return self._put( url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id), headers=self.headers, data=data )
[ "def", "update", "(", "self", ",", "alert_condition_infra_id", ",", "policy_id", ",", "name", ",", "condition_type", ",", "alert_condition_configuration", ",", "enabled", "=", "True", ")", ":", "data", "=", "{", "\"data\"", ":", "alert_condition_configuration", "}...
This API endpoint allows you to update an alert condition for infrastructure :type alert_condition_infra_id: int :param alert_condition_infra_id: Alert Condition Infra ID :type policy_id: int :param policy_id: Alert policy id :type name: str :param name: The name of the alert condition :type condition_type: str :param condition_type: The type of the alert condition can be infra_process_running, infra_metric or infra_host_not_reporting :type alert_condition_configuration: hash :param alert_condition_configuration: hash containing config for the alert :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :: { "data": { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "event_type": "string", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } }
[ "This", "API", "endpoint", "allows", "you", "to", "update", "an", "alert", "condition", "for", "infrastucture" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L168-L230
19,832
ambitioninc/newrelic-api
newrelic_api/alert_conditions_infra.py
AlertConditionsInfra.delete
def delete(self, alert_condition_infra_id): """ This API endpoint allows you to delete an alert condition for infrastructure :type alert_condition_infra_id: integer :param alert_condition_infra_id: Alert Condition Infra ID :rtype: dict :return: The JSON response of the API :: {} """ return self._delete( url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id), headers=self.headers )
python
def delete(self, alert_condition_infra_id): return self._delete( url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id), headers=self.headers )
[ "def", "delete", "(", "self", ",", "alert_condition_infra_id", ")", ":", "return", "self", ".", "_delete", "(", "url", "=", "'{0}alerts/conditions/{1}'", ".", "format", "(", "self", ".", "URL", ",", "alert_condition_infra_id", ")", ",", "headers", "=", "self",...
This API endpoint allows you to delete an alert condition for infrastructure :type alert_condition_infra_id: integer :param alert_condition_infra_id: Alert Condition Infra ID :rtype: dict :return: The JSON response of the API :: {}
[ "This", "API", "endpoint", "allows", "you", "to", "delete", "an", "alert", "condition", "for", "infrastucture" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L232-L251
19,833
ambitioninc/newrelic-api
newrelic_api/labels.py
Labels.create
def create(self, name, category, applications=None, servers=None): """ This API endpoint will create a new label with the provided name and category :type name: str :param name: The name of the label :type category: str :param category: The Category :type applications: list of int :param applications: An optional list of application IDs :type servers: list of int :param servers: An optional list of server IDs :rtype: dict :return: The JSON response of the API :: { "label": { "key": "string", "category": "string", "name": "string", "links": { "applications": [ "integer" ], "servers": [ "integer" ] } } } """ data = { "label": { "category": category, "name": name, "links": { "applications": applications or [], "servers": servers or [] } } } return self._put( url='{0}labels.json'.format(self.URL), headers=self.headers, data=data )
python
def create(self, name, category, applications=None, servers=None): data = { "label": { "category": category, "name": name, "links": { "applications": applications or [], "servers": servers or [] } } } return self._put( url='{0}labels.json'.format(self.URL), headers=self.headers, data=data )
[ "def", "create", "(", "self", ",", "name", ",", "category", ",", "applications", "=", "None", ",", "servers", "=", "None", ")", ":", "data", "=", "{", "\"label\"", ":", "{", "\"category\"", ":", "category", ",", "\"name\"", ":", "name", ",", "\"links\"...
This API endpoint will create a new label with the provided name and category :type name: str :param name: The name of the label :type category: str :param category: The Category :type applications: list of int :param applications: An optional list of application IDs :type servers: list of int :param servers: An optional list of server IDs :rtype: dict :return: The JSON response of the API :: { "label": { "key": "string", "category": "string", "name": "string", "links": { "applications": [ "integer" ], "servers": [ "integer" ] } } }
[ "This", "API", "endpoint", "will", "create", "a", "new", "label", "with", "the", "provided", "name", "and", "category" ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/labels.py#L62-L117
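A sketch covering the label lifecycle: create() above, then delete() (documented next) using the 'Category:Name' key format shown in its docstring. The key and application ID are hypothetical.

from newrelic_api.labels import Labels

labels = Labels(api_key='YOUR_API_KEY')  # hypothetical key and IDs
labels.create(name='Java', category='Language', applications=[1234567])
labels.delete(key='Language:Java')       # labels are keyed as 'Category:Name'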
19,834
ambitioninc/newrelic-api
newrelic_api/labels.py
Labels.delete
def delete(self, key): """ When applications are provided, this endpoint will remove those applications from the label. When no applications are provided, this endpoint will remove the label. :type key: str :param key: Label key. Example: 'Language:Java' :rtype: dict :return: The JSON response of the API :: { "label": { "key": "string", "category": "string", "name": "string", "links": { "applications": [ "integer" ], "servers": [ "integer" ] } } } """ return self._delete( url='{url}labels/labels/{key}.json'.format( url=self.URL, key=key), headers=self.headers, )
python
def delete(self, key): return self._delete( url='{url}labels/labels/{key}.json'.format( url=self.URL, key=key), headers=self.headers, )
[ "def", "delete", "(", "self", ",", "key", ")", ":", "return", "self", ".", "_delete", "(", "url", "=", "'{url}labels/labels/{key}.json'", ".", "format", "(", "url", "=", "self", ".", "URL", ",", "key", "=", "key", ")", ",", "headers", "=", "self", "....
When applications are provided, this endpoint will remove those applications from the label. When no applications are provided, this endpoint will remove the label. :type key: str :param key: Label key. Example: 'Language:Java' :rtype: dict :return: The JSON response of the API :: { "label": { "key": "string", "category": "string", "name": "string", "links": { "applications": [ "integer" ], "servers": [ "integer" ] } } }
[ "When", "applications", "are", "provided", "this", "endpoint", "will", "remove", "those", "applications", "from", "the", "label", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/labels.py#L119-L156
19,835
ambitioninc/newrelic-api
newrelic_api/plugins.py
Plugins.list
def list(self, filter_guid=None, filter_ids=None, detailed=None, page=None): """ This API endpoint returns a paginated list of the plugins associated with your New Relic account. Plugins can be filtered by their GUID or by a list of IDs. :type filter_guid: str :param filter_guid: Filter by GUID :type filter_ids: list of ints :param filter_ids: Filter by plugin ids :type detailed: bool :param detailed: Include all data about a plugin :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "plugins": [ { "id": "integer", "name": "string", "guid": "string", "publisher": "string", "details": { "description": "integer", "is_public": "string", "created_at": "time", "updated_at": "time", "last_published_at": "time", "has_unpublished_changes": "boolean", "branding_image_url": "string", "upgraded_at": "time", "short_name": "string", "publisher_about_url": "string", "publisher_support_url": "string", "download_url": "string", "first_edited_at": "time", "last_edited_at": "time", "first_published_at": "time", "published_version": "string" }, "summary_metrics": [ { "id": "integer", "name": "string", "metric": "string", "value_function": "string", "thresholds": { "caution": "float", "critical": "float" }, "values": { "raw": "float", "formatted": "string" } } ] } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/plugins.json?page=2", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/plugins.json?page=2", "rel": "next" } } } """ filters = [ 'filter[guid]={0}'.format(filter_guid) if filter_guid else None, 'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None, 'detailed={0}'.format(detailed) if detailed is not None else None, 'page={0}'.format(page) if page else None ] return self._get( url='{0}plugins.json'.format(self.URL), headers=self.headers, params=self.build_param_string(filters) )
python
def list(self, filter_guid=None, filter_ids=None, detailed=None, page=None): filters = [ 'filter[guid]={0}'.format(filter_guid) if filter_guid else None, 'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None, 'detailed={0}'.format(detailed) if detailed is not None else None, 'page={0}'.format(page) if page else None ] return self._get( url='{0}plugins.json'.format(self.URL), headers=self.headers, params=self.build_param_string(filters) )
[ "def", "list", "(", "self", ",", "filter_guid", "=", "None", ",", "filter_ids", "=", "None", ",", "detailed", "=", "None", ",", "page", "=", "None", ")", ":", "filters", "=", "[", "'filter[guid]={0}'", ".", "format", "(", "filter_guid", ")", "if", "fil...
This API endpoint returns a paginated list of the plugins associated with your New Relic account. Plugins can be filtered by their GUID or by a list of IDs. :type filter_guid: str :param filter_guid: Filter by GUID :type filter_ids: list of ints :param filter_ids: Filter by plugin ids :type detailed: bool :param detailed: Include all data about a plugin :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "plugins": [ { "id": "integer", "name": "string", "guid": "string", "publisher": "string", "details": { "description": "integer", "is_public": "string", "created_at": "time", "updated_at": "time", "last_published_at": "time", "has_unpublished_changes": "boolean", "branding_image_url": "string", "upgraded_at": "time", "short_name": "string", "publisher_about_url": "string", "publisher_support_url": "string", "download_url": "string", "first_edited_at": "time", "last_edited_at": "time", "first_published_at": "time", "published_version": "string" }, "summary_metrics": [ { "id": "integer", "name": "string", "metric": "string", "value_function": "string", "thresholds": { "caution": "float", "critical": "float" }, "values": { "raw": "float", "formatted": "string" } } ] } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/plugins.json?page=2", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/plugins.json?page=2", "rel": "next" } } }
[ "This", "API", "endpoint", "returns", "a", "paginated", "list", "of", "the", "plugins", "associated", "with", "your", "New", "Relic", "account", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/plugins.py#L8-L100
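A sketch of Plugins.list with the filters it supports; the key and IDs are hypothetical.

from newrelic_api.plugins import Plugins

plugins = Plugins(api_key='YOUR_API_KEY')  # hypothetical key
detailed = plugins.list(filter_ids=[123, 456], detailed=True)
for plugin in detailed['plugins']:
    print(plugin['id'], plugin['name'])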
19,836
ambitioninc/newrelic-api
newrelic_api/application_instances.py
ApplicationInstances.list
def list( self, application_id, filter_hostname=None, filter_ids=None, page=None): """ This API endpoint returns a paginated list of instances associated with the given application. Application instances can be filtered by hostname, or the list of application instance IDs. :type application_id: int :param application_id: Application ID :type filter_hostname: str :param filter_hostname: Filter by server hostname :type filter_ids: list of ints :param filter_ids: Filter by application instance ids :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "application_instances": [ { "id": "integer", "application_name": "string", "host": "string", "port": "integer", "language": "integer", "health_status": "string", "application_summary": { "response_time": "float", "throughput": "float", "error_rate": "float", "apdex_score": "float" }, "end_user_summary": { "response_time": "float", "throughput": "float", "apdex_score": "float" }, "links": { "application": "integer", "application_host": "integer", "server": "integer" } } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2", "rel": "next" } } } """ filters = [ 'filter[hostname]={0}'.format(filter_hostname) if filter_hostname else None, 'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None, 'page={0}'.format(page) if page else None ] return self._get( url='{root}applications/{application_id}/instances.json'.format( root=self.URL, application_id=application_id ), headers=self.headers, params=self.build_param_string(filters) )
python
def list( self, application_id, filter_hostname=None, filter_ids=None, page=None): filters = [ 'filter[hostname]={0}'.format(filter_hostname) if filter_hostname else None, 'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None, 'page={0}'.format(page) if page else None ] return self._get( url='{root}applications/{application_id}/instances.json'.format( root=self.URL, application_id=application_id ), headers=self.headers, params=self.build_param_string(filters) )
[ "def", "list", "(", "self", ",", "application_id", ",", "filter_hostname", "=", "None", ",", "filter_ids", "=", "None", ",", "page", "=", "None", ")", ":", "filters", "=", "[", "'filter[hostname]={0}'", ".", "format", "(", "filter_hostname", ")", "if", "fi...
This API endpoint returns a paginated list of instances associated with the given application. Application instances can be filtered by hostname, or the list of application instance IDs. :type application_id: int :param application_id: Application ID :type filter_hostname: str :param filter_hostname: Filter by server hostname :type filter_ids: list of ints :param filter_ids: Filter by application instance ids :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "application_instances": [ { "id": "integer", "application_name": "string", "host": "string", "port": "integer", "language": "integer", "health_status": "string", "application_summary": { "response_time": "float", "throughput": "float", "error_rate": "float", "apdex_score": "float" }, "end_user_summary": { "response_time": "float", "throughput": "float", "apdex_score": "float" }, "links": { "application": "integer", "application_host": "integer", "server": "integer" } } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2", "rel": "next" } } }
[ "This", "API", "endpoint", "returns", "a", "paginated", "list", "of", "instances", "associated", "with", "the", "given", "application", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/application_instances.py#L8-L88
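A sketch of listing an application's instances filtered by hostname; the key, ID, and hostname are hypothetical.

from newrelic_api.application_instances import ApplicationInstances

instances = ApplicationInstances(api_key='YOUR_API_KEY')  # hypothetical key
result = instances.list(application_id=1234567, filter_hostname='web01.example.com')
for instance in result['application_instances']:
    print(instance['id'], instance['host'], instance['health_status'])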
19,837
ambitioninc/newrelic-api
newrelic_api/application_hosts.py
ApplicationHosts.show
def show(self, application_id, host_id): """ This API endpoint returns a single application host, identified by its ID. :type application_id: int :param application_id: Application ID :type host_id: int :param host_id: Application host ID :rtype: dict :return: The JSON response of the API :: { "application_host": { "id": "integer", "application_name": "string", "host": "string", "language": "integer", "health_status": "string", "application_summary": { "response_time": "float", "throughput": "float", "error_rate": "float", "apdex_score": "float" }, "end_user_summary": { "response_time": "float", "throughput": "float", "apdex_score": "float" }, "links": { "application": "integer", "application_instances": [ "integer" ], "server": "integer" } } } """ return self._get( url='{root}applications/{application_id}/hosts/{host_id}.json'.format( root=self.URL, application_id=application_id, host_id=host_id ), headers=self.headers, )
python
def show(self, application_id, host_id): return self._get( url='{root}applications/{application_id}/hosts/{host_id}.json'.format( root=self.URL, application_id=application_id, host_id=host_id ), headers=self.headers, )
[ "def", "show", "(", "self", ",", "application_id", ",", "host_id", ")", ":", "return", "self", ".", "_get", "(", "url", "=", "'{root}applications/{application_id}/hosts/{host_id}.json'", ".", "format", "(", "root", "=", "self", ".", "URL", ",", "application_id",...
This API endpoint returns a single application host, identified by its ID. :type application_id: int :param application_id: Application ID :type host_id: int :param host_id: Application host ID :rtype: dict :return: The JSON response of the API :: { "application_host": { "id": "integer", "application_name": "string", "host": "string", "language": "integer", "health_status": "string", "application_summary": { "response_time": "float", "throughput": "float", "error_rate": "float", "apdex_score": "float" }, "end_user_summary": { "response_time": "float", "throughput": "float", "apdex_score": "float" }, "links": { "application": "integer", "application_instances": [ "integer" ], "server": "integer" } } }
[ "This", "API", "endpoint", "returns", "a", "single", "application", "host", "identified", "by", "its", "ID", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/application_hosts.py#L91-L143
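A sketch of fetching a single application host; the key and both IDs are hypothetical.

from newrelic_api.application_hosts import ApplicationHosts

hosts = ApplicationHosts(api_key='YOUR_API_KEY')  # hypothetical key
host = hosts.show(application_id=1234567, host_id=7654321)['application_host']
print(host['host'], host['health_status'])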
19,838
ambitioninc/newrelic-api
newrelic_api/components.py
Components.metric_data
def metric_data( self, id, names, values=None, from_dt=None, to_dt=None, summarize=False): """ This API endpoint returns a list of values for each of the requested metrics. The list of available metrics can be returned using the Metric Name API endpoint. Metric data can be filtered by a number of parameters, including multiple names and values, and by time range. Metric names and values will be matched intelligently in the background. You can also retrieve a summarized data point across the entire time range selected by using the summarize parameter. **Note** All times sent and received are formatted in UTC. The default time range is the last 30 minutes. :type id: int :param id: Component ID :type names: list of str :param names: Retrieve specific metrics by name :type values: list of str :param values: Retrieve specific metric values :type from_dt: datetime :param from_dt: Retrieve metrics after this time :type to_dt: datetime :param to_dt: Retrieve metrics before this time :type summarize: bool :param summarize: Summarize the data :rtype: dict :return: The JSON response of the API :: { "metric_data": { "from": "time", "to": "time", "metrics": [ { "name": "string", "timeslices": [ { "from": "time", "to": "time", "values": "hash" } ] } ] } } """ params = [ 'from={0}'.format(from_dt) if from_dt else None, 'to={0}'.format(to_dt) if to_dt else None, 'summarize=true' if summarize else None ] params += ['names[]={0}'.format(name) for name in names] if values: params += ['values[]={0}'.format(value) for value in values] return self._get( url='{0}components/{1}/metrics/data.json'.format(self.URL, id), headers=self.headers, params=self.build_param_string(params) )
python
def metric_data( self, id, names, values=None, from_dt=None, to_dt=None, summarize=False): params = [ 'from={0}'.format(from_dt) if from_dt else None, 'to={0}'.format(to_dt) if to_dt else None, 'summarize=true' if summarize else None ] params += ['names[]={0}'.format(name) for name in names] if values: params += ['values[]={0}'.format(value) for value in values] return self._get( url='{0}components/{1}/metrics/data.json'.format(self.URL, id), headers=self.headers, params=self.build_param_string(params) )
[ "def", "metric_data", "(", "self", ",", "id", ",", "names", ",", "values", "=", "None", ",", "from_dt", "=", "None", ",", "to_dt", "=", "None", ",", "summarize", "=", "False", ")", ":", "params", "=", "[", "'from={0}'", ".", "format", "(", "from_dt",...
This API endpoint returns a list of values for each of the requested metrics. The list of available metrics can be returned using the Metric Name API endpoint. Metric data can be filtered by a number of parameters, including multiple names and values, and by time range. Metric names and values will be matched intelligently in the background. You can also retrieve a summarized data point across the entire time range selected by using the summarize parameter. **Note** All times sent and received are formatted in UTC. The default time range is the last 30 minutes. :type id: int :param id: Component ID :type names: list of str :param names: Retrieve specific metrics by name :type values: list of str :param values: Retrieve specific metric values :type from_dt: datetime :param from_dt: Retrieve metrics after this time :type to_dt: datetime :param to_dt: Retrieve metrics before this time :type summarize: bool :param summarize: Summarize the data :rtype: dict :return: The JSON response of the API :: { "metric_data": { "from": "time", "to": "time", "metrics": [ { "name": "string", "timeslices": [ { "from": "time", "to": "time", "values": "hash" } ] } ] } }
[ "This", "API", "endpoint", "returns", "a", "list", "of", "values", "for", "each", "of", "the", "requested", "metrics", ".", "The", "list", "of", "available", "metrics", "can", "be", "returned", "using", "the", "Metric", "Name", "API", "endpoint", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/components.py#L176-L251
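A minimal usage sketch for the metric_data record above (the API key, component ID, and metric name are placeholders; the Components class name is inferred from the newrelic_api/components.py path):
from datetime import datetime, timedelta
from newrelic_api.components import Components

client = Components(api_key='YOUR_API_KEY')  # placeholder key
# Fetch the last hour of one metric for a component, summarized to a single value
response = client.metric_data(
    id=12345,                      # placeholder component ID
    names=['CPU/System Time'],     # placeholder metric name
    from_dt=datetime.utcnow() - timedelta(hours=1),
    to_dt=datetime.utcnow(),
    summarize=True,
)
for metric in response['metric_data']['metrics']:
    print(metric['name'], metric['timeslices'])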
19,839
ambitioninc/newrelic-api
newrelic_api/dashboards.py
Dashboards.create
def create(self, dashboard_data): """ This API endpoint creates a dashboard and all defined widgets. :type dashboard_data: dict :param dashboard_data: Dashboard Dictionary :rtype: dict :return: The JSON response of the API :: { "dashboard": { "id": "integer", "title": "string", "description": "string", "icon": "string", "created_at": "time", "updated_at": "time", "visibility": "string", "editable": "string", "ui_url": "string", "api_url": "string", "owner_email": "string", "metadata": { "version": "integer" }, "widgets": [ { "visualization": "string", "layout": { "width": "integer", "height": "integer", "row": "integer", "column": "integer" }, "widget_id": "integer", "account_id": "integer", "data": [ { "nrql": "string" } ], "presentation": { "title": "string", "notes": "string" } } ], "filter": { "event_types": ["string"], "attributes": ["string"] } } } """ return self._post( url='{0}dashboards.json'.format(self.URL), headers=self.headers, data=dashboard_data, )
python
def create(self, dashboard_data): return self._post( url='{0}dashboards.json'.format(self.URL), headers=self.headers, data=dashboard_data, )
[ "def", "create", "(", "self", ",", "dashboard_data", ")", ":", "return", "self", ".", "_post", "(", "url", "=", "'{0}dashboards.json'", ".", "format", "(", "self", ".", "URL", ")", ",", "headers", "=", "self", ".", "headers", ",", "data", "=", "dashboa...
This API endpoint creates a dashboard and all defined widgets. :type dashboard_data: dict :param dashboard_data: Dashboard Dictionary :rtype: dict :return: The JSON response of the API :: { "dashboard": { "id": "integer", "title": "string", "description": "string", "icon": "string", "created_at": "time", "updated_at": "time", "visibility": "string", "editable": "string", "ui_url": "string", "api_url": "string", "owner_email": "string", "metadata": { "version": "integer" }, "widgets": [ { "visualization": "string", "layout": { "width": "integer", "height": "integer", "row": "integer", "column": "integer" }, "widget_id": "integer", "account_id": "integer", "data": [ { "nrql": "string" } ], "presentation": { "title": "string", "notes": "string" } } ], "filter": { "event_types": ["string"], "attributes": ["string"] } } }
[ "This", "API", "endpoint", "creates", "a", "dashboard", "and", "all", "defined", "widgets", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/dashboards.py#L151-L209
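A minimal usage sketch for the create record above. The payload keys are copied from the response schema in the docstring; exactly which of them the endpoint requires is an assumption, and the API key is a placeholder:
from newrelic_api.dashboards import Dashboards

client = Dashboards(api_key='YOUR_API_KEY')  # placeholder key
payload = {
    'dashboard': {
        'title': 'Error rates',  # fields below inferred from the docstring schema
        'icon': 'bar-chart',
        'visibility': 'owner',
        'editable': 'editable_by_owner',
        'metadata': {'version': 1},
        'widgets': [],
    }
}
created = client.create(dashboard_data=payload)
print(created['dashboard']['id'])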
19,840
ambitioninc/newrelic-api
newrelic_api/dashboards.py
Dashboards.update
def update(self, id, dashboard_data): """ This API endpoint updates a dashboard and all defined widgets. :type id: int :param id: Dashboard ID :type dashboard_data: dict :param dashboard_data: Dashboard Dictionary :rtype: dict :return: The JSON response of the API :: { "dashboard": { "id": "integer", "title": "string", "description": "string", "icon": "string", "created_at": "time", "updated_at": "time", "visibility": "string", "editable": "string", "ui_url": "string", "api_url": "string", "owner_email": "string", "metadata": { "version": "integer" }, "widgets": [ { "visualization": "string", "layout": { "width": "integer", "height": "integer", "row": "integer", "column": "integer" }, "widget_id": "integer", "account_id": "integer", "data": [ { "nrql": "string" } ], "presentation": { "title": "string", "notes": "string" } } ], "filter": { "event_types": ["string"], "attributes": ["string"] } } } """ return self._put( url='{0}dashboards/{1}.json'.format(self.URL, id), headers=self.headers, data=dashboard_data, )
python
def update(self, id, dashboard_data): return self._put( url='{0}dashboards/{1}.json'.format(self.URL, id), headers=self.headers, data=dashboard_data, )
[ "def", "update", "(", "self", ",", "id", ",", "dashboard_data", ")", ":", "return", "self", ".", "_put", "(", "url", "=", "'{0}dashboards/{1}.json'", ".", "format", "(", "self", ".", "URL", ",", "id", ")", ",", "headers", "=", "self", ".", "headers", ...
This API endpoint updates a dashboard and all defined widgets. :type id: int :param id: Dashboard ID :type dashboard_data: dict :param dashboard_data: Dashboard Dictionary :rtype: dict :return: The JSON response of the API :: { "dashboard": { "id": "integer", "title": "string", "description": "string", "icon": "string", "created_at": "time", "updated_at": "time", "visibility": "string", "editable": "string", "ui_url": "string", "api_url": "string", "owner_email": "string", "metadata": { "version": "integer" }, "widgets": [ { "visualization": "string", "layout": { "width": "integer", "height": "integer", "row": "integer", "column": "integer" }, "widget_id": "integer", "account_id": "integer", "data": [ { "nrql": "string" } ], "presentation": { "title": "string", "notes": "string" } } ], "filter": { "event_types": ["string"], "attributes": ["string"] } } }
[ "This", "API", "endpoint", "updates", "a", "dashboard", "and", "all", "defined", "widgets", "." ]
07b4430aa6ae61e4704e2928a6e7a24c76f0f424
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/dashboards.py#L211-L272
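Continuing the create sketch above, update() takes the same payload shape plus the dashboard ID (still placeholders):
# Rename the dashboard created in the previous sketch
payload['dashboard']['title'] = 'Error rates (renamed)'
client.update(id=created['dashboard']['id'], dashboard_data=payload)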
19,841
borntyping/python-dice
dice/grammar.py
operatorPrecedence
def operatorPrecedence(base, operators): """ This re-implements pyparsing's operatorPrecedence function. It gets rid of a few annoying bugs, like always putting operators inside a Group, and matching the whole grammar with Forward first (there may actually be a reason for that, but I couldn't find it). It doesn't support trinary expressions, but they should be easy to add if it turns out I need them. """ # The full expression, used to provide sub-expressions expression = Forward() # The initial expression last = base | Suppress('(') + expression + Suppress(')') def parse_operator(expr, arity, association, action=None, extra=None): return expr, arity, association, action, extra for op in operators: # Use a function to default action to None expr, arity, association, action, extra = parse_operator(*op) # Check that the arity is valid if arity < 1 or arity > 2: raise Exception("Arity must be unary (1) or binary (2)") if association not in (opAssoc.LEFT, opAssoc.RIGHT): raise Exception("Association must be LEFT or RIGHT") # This will contain the expression this = Forward() # Create an expression based on the association and arity if association is opAssoc.LEFT: new_last = (last | extra) if extra else last if arity == 1: operator_expression = new_last + OneOrMore(expr) elif arity == 2: operator_expression = last + OneOrMore(expr + new_last) elif association is opAssoc.RIGHT: new_this = (this | extra) if extra else this if arity == 1: operator_expression = expr + new_this # Currently no operator uses this, so marking it nocover for now elif arity == 2: # nocover operator_expression = last + OneOrMore(new_this) # nocover # Set the parse action for the operator if action is not None: operator_expression.setParseAction(action) this <<= (operator_expression | last) last = this # Set the full expression and return it expression <<= last return expression
python
def operatorPrecedence(base, operators): # The full expression, used to provide sub-expressions expression = Forward() # The initial expression last = base | Suppress('(') + expression + Suppress(')') def parse_operator(expr, arity, association, action=None, extra=None): return expr, arity, association, action, extra for op in operators: # Use a function to default action to None expr, arity, association, action, extra = parse_operator(*op) # Check that the arity is valid if arity < 1 or arity > 2: raise Exception("Arity must be unary (1) or binary (2)") if association not in (opAssoc.LEFT, opAssoc.RIGHT): raise Exception("Association must be LEFT or RIGHT") # This will contain the expression this = Forward() # Create an expression based on the association and arity if association is opAssoc.LEFT: new_last = (last | extra) if extra else last if arity == 1: operator_expression = new_last + OneOrMore(expr) elif arity == 2: operator_expression = last + OneOrMore(expr + new_last) elif association is opAssoc.RIGHT: new_this = (this | extra) if extra else this if arity == 1: operator_expression = expr + new_this # Currently no operator uses this, so marking it nocover for now elif arity == 2: # nocover operator_expression = last + OneOrMore(new_this) # nocover # Set the parse action for the operator if action is not None: operator_expression.setParseAction(action) this <<= (operator_expression | last) last = this # Set the full expression and return it expression <<= last return expression
[ "def", "operatorPrecedence", "(", "base", ",", "operators", ")", ":", "# The full expression, used to provide sub-expressions", "expression", "=", "Forward", "(", ")", "# The initial expression", "last", "=", "base", "|", "Suppress", "(", "'('", ")", "+", "expression"...
This re-implements pyparsing's operatorPrecedence function. It gets rid of a few annoying bugs, like always putting operators inside a Group, and matching the whole grammar with Forward first (there may actually be a reason for that, but I couldn't find it). It doesn't support trinary expressions, but they should be easy to add if it turns out I need them.
[ "This", "re", "-", "implements", "pyparsing", "s", "operatorPrecedence", "function", "." ]
88398c77534ebec19f1f18478e475d0b7a5bc717
https://github.com/borntyping/python-dice/blob/88398c77534ebec19f1f18478e475d0b7a5bc717/dice/grammar.py#L25-L83
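An illustrative way to drive the operatorPrecedence record above with plain pyparsing elements. The fold helper and the arithmetic grammar are inventions for this sketch; the real dice grammar attaches its own element classes as parse actions:
import operator
from pyparsing import Suppress, Word, nums, opAssoc
from dice.grammar import operatorPrecedence

integer = Word(nums).setParseAction(lambda t: int(t[0]))

def fold(fn):
    # With the operator token suppressed, the tokens are just the operands
    def action(tokens):
        result = tokens[0]
        for value in tokens[1:]:
            result = fn(result, value)
        return [result]
    return action

expression = operatorPrecedence(integer, [
    (Suppress('*'), 2, opAssoc.LEFT, fold(operator.mul)),
    (Suppress('+'), 2, opAssoc.LEFT, fold(operator.add)),
])
print(expression.parseString('1+2*3'))  # -> [7], '*' binds tighter than '+'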
19,842
borntyping/python-dice
dice/elements.py
Element.set_parse_attributes
def set_parse_attributes(self, string, location, tokens): "Fluent API for setting parsed location" self.string = string self.location = location self.tokens = tokens return self
python
def set_parse_attributes(self, string, location, tokens): "Fluent API for setting parsed location" self.string = string self.location = location self.tokens = tokens return self
[ "def", "set_parse_attributes", "(", "self", ",", "string", ",", "location", ",", "tokens", ")", ":", "self", ".", "string", "=", "string", "self", ".", "location", "=", "location", "self", ".", "tokens", "=", "tokens", "return", "self" ]
Fluent API for setting parsed location
[ "Fluent", "API", "for", "setting", "parsed", "location" ]
88398c77534ebec19f1f18478e475d0b7a5bc717
https://github.com/borntyping/python-dice/blob/88398c77534ebec19f1f18478e475d0b7a5bc717/dice/elements.py#L26-L31
19,843
borntyping/python-dice
dice/elements.py
Element.evaluate_object
def evaluate_object(obj, cls=None, cache=False, **kwargs): """Evaluates elements, and coerces objects to a class if needed""" old_obj = obj if isinstance(obj, Element): if cache: obj = obj.evaluate_cached(**kwargs) else: obj = obj.evaluate(cache=cache, **kwargs) if cls is not None and type(obj) != cls: obj = cls(obj) for attr in ('string', 'location', 'tokens'): if hasattr(old_obj, attr): setattr(obj, attr, getattr(old_obj, attr)) return obj
python
def evaluate_object(obj, cls=None, cache=False, **kwargs): old_obj = obj if isinstance(obj, Element): if cache: obj = obj.evaluate_cached(**kwargs) else: obj = obj.evaluate(cache=cache, **kwargs) if cls is not None and type(obj) != cls: obj = cls(obj) for attr in ('string', 'location', 'tokens'): if hasattr(old_obj, attr): setattr(obj, attr, getattr(old_obj, attr)) return obj
[ "def", "evaluate_object", "(", "obj", ",", "cls", "=", "None", ",", "cache", "=", "False", ",", "*", "*", "kwargs", ")", ":", "old_obj", "=", "obj", "if", "isinstance", "(", "obj", ",", "Element", ")", ":", "if", "cache", ":", "obj", "=", "obj", ...
Evaluates elements, and coerces objects to a class if needed
[ "Evaluates", "elements", "and", "coerces", "objects", "to", "a", "class", "if", "needed" ]
88398c77534ebec19f1f18478e475d0b7a5bc717
https://github.com/borntyping/python-dice/blob/88398c77534ebec19f1f18478e475d0b7a5bc717/dice/elements.py#L44-L60
19,844
networks-lab/metaknowledge
metaknowledge/graphHelpers.py
readGraph
def readGraph(edgeList, nodeList = None, directed = False, idKey = 'ID', eSource = 'From', eDest = 'To'): """Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files. This is designed only for the files produced by metaknowledge and is meant to be the reverse of [writeGraph()](#metaknowledge.graphHelpers.writeGraph), if this does not produce the desired results the networkx builtin [networkx.read_edgelist()](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.readwrite.edgelist.read_edgelist.html) could be tried as it is aimed at a more general usage. The read edge list format assumes the column named _eSource_ (default `'From'`) is the source node, then the column _eDest_ (default `'To'`) gives the destination and all other columns are attributes of the edges, e.g. weight. The read node list format assumes the column _idKey_ (default `'ID'`) is the ID of the node for the edge list and the resulting network. All other columns are considered attributes of the node, e.g. count. **Note**: If the names of the columns do not match those given to **readGraph()** a `KeyError` exception will be raised. **Note**: If nodes appear in the edgelist but not the nodeList they will be created silently with no attributes. # Parameters _edgeList_ : `str` > a string giving the path to the edge list file _nodeList_ : `optional [str]` > default `None`, a string giving the path to the node list file _directed_ : `optional [bool]` > default `False`, if `True` the produced network is directed from _eSource_ to _eDest_ _idKey_ : `optional [str]` > default `'ID'`, the name of the ID column in the node list _eSource_ : `optional [str]` > default `'From'`, the name of the source column in the edge list _eDest_ : `optional [str]` > default `'To'`, the name of the destination column in the edge list # Returns `networkx Graph` > the graph described by the input files """ progArgs = (0, "Starting to reading graphs") if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: if directed: grph = nx.DiGraph() else: grph = nx.Graph() if nodeList: PBar.updateVal(0, "Reading " + nodeList) f = open(os.path.expanduser(os.path.abspath(nodeList))) nFile = csv.DictReader(f) for line in nFile: vals = line ndID = vals[idKey] del vals[idKey] if len(vals) > 0: grph.add_node(ndID, **vals) else: grph.add_node(ndID) f.close() PBar.updateVal(.25, "Reading " + edgeList) f = open(os.path.expanduser(os.path.abspath(edgeList))) eFile = csv.DictReader(f) for line in eFile: vals = line eFrom = vals[eSource] eTo = vals[eDest] del vals[eSource] del vals[eDest] if len(vals) > 0: grph.add_edge(eFrom, eTo, **vals) else: grph.add_edge(eFrom, eTo) PBar.finish("{} nodes and {} edges found".format(len(grph.nodes()), len(grph.edges()))) f.close() return grph
python
def readGraph(edgeList, nodeList = None, directed = False, idKey = 'ID', eSource = 'From', eDest = 'To'): progArgs = (0, "Starting to reading graphs") if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: if directed: grph = nx.DiGraph() else: grph = nx.Graph() if nodeList: PBar.updateVal(0, "Reading " + nodeList) f = open(os.path.expanduser(os.path.abspath(nodeList))) nFile = csv.DictReader(f) for line in nFile: vals = line ndID = vals[idKey] del vals[idKey] if len(vals) > 0: grph.add_node(ndID, **vals) else: grph.add_node(ndID) f.close() PBar.updateVal(.25, "Reading " + edgeList) f = open(os.path.expanduser(os.path.abspath(edgeList))) eFile = csv.DictReader(f) for line in eFile: vals = line eFrom = vals[eSource] eTo = vals[eDest] del vals[eSource] del vals[eDest] if len(vals) > 0: grph.add_edge(eFrom, eTo, **vals) else: grph.add_edge(eFrom, eTo) PBar.finish("{} nodes and {} edges found".format(len(grph.nodes()), len(grph.edges()))) f.close() return grph
[ "def", "readGraph", "(", "edgeList", ",", "nodeList", "=", "None", ",", "directed", "=", "False", ",", "idKey", "=", "'ID'", ",", "eSource", "=", "'From'", ",", "eDest", "=", "'To'", ")", ":", "progArgs", "=", "(", "0", ",", "\"Starting to reading graphs...
Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files. This is designed only for the files produced by metaknowledge and is meant to be the reverse of [writeGraph()](#metaknowledge.graphHelpers.writeGraph), if this does not produce the desired results the networkx builtin [networkx.read_edgelist()](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.readwrite.edgelist.read_edgelist.html) could be tried as it is aimed at a more general usage. The read edge list format assumes the column named _eSource_ (default `'From'`) is the source node, then the column _eDest_ (default `'To'`) gives the destination and all other columns are attributes of the edges, e.g. weight. The read node list format assumes the column _idKey_ (default `'ID'`) is the ID of the node for the edge list and the resulting network. All other columns are considered attributes of the node, e.g. count. **Note**: If the names of the columns do not match those given to **readGraph()** a `KeyError` exception will be raised. **Note**: If nodes appear in the edgelist but not the nodeList they will be created silently with no attributes. # Parameters _edgeList_ : `str` > a string giving the path to the edge list file _nodeList_ : `optional [str]` > default `None`, a string giving the path to the node list file _directed_ : `optional [bool]` > default `False`, if `True` the produced network is directed from _eSource_ to _eDest_ _idKey_ : `optional [str]` > default `'ID'`, the name of the ID column in the node list _eSource_ : `optional [str]` > default `'From'`, the name of the source column in the edge list _eDest_ : `optional [str]` > default `'To'`, the name of the destination column in the edge list # Returns `networkx Graph` > the graph described by the input files
[ "Reads", "the", "files", "given", "by", "_edgeList_", "and", "_nodeList_", "and", "creates", "a", "networkx", "graph", "for", "the", "files", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L11-L94
19,845
networks-lab/metaknowledge
metaknowledge/graphHelpers.py
writeGraph
def writeGraph(grph, name, edgeInfo = True, typing = False, suffix = 'csv', overwrite = True, allSameAttribute = False): """Writes both the edge list and the node attribute list of _grph_ to files starting with _name_. The output files start with _name_, the file type (edgeList, nodeAttributes) then if typing is True the type of graph (directed or undirected) then the suffix, the default is as follows: >> name_fileType.suffix Both files are csv's with comma delimiters and double quote quoting characters. The edge list has two columns for the source and destination of the edge, `'From'` and `'To'` respectively, then, if _edgeInfo_ is `True`, for each attribute of the edge another column is created. The node list has one column called "ID" with the node ids used by networkx and all other columns are the node attributes. To read back these files use [readGraph()](#metaknowledge.graphHelpers.readGraph) and to write only one type of list use [writeEdgeList()](#metaknowledge.graphHelpers.writeEdgeList) or [writeNodeAttributeFile()](#metaknowledge.graphHelpers.writeNodeAttributeFile). **Warning**: this function will overwrite files if they are in the way of the output; to prevent this set _overwrite_ to `False` **Note**: If any nodes or edges are missing an attribute a `KeyError` will be raised. # Parameters _grph_ : `networkx Graph` > A networkx graph of the network to be written. _name_ : `str` > The start of the file name to be written, can include a path. _edgeInfo_ : `optional [bool]` > Default `True`, if `True` the attributes of each edge are written to the edge list. _typing_ : `optional [bool]` > Default `False`, if `True` the directedness of the graph will be added to the file names. _suffix_ : `optional [str]` > Default `"csv"`, the suffix of the file. _overwrite_ : `optional [bool]` > Default `True`, if `True` files will be overwritten silently, otherwise an `OSError` exception will be raised. """ progArgs = (0, "Writing the graph to files starting with: {}".format(name)) if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: if typing: if isinstance(grph, nx.classes.digraph.DiGraph) or isinstance(grph, nx.classes.multidigraph.MultiDiGraph): grphType = "_directed" else: grphType = "_undirected" else: grphType = '' nameCompts = os.path.split(os.path.expanduser(os.path.normpath(name))) if nameCompts[0] == '' and nameCompts[1] == '': edgeListName = "edgeList"+ grphType + '.' + suffix nodesAtrName = "nodeAttributes"+ grphType + '.' + suffix elif nameCompts[0] == '': edgeListName = nameCompts[1] + "_edgeList"+ grphType + '.' + suffix nodesAtrName = nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix elif nameCompts[1] == '': edgeListName = os.path.join(nameCompts[0], "edgeList"+ grphType + '.' + suffix) nodesAtrName = os.path.join(nameCompts[0], "nodeAttributes"+ grphType + '.' + suffix) else: edgeListName = os.path.join(nameCompts[0], nameCompts[1] + "_edgeList"+ grphType + '.' + suffix) nodesAtrName = os.path.join(nameCompts[0], nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix) if not overwrite: if os.path.isfile(edgeListName): raise OSError(edgeListName+ " already exists") if os.path.isfile(nodesAtrName): raise OSError(nodesAtrName + " already exists") writeEdgeList(grph, edgeListName, extraInfo = edgeInfo, allSameAttribute = allSameAttribute, _progBar = PBar) writeNodeAttributeFile(grph, nodesAtrName, allSameAttribute = allSameAttribute, _progBar = PBar) PBar.finish("{} nodes and {} edges written to file".format(len(grph.nodes()), len(grph.edges())))
python
def writeGraph(grph, name, edgeInfo = True, typing = False, suffix = 'csv', overwrite = True, allSameAttribute = False): progArgs = (0, "Writing the graph to files starting with: {}".format(name)) if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: if typing: if isinstance(grph, nx.classes.digraph.DiGraph) or isinstance(grph, nx.classes.multidigraph.MultiDiGraph): grphType = "_directed" else: grphType = "_undirected" else: grphType = '' nameCompts = os.path.split(os.path.expanduser(os.path.normpath(name))) if nameCompts[0] == '' and nameCompts[1] == '': edgeListName = "edgeList"+ grphType + '.' + suffix nodesAtrName = "nodeAttributes"+ grphType + '.' + suffix elif nameCompts[0] == '': edgeListName = nameCompts[1] + "_edgeList"+ grphType + '.' + suffix nodesAtrName = nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix elif nameCompts[1] == '': edgeListName = os.path.join(nameCompts[0], "edgeList"+ grphType + '.' + suffix) nodesAtrName = os.path.join(nameCompts[0], "nodeAttributes"+ grphType + '.' + suffix) else: edgeListName = os.path.join(nameCompts[0], nameCompts[1] + "_edgeList"+ grphType + '.' + suffix) nodesAtrName = os.path.join(nameCompts[0], nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix) if not overwrite: if os.path.isfile(edgeListName): raise OSError(edgeListName+ " already exists") if os.path.isfile(nodesAtrName): raise OSError(nodesAtrName + " already exists") writeEdgeList(grph, edgeListName, extraInfo = edgeInfo, allSameAttribute = allSameAttribute, _progBar = PBar) writeNodeAttributeFile(grph, nodesAtrName, allSameAttribute = allSameAttribute, _progBar = PBar) PBar.finish("{} nodes and {} edges written to file".format(len(grph.nodes()), len(grph.edges())))
[ "def", "writeGraph", "(", "grph", ",", "name", ",", "edgeInfo", "=", "True", ",", "typing", "=", "False", ",", "suffix", "=", "'csv'", ",", "overwrite", "=", "True", ",", "allSameAttribute", "=", "False", ")", ":", "progArgs", "=", "(", "0", ",", "\"...
Writes both the edge list and the node attribute list of _grph_ to files starting with _name_. The output files start with _name_, the file type (edgeList, nodeAttributes) then if typing is True the type of graph (directed or undirected) then the suffix, the default is as follows: >> name_fileType.suffix Both files are csv's with comma delimiters and double quote quoting characters. The edge list has two columns for the source and destination of the edge, `'From'` and `'To'` respectively, then, if _edgeInfo_ is `True`, for each attribute of the edge another column is created. The node list has one column called "ID" with the node ids used by networkx and all other columns are the node attributes. To read back these files use [readGraph()](#metaknowledge.graphHelpers.readGraph) and to write only one type of list use [writeEdgeList()](#metaknowledge.graphHelpers.writeEdgeList) or [writeNodeAttributeFile()](#metaknowledge.graphHelpers.writeNodeAttributeFile). **Warning**: this function will overwrite files if they are in the way of the output; to prevent this set _overwrite_ to `False` **Note**: If any nodes or edges are missing an attribute a `KeyError` will be raised. # Parameters _grph_ : `networkx Graph` > A networkx graph of the network to be written. _name_ : `str` > The start of the file name to be written, can include a path. _edgeInfo_ : `optional [bool]` > Default `True`, if `True` the attributes of each edge are written to the edge list. _typing_ : `optional [bool]` > Default `False`, if `True` the directedness of the graph will be added to the file names. _suffix_ : `optional [str]` > Default `"csv"`, the suffix of the file. _overwrite_ : `optional [bool]` > Default `True`, if `True` files will be overwritten silently, otherwise an `OSError` exception will be raised.
[ "Writes", "both", "the", "edge", "list", "and", "the", "node", "attribute", "list", "of", "_grph_", "to", "files", "starting", "with", "_name_", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L96-L170
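A round-trip sketch covering this record and the readGraph() record above, written into a temporary directory. Attribute values come back as strings because the files are CSVs:
import os
import tempfile
import networkx as nx
from metaknowledge.graphHelpers import readGraph, writeGraph

G = nx.Graph()
G.add_node('a', count=1)
G.add_node('b', count=2)
G.add_edge('a', 'b', weight=3)

with tempfile.TemporaryDirectory() as d:
    writeGraph(G, os.path.join(d, 'demo'))
    # writeGraph produced demo_edgeList.csv and demo_nodeAttributes.csv
    H = readGraph(os.path.join(d, 'demo_edgeList.csv'),
                  nodeList=os.path.join(d, 'demo_nodeAttributes.csv'))
    print(H.nodes(data=True))  # counts come back as the strings '1' and '2'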
19,846
networks-lab/metaknowledge
metaknowledge/graphHelpers.py
getNodeDegrees
def getNodeDegrees(grph, weightString = "weight", strictMode = False, returnType = int, edgeType = 'bi'): """ Returns a dictionary of nodes to their degrees, the degree is determined by adding the weight of each edge with the weight being the string weightString that gives the name of the attribute of each edge containing their weight. The weights are then converted to the type returnType. If weightString is given as False instead each edge is counted as 1. edgeType takes in one of three strings: 'bi', 'in', 'out'. 'bi' means both nodes on the edge count it, 'out' means only the one the edge comes from counts it and 'in' means only the node the edge goes to counts it. 'bi' is the default. Use only on directed graphs as otherwise the selected node is random. """ ndsDict = {} for nd in grph.nodes(): ndsDict[nd] = returnType(0) for e in grph.edges(data = True): if weightString: try: edgVal = returnType(e[2][weightString]) except KeyError: if strictMode: raise KeyError("The edge from " + str(e[0]) + " to " + str(e[1]) + " does not have the attribute: '" + str(weightString) + "'") else: edgVal = returnType(1) else: edgVal = returnType(1) if edgeType == 'bi': ndsDict[e[0]] += edgVal ndsDict[e[1]] += edgVal elif edgeType == 'in': ndsDict[e[1]] += edgVal elif edgeType == 'out': ndsDict[e[0]] += edgVal else: raise ValueError("edgeType must be 'bi', 'in', or 'out'") return ndsDict
python
def getNodeDegrees(grph, weightString = "weight", strictMode = False, returnType = int, edgeType = 'bi'): ndsDict = {} for nd in grph.nodes(): ndsDict[nd] = returnType(0) for e in grph.edges(data = True): if weightString: try: edgVal = returnType(e[2][weightString]) except KeyError: if strictMode: raise KeyError("The edge from " + str(e[0]) + " to " + str(e[1]) + " does not have the attribute: '" + str(weightString) + "'") else: edgVal = returnType(1) else: edgVal = returnType(1) if edgeType == 'bi': ndsDict[e[0]] += edgVal ndsDict[e[1]] += edgVal elif edgeType == 'in': ndsDict[e[1]] += edgVal elif edgeType == 'out': ndsDict[e[0]] += edgVal else: raise ValueError("edgeType must be 'bi', 'in', or 'out'") return ndsDict
[ "def", "getNodeDegrees", "(", "grph", ",", "weightString", "=", "\"weight\"", ",", "strictMode", "=", "False", ",", "returnType", "=", "int", ",", "edgeType", "=", "'bi'", ")", ":", "ndsDict", "=", "{", "}", "for", "nd", "in", "grph", ".", "nodes", "("...
Returns a dictionary of nodes to their degrees, the degree is determined by adding the weight of each edge with the weight being the string weightString that gives the name of the attribute of each edge containing their weight. The weights are then converted to the type returnType. If weightString is given as False instead each edge is counted as 1. edgeType takes in one of three strings: 'bi', 'in', 'out'. 'bi' means both nodes on the edge count it, 'out' means only the one the edge comes from counts it and 'in' means only the node the edge goes to counts it. 'bi' is the default. Use only on directed graphs as otherwise the selected node is random.
[ "Retunrs", "a", "dictionary", "of", "nodes", "to", "their", "degrees", "the", "degree", "is", "determined", "by", "adding", "the", "weight", "of", "edge", "with", "the", "weight", "being", "the", "string", "weightString", "that", "gives", "the", "name", "of"...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L457-L486
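A small sketch of the record above on a toy weighted graph:
import networkx as nx
from metaknowledge.graphHelpers import getNodeDegrees

G = nx.Graph()
G.add_edge('a', 'b', weight=3)
G.add_edge('b', 'c', weight=2)

print(getNodeDegrees(G))                      # {'a': 3, 'b': 5, 'c': 2}
print(getNodeDegrees(G, weightString=False))  # edge counts: {'a': 1, 'b': 2, 'c': 1}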
19,847
networks-lab/metaknowledge
metaknowledge/graphHelpers.py
mergeGraphs
def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal = 'count', incrementedEdgeVal = 'weight'): """A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method. **mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and _incrementedEdgeVal_ are added instead of being overwritten. # Parameters _targetGraph_ : `networkx Graph` > the graph to be modified, it has precedence. _addedGraph_ : `networkx Graph` > the graph that is unmodified, it is added and does **not** have precedence. _incrementedNodeVal_ : `optional [str]` > default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value. _incrementedEdgeVal_ : `optional [str]` > default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value. """ for addedNode, attribs in addedGraph.nodes(data = True): if incrementedNodeVal: try: targetGraph.node[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal] except KeyError: targetGraph.add_node(addedNode, **attribs) else: if not targetGraph.has_node(addedNode): targetGraph.add_node(addedNode, **attribs) for edgeNode1, edgeNode2, attribs in addedGraph.edges(data = True): if incrementedEdgeVal: try: targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal] except KeyError: targetGraph.add_edge(edgeNode1, edgeNode2, **attribs) else: if not targetGraph.has_edge(edgeNode1, edgeNode2): targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
python
def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal = 'count', incrementedEdgeVal = 'weight'): for addedNode, attribs in addedGraph.nodes(data = True): if incrementedNodeVal: try: targetGraph.node[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal] except KeyError: targetGraph.add_node(addedNode, **attribs) else: if not targetGraph.has_node(addedNode): targetGraph.add_node(addedNode, **attribs) for edgeNode1, edgeNode2, attribs in addedGraph.edges(data = True): if incrementedEdgeVal: try: targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal] except KeyError: targetGraph.add_edge(edgeNode1, edgeNode2, **attribs) else: if not targetGraph.has_edge(edgeNode1, edgeNode2): targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
[ "def", "mergeGraphs", "(", "targetGraph", ",", "addedGraph", ",", "incrementedNodeVal", "=", "'count'", ",", "incrementedEdgeVal", "=", "'weight'", ")", ":", "for", "addedNode", ",", "attribs", "in", "addedGraph", ".", "nodes", "(", "data", "=", "True", ")", ...
A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method. **mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and _incrementedEdgeVal_ are added instead of being overwritten. # Parameters _targetGraph_ : `networkx Graph` > the graph to be modified, it has precedence. _addedGraph_ : `networkx Graph` > the graph that is unmodified, it is added and does **not** have precedence. _incrementedNodeVal_ : `optional [str]` > default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value. _incrementedEdgeVal_ : `optional [str]` > default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
[ "A", "quick", "way", "of", "merging", "graphs", "this", "is", "meant", "to", "be", "quick", "and", "is", "only", "intended", "for", "graphs", "generated", "by", "metaknowledge", ".", "This", "does", "not", "check", "anything", "and", "as", "such", "may", ...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L691-L732
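A sketch of the record above on two toy graphs. Note that the record's code mixes the G.node and G.edges[u, v] accessors, so this assumes networkx 2.0-2.3 where both exist:
import networkx as nx
from metaknowledge.graphHelpers import mergeGraphs

G1 = nx.Graph()
G1.add_node('a', count=2)
G1.add_edge('a', 'b', weight=1)

G2 = nx.Graph()
G2.add_node('a', count=3)
G2.add_edge('a', 'b', weight=4)

mergeGraphs(G1, G2)                  # modifies G1 in place
print(G1.nodes['a']['count'])        # 5: counts are summed, not overwritten
print(G1.edges['a', 'b']['weight'])  # 5: weights likewise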
19,848
networks-lab/metaknowledge
metaknowledge/medline/tagProcessing/tagFunctions.py
AD
def AD(val): """Affiliation Undoing what the parser does then splitting at the semicolons and dropping newlines; extra filtering is required because some AD's end with a semicolon""" retDict = {} for v in val: split = v.split(' : ') retDict[split[0]] = [s for s in' : '.join(split[1:]).replace('\n', '').split(';') if s != ''] return retDict
python
def AD(val): retDict = {} for v in val: split = v.split(' : ') retDict[split[0]] = [s for s in' : '.join(split[1:]).replace('\n', '').split(';') if s != ''] return retDict
[ "def", "AD", "(", "val", ")", ":", "retDict", "=", "{", "}", "for", "v", "in", "val", ":", "split", "=", "v", ".", "split", "(", "' : '", ")", "retDict", "[", "split", "[", "0", "]", "]", "=", "[", "s", "for", "s", "in", "' : '", ".", "join...
Affiliation Undoing what the parser does then splitting at the semicolons and dropping newlines; extra filtering is required because some AD's end with a semicolon
[ "Affiliation", "Undoing", "what", "the", "parser", "does", "then", "splitting", "at", "the", "semicolons", "and", "dropping", "newlines", "extra", "fitlering", "is", "required", "beacuse", "some", "AD", "s", "end", "with", "a", "semicolon" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/tagProcessing/tagFunctions.py#L218-L225
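A sketch of the record above on a hand-made value; the input mimics the parser's 'name : affiliations' joining and the sample strings are invented:
from metaknowledge.medline.tagProcessing.tagFunctions import AD

vals = ['Smith J : Dept of Physics;Dept of Math;']  # trailing ';' yields an empty entry that is filtered out
print(AD(vals))
# {'Smith J': ['Dept of Physics', 'Dept of Math']}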
19,849
networks-lab/metaknowledge
metaknowledge/medline/tagProcessing/tagFunctions.py
AUID
def AUID(val): """AuthorIdentifier one line only just need to undo the parser's effects""" retDict = {} for v in val: split = v.split(' : ') retDict[split[0]] = ' : '.join(split[1:]) return retDict
python
def AUID(val): retDict = {} for v in val: split = v.split(' : ') retDict[split[0]] = ' : '.join(split[1:]) return retDict
[ "def", "AUID", "(", "val", ")", ":", "retDict", "=", "{", "}", "for", "v", "in", "val", ":", "split", "=", "v", ".", "split", "(", "' : '", ")", "retDict", "[", "split", "[", "0", "]", "]", "=", "' : '", ".", "join", "(", "split", "[", "1", ...
AuthorIdentifier one line only just need to undo the parser's effects
[ "AuthorIdentifier", "one", "line", "only", "just", "need", "to", "undo", "the", "parser", "s", "effects" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/tagProcessing/tagFunctions.py#L322-L329
19,850
networks-lab/metaknowledge
metaknowledge/constants.py
isInteractive
def isInteractive(): """ A basic check of if the program is running in interactive mode """ if sys.stdout.isatty() and os.name != 'nt': #Hopefully everything but ms supports '\r' try: import threading except ImportError: return False else: return True else: return False
python
def isInteractive(): if sys.stdout.isatty() and os.name != 'nt': #Hopefully everything but ms supports '\r' try: import threading except ImportError: return False else: return True else: return False
[ "def", "isInteractive", "(", ")", ":", "if", "sys", ".", "stdout", ".", "isatty", "(", ")", "and", "os", ".", "name", "!=", "'nt'", ":", "#Hopefully everything but ms supports '\\r'", "try", ":", "import", "threading", "except", "ImportError", ":", "return", ...
A basic check of if the program is running in interactive mode
[ "A", "basic", "check", "of", "if", "the", "program", "is", "running", "in", "interactive", "mode" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/constants.py#L27-L40
19,851
networks-lab/metaknowledge
metaknowledge/grants/nsercGrant.py
NSERCGrant.getInstitutions
def getInstitutions(self, tags = None, seperator = ";", _getTag = False): """Returns a list with the name of the institution. The optional arguments are ignored # Returns `list [str]` > A list with 1 entry, the name of the institution """ if tags is None: tags = [] elif isinstance(tags, str): tags = [tags] for k in self.keys(): if 'institution' in k.lower() and k not in tags: tags.append(k) return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag)
python
def getInstitutions(self, tags = None, seperator = ";", _getTag = False): if tags is None: tags = [] elif isinstance(tags, str): tags = [tags] for k in self.keys(): if 'institution' in k.lower() and k not in tags: tags.append(k) return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag)
[ "def", "getInstitutions", "(", "self", ",", "tags", "=", "None", ",", "seperator", "=", "\";\"", ",", "_getTag", "=", "False", ")", ":", "if", "tags", "is", "None", ":", "tags", "=", "[", "]", "elif", "isinstance", "(", "tags", ",", "str", ")", ":"...
Returns a list with the name of the institution. The optional arguments are ignored # Returns `list [str]` > A list with 1 entry, the name of the institution
[ "Returns", "a", "list", "with", "the", "names", "of", "the", "institution", ".", "The", "optional", "arguments", "are", "ignored" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/grants/nsercGrant.py#L46-L62
19,852
networks-lab/metaknowledge
metaknowledge/medline/recordMedline.py
MedlineRecord.writeRecord
def writeRecord(self, f): """This is nearly identical to the original; the FAU tag is the only tag not written in the same place, as doing so would require changing the parser and lots of extra logic. """ if self.bad: raise BadPubmedRecord("This record cannot be converted to a file as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'".format(self._sourceLine, self._sourceFile)) else: authTags = {} for tag in authorBasedTags: for val in self._fieldDict.get(tag, []): split = val.split(' : ') try: authTags[split[0]].append("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n '))) except KeyError: authTags[split[0]] = ["{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n '))] for tag, value in self._fieldDict.items(): if tag in authorBasedTags: continue else: for v in value: f.write("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), v.replace('\n', '\n '))) if tag == 'AU': for authVal in authTags.get(v,[]): f.write(authVal)
python
def writeRecord(self, f): if self.bad: raise BadPubmedRecord("This record cannot be converted to a file as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'".format(self._sourceLine, self._sourceFile)) else: authTags = {} for tag in authorBasedTags: for val in self._fieldDict.get(tag, []): split = val.split(' : ') try: authTags[split[0]].append("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n '))) except KeyError: authTags[split[0]] = ["{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n '))] for tag, value in self._fieldDict.items(): if tag in authorBasedTags: continue else: for v in value: f.write("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), v.replace('\n', '\n '))) if tag == 'AU': for authVal in authTags.get(v,[]): f.write(authVal)
[ "def", "writeRecord", "(", "self", ",", "f", ")", ":", "if", "self", ".", "bad", ":", "raise", "BadPubmedRecord", "(", "\"This record cannot be converted to a file as the input was malformed.\\nThe original line number (if any) is: {} and the original file is: '{}'\"", ".", "form...
This is nearly identical to the original; the FAU tag is the only tag not written in the same place, as doing so would require changing the parser and lots of extra logic.
[ "This", "is", "nearly", "identical", "to", "the", "original", "the", "FAU", "tag", "is", "the", "only", "tag", "not", "writen", "in", "the", "same", "place", "doing", "so", "would", "require", "changing", "the", "parser", "and", "lots", "of", "extra", "l...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/recordMedline.py#L66-L88
19,853
networks-lab/metaknowledge
metaknowledge/contour/plotting.py
quickVisual
def quickVisual(G, showLabel = False): """Just makes a simple _matplotlib_ figure and displays it, with each node coloured by its type. You can add labels with _showLabel_. This looks a bit nicer than the one provided by _networkx_'s defaults. # Parameters _showLabel_ : `optional [bool]` > Default `False`, if `True` labels will be added to the nodes giving their IDs. """ colours = "brcmykwg" f = plt.figure(1) ax = f.add_subplot(1,1,1) ndTypes = [] ndColours = [] layout = nx.spring_layout(G, k = 4 / math.sqrt(len(G.nodes()))) for nd in G.nodes(data = True): if 'type' in nd[1]: if nd[1]['type'] not in ndTypes: ndTypes.append(nd[1]['type']) ndColours.append(colours[ndTypes.index(nd[1]['type']) % len(colours)]) elif len(ndColours) > 1: raise RuntimeError("Some nodes do not have a type") if len(ndColours) < 1: nx.draw_networkx_nodes(G, pos = layout, node_color = colours[0], node_shape = '8', node_size = 100, ax = ax) else: nx.draw_networkx_nodes(G, pos = layout, node_color = ndColours, node_shape = '8', node_size = 100, ax = ax) nx.draw_networkx_edges(G, pos = layout, width = .7, ax = ax) if showLabel: nx.draw_networkx_labels(G, pos = layout, font_size = 8, ax = ax) plt.axis('off') f.set_facecolor('w')
python
def quickVisual(G, showLabel = False): colours = "brcmykwg" f = plt.figure(1) ax = f.add_subplot(1,1,1) ndTypes = [] ndColours = [] layout = nx.spring_layout(G, k = 4 / math.sqrt(len(G.nodes()))) for nd in G.nodes(data = True): if 'type' in nd[1]: if nd[1]['type'] not in ndTypes: ndTypes.append(nd[1]['type']) ndColours.append(colours[ndTypes.index(nd[1]['type']) % len(colours)]) elif len(ndColours) > 1: raise RuntimeError("Some nodes do not have a type") if len(ndColours) < 1: nx.draw_networkx_nodes(G, pos = layout, node_color = colours[0], node_shape = '8', node_size = 100, ax = ax) else: nx.draw_networkx_nodes(G, pos = layout, node_color = ndColours, node_shape = '8', node_size = 100, ax = ax) nx.draw_networkx_edges(G, pos = layout, width = .7, ax = ax) if showLabel: nx.draw_networkx_labels(G, pos = layout, font_size = 8, ax = ax) plt.axis('off') f.set_facecolor('w')
[ "def", "quickVisual", "(", "G", ",", "showLabel", "=", "False", ")", ":", "colours", "=", "\"brcmykwg\"", "f", "=", "plt", ".", "figure", "(", "1", ")", "ax", "=", "f", ".", "add_subplot", "(", "1", ",", "1", ",", "1", ")", "ndTypes", "=", "[", ...
Just makes a simple _matplotlib_ figure and displays it, with each node coloured by its type. You can add labels with _showLabel_. This looks a bit nicer than the one provided by _networkx_'s defaults. # Parameters _showLabel_ : `optional [bool]` > Default `False`, if `True` labels will be added to the nodes giving their IDs.
[ "Just", "makes", "a", "simple", "_matplotlib_", "figure", "and", "displays", "it", "with", "each", "node", "coloured", "by", "its", "type", ".", "You", "can", "add", "labels", "with", "_showLabel_", ".", "This", "looks", "a", "bit", "nicer", "than", "the",...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/contour/plotting.py#L8-L38
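A sketch of the record above using a built-in networkx graph, copying the karate club attribute onto the 'type' key that quickVisual colours by:
import networkx as nx
import matplotlib.pyplot as plt
from metaknowledge.contour.plotting import quickVisual

G = nx.karate_club_graph()
for _, data in G.nodes(data=True):
    data['type'] = data['club']  # quickVisual colours nodes by their 'type'

quickVisual(G, showLabel=True)
plt.show()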
19,854
networks-lab/metaknowledge
metaknowledge/contour/plotting.py
graphDensityContourPlot
def graphDensityContourPlot(G, iters = 50, layout = None, layoutScaleFactor = 1, overlay = False, nodeSize = 10, axisSamples = 100, blurringFactor = .1, contours = 15, graphType = 'coloured'): """Creates a 3D plot giving the density of nodes on a 2D plane, as a surface in 3D. Most of the options are for tweaking the final appearance. _layout_ and _layoutScaleFactor_ allow a pre-layout graph to be provided. If a layout is not provided the [networkx.spring_layout()](https://networkx.github.io/documentation/latest/reference/generated/networkx.drawing.layout.spring_layout.html) is used after _iters_ iterations. Then, once the graph has been laid out a grid of _axisSamples_ cells by _axisSamples_ cells is overlaid and the number of nodes in each cell is determined, a gaussian blur is then applied with a sigma of _blurringFactor_. This then forms a surface in 3 dimensions, which is then plotted. If you find the resultant image looks too banded raise the _contours_ number to ~50. # Parameters _G_ : `networkx Graph` > The graph to be plotted _iters_ : `optional [int]` > Default `50`, the number of iterations for the spring layout if _layout_ is not provided. _layout_ : `optional [networkx layout dictionary]` > Default `None`, if provided will be used as a layout of the graph, the maximum distance from the origin along any axis must also be given as _layoutScaleFactor_, which is by default `1`. _layoutScaleFactor_ : `optional [double]` > Default `1`, The maximum distance from the origin allowed along any axis given by _layout_, i.e. the layout must fit in a square centered at the origin with side lengths 2 * _layoutScaleFactor_ _overlay_ : `optional [bool]` > Default `False`, if `True` the 2D graph will be plotted on the X-Y plane at Z = 0. _nodeSize_ : `optional [double]` > Default `10`, the size of the nodes drawn in the overlay _axisSamples_ : `optional [int]` > Default 100, the number of cells used along each axis for sampling. A larger number will mean a lower average density. _blurringFactor_ : `optional [double]` > Default `0.1`, the sigma value used for smoothing the surface density. The higher this number the smoother the surface. _contours_ : `optional [int]` > Default 15, the number of different heights drawn. If this number is low the resultant image will look very banded. It is recommended this be raised above `50` if you want your images to look good, **Warning** this will make them much slower to generate and interact with. _graphType_ : `optional [str]` > Default `'coloured'`, if `'coloured'` the image will have a density based colourization applied, the only other option is `'solid'` which removes the colourization. """ from mpl_toolkits.mplot3d import Axes3D if not isinstance(G, nx.classes.digraph.DiGraph) and not isinstance(G, nx.classes.graph.Graph): raise TypeError("{} is not a valid input.".format(type(G))) if layout is None: layout = nx.spring_layout(G, scale = axisSamples - 1, iterations = iters) grid = np.zeros( [axisSamples, axisSamples],dtype=np.float32) for v in layout.values(): x, y = tuple(int(x) for x in v.round(0)) grid[y][x] += 1 elif isinstance(layout, dict): layout = layout.copy() grid = np.zeros([axisSamples, axisSamples],dtype=np.float32) multFactor = (axisSamples - 1) / layoutScaleFactor for k in layout.keys(): tmpPos = layout[k] * multFactor layout[k] = tmpPos x, y = tuple(int(x) for x in tmpPos.round(0)) grid[y][x] += 1 else: raise TypeError("{} is not a valid input.".format(type(layout))) fig = plt.figure() #axis = fig.add_subplot(111) axis = fig.gca(projection='3d') if overlay: nx.draw_networkx(G, pos = layout, ax = axis, node_size = nodeSize, with_labels = False, edgelist = []) grid = ndi.gaussian_filter(grid, (blurringFactor * axisSamples, blurringFactor * axisSamples)) X = Y = np.arange(0, axisSamples, 1) X, Y = np.meshgrid(X, Y) if graphType == "solid": CS = axis.plot_surface(X,Y, grid) else: CS = axis.contourf(X, Y, grid, contours) axis.set_xlabel('X') axis.set_ylabel('Y') axis.set_zlabel('Node Density')
python
def graphDensityContourPlot(G, iters = 50, layout = None, layoutScaleFactor = 1, overlay = False, nodeSize = 10, axisSamples = 100, blurringFactor = .1, contours = 15, graphType = 'coloured'): from mpl_toolkits.mplot3d import Axes3D if not isinstance(G, nx.classes.digraph.DiGraph) and not isinstance(G, nx.classes.graph.Graph): raise TypeError("{} is not a valid input.".format(type(G))) if layout is None: layout = nx.spring_layout(G, scale = axisSamples - 1, iterations = iters) grid = np.zeros( [axisSamples, axisSamples],dtype=np.float32) for v in layout.values(): x, y = tuple(int(x) for x in v.round(0)) grid[y][x] += 1 elif isinstance(layout, dict): layout = layout.copy() grid = np.zeros([axisSamples, axisSamples],dtype=np.float32) multFactor = (axisSamples - 1) / layoutScaleFactor for k in layout.keys(): tmpPos = layout[k] * multFactor layout[k] = tmpPos x, y = tuple(int(x) for x in tmpPos.round(0)) grid[y][x] += 1 else: raise TypeError("{} is not a valid input.".format(type(layout))) fig = plt.figure() #axis = fig.add_subplot(111) axis = fig.gca(projection='3d') if overlay: nx.draw_networkx(G, pos = layout, ax = axis, node_size = nodeSize, with_labels = False, edgelist = []) grid = ndi.gaussian_filter(grid, (blurringFactor * axisSamples, blurringFactor * axisSamples)) X = Y = np.arange(0, axisSamples, 1) X, Y = np.meshgrid(X, Y) if graphType == "solid": CS = axis.plot_surface(X,Y, grid) else: CS = axis.contourf(X, Y, grid, contours) axis.set_xlabel('X') axis.set_ylabel('Y') axis.set_zlabel('Node Density')
[ "def", "graphDensityContourPlot", "(", "G", ",", "iters", "=", "50", ",", "layout", "=", "None", ",", "layoutScaleFactor", "=", "1", ",", "overlay", "=", "False", ",", "nodeSize", "=", "10", ",", "axisSamples", "=", "100", ",", "blurringFactor", "=", ".1...
Creates a 3D plot giving the density of nodes on a 2D plane, as a surface in 3D. Most of the options are for tweaking the final appearance. _layout_ and _layoutScaleFactor_ allow a pre-layout graph to be provided. If a layout is not provided the [networkx.spring_layout()](https://networkx.github.io/documentation/latest/reference/generated/networkx.drawing.layout.spring_layout.html) is used after _iters_ iterations. Then, once the graph has been laid out a grid of _axisSamples_ cells by _axisSamples_ cells is overlaid and the number of nodes in each cell is determined, a gaussian blur is then applied with a sigma of _blurringFactor_. This then forms a surface in 3 dimensions, which is then plotted. If you find the resultant image looks too banded raise the _contours_ number to ~50. # Parameters _G_ : `networkx Graph` > The graph to be plotted _iters_ : `optional [int]` > Default `50`, the number of iterations for the spring layout if _layout_ is not provided. _layout_ : `optional [networkx layout dictionary]` > Default `None`, if provided will be used as a layout of the graph, the maximum distance from the origin along any axis must also be given as _layoutScaleFactor_, which is by default `1`. _layoutScaleFactor_ : `optional [double]` > Default `1`, The maximum distance from the origin allowed along any axis given by _layout_, i.e. the layout must fit in a square centered at the origin with side lengths 2 * _layoutScaleFactor_ _overlay_ : `optional [bool]` > Default `False`, if `True` the 2D graph will be plotted on the X-Y plane at Z = 0. _nodeSize_ : `optional [double]` > Default `10`, the size of the nodes drawn in the overlay _axisSamples_ : `optional [int]` > Default 100, the number of cells used along each axis for sampling. A larger number will mean a lower average density. _blurringFactor_ : `optional [double]` > Default `0.1`, the sigma value used for smoothing the surface density. The higher this number the smoother the surface. _contours_ : `optional [int]` > Default 15, the number of different heights drawn. If this number is low the resultant image will look very banded. It is recommended this be raised above `50` if you want your images to look good, **Warning** this will make them much slower to generate and interact with. _graphType_ : `optional [str]` > Default `'coloured'`, if `'coloured'` the image will have a density based colourization applied, the only other option is `'solid'` which removes the colourization.
[ "Creates", "a", "3D", "plot", "giving", "the", "density", "of", "nodes", "on", "a", "2D", "plane", "as", "a", "surface", "in", "3D", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/contour/plotting.py#L40-L125
19,855
networks-lab/metaknowledge
metaknowledge/WOS/tagProcessing/helpFuncs.py
makeBiDirectional
def makeBiDirectional(d): """ Helper for generating tagNameConverter Makes dict that maps from key to value and back """ dTmp = d.copy() for k in d: dTmp[d[k]] = k return dTmp
python
def makeBiDirectional(d): dTmp = d.copy() for k in d: dTmp[d[k]] = k return dTmp
[ "def", "makeBiDirectional", "(", "d", ")", ":", "dTmp", "=", "d", ".", "copy", "(", ")", "for", "k", "in", "d", ":", "dTmp", "[", "d", "[", "k", "]", "]", "=", "k", "return", "dTmp" ]
Helper for generating tagNameConverter Makes dict that maps from key to value and back
[ "Helper", "for", "generating", "tagNameConverter", "Makes", "dict", "that", "maps", "from", "key", "to", "value", "and", "back" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/WOS/tagProcessing/helpFuncs.py#L27-L35
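A one-line sketch of the record above; the sample mapping is invented:
from metaknowledge.WOS.tagProcessing.helpFuncs import makeBiDirectional

print(makeBiDirectional({'AF': 'authorsFull'}))
# {'AF': 'authorsFull', 'authorsFull': 'AF'}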
19,856
networks-lab/metaknowledge
metaknowledge/WOS/tagProcessing/helpFuncs.py
reverseDict
def reverseDict(d): """ Helper for generating fullToTag Makes dict of value to key """ retD = {} for k in d: retD[d[k]] = k return retD
python
def reverseDict(d): retD = {} for k in d: retD[d[k]] = k return retD
[ "def", "reverseDict", "(", "d", ")", ":", "retD", "=", "{", "}", "for", "k", "in", "d", ":", "retD", "[", "d", "[", "k", "]", "]", "=", "k", "return", "retD" ]
Helper for generating fullToTag Makes dict of value to key
[ "Helper", "for", "generating", "fullToTag", "Makes", "dict", "of", "value", "to", "key" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/WOS/tagProcessing/helpFuncs.py#L37-L45
19,857
networks-lab/metaknowledge
metaknowledge/recordCollection.py
makeNodeTuple
def makeNodeTuple(citation, idVal, nodeInfo, fullInfo, nodeType, count, coreCitesDict, coreValues, detailedValues, addCR): """Makes a tuple of idVal and a dict of the selected attributes""" d = {} if nodeInfo: if nodeType == 'full': if coreValues: if citation in coreCitesDict: R = coreCitesDict[citation] d['MK-ID'] = R.id if not detailedValues: infoVals = [] for tag in coreValues: tagVal = R.get(tag) if isinstance(tagVal, str): infoVals.append(tagVal.replace(',','')) elif isinstance(tagVal, list): infoVals.append(tagVal[0].replace(',','')) else: pass d['info'] = ', '.join(infoVals) else: for tag in coreValues: v = R.get(tag, None) if isinstance(v, list): d[tag] = '|'.join(sorted(v)) else: d[tag] = v d['inCore'] = True if addCR: d['citations'] = '|'.join((str(c) for c in R.get('citations', []))) else: d['MK-ID'] = 'None' d['info'] = citation.allButDOI() d['inCore'] = False if addCR: d['citations'] = '' else: d['info'] = citation.allButDOI() elif nodeType == 'journal': if citation.isJournal(): d['info'] = str(citation.FullJournalName()) else: d['info'] = "None" elif nodeType == 'original': d['info'] = str(citation) else: d['info'] = idVal if fullInfo: d['fullCite'] = str(citation) if count: d['count'] = 1 return (idVal, d)
python
def makeNodeTuple(citation, idVal, nodeInfo, fullInfo, nodeType, count, coreCitesDict, coreValues, detailedValues, addCR): d = {} if nodeInfo: if nodeType == 'full': if coreValues: if citation in coreCitesDict: R = coreCitesDict[citation] d['MK-ID'] = R.id if not detailedValues: infoVals = [] for tag in coreValues: tagVal = R.get(tag) if isinstance(tagVal, str): infoVals.append(tagVal.replace(',','')) elif isinstance(tagVal, list): infoVals.append(tagVal[0].replace(',','')) else: pass d['info'] = ', '.join(infoVals) else: for tag in coreValues: v = R.get(tag, None) if isinstance(v, list): d[tag] = '|'.join(sorted(v)) else: d[tag] = v d['inCore'] = True if addCR: d['citations'] = '|'.join((str(c) for c in R.get('citations', []))) else: d['MK-ID'] = 'None' d['info'] = citation.allButDOI() d['inCore'] = False if addCR: d['citations'] = '' else: d['info'] = citation.allButDOI() elif nodeType == 'journal': if citation.isJournal(): d['info'] = str(citation.FullJournalName()) else: d['info'] = "None" elif nodeType == 'original': d['info'] = str(citation) else: d['info'] = idVal if fullInfo: d['fullCite'] = str(citation) if count: d['count'] = 1 return (idVal, d)
[ "def", "makeNodeTuple", "(", "citation", ",", "idVal", ",", "nodeInfo", ",", "fullInfo", ",", "nodeType", ",", "count", ",", "coreCitesDict", ",", "coreValues", ",", "detailedValues", ",", "addCR", ")", ":", "d", "=", "{", "}", "if", "nodeInfo", ":", "if...
Makes a tuple of idVal and a dict of the selected attributes
[ "Makes", "a", "tuple", "of", "idVal", "and", "a", "dict", "of", "the", "selected", "attributes" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1709-L1760
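A sketch of `makeNodeTuple`'s non-core branch using a stub citation. The stub exposes only the two `Citation` methods this code path touches, all values are invented for illustration, and it assumes the function from the record above is in scope.

```python
# Stub standing in for metaknowledge.Citation; only the two methods the
# non-core branch of makeNodeTuple touches are provided, with invented values.
class StubCitation:
    def allButDOI(self):
        return "Doe J, 1999, J EXAMPLE"
    def __str__(self):
        return "Doe J, 1999, J EXAMPLE, V1, P1"

# With coreValues falsy, only 'info' (plus the optional extras) is attached.
node = makeNodeTuple(StubCitation(), idVal="Doe J, 1999, J EXAMPLE",
                     nodeInfo=True, fullInfo=True, nodeType='full',
                     count=True, coreCitesDict={}, coreValues=None,
                     detailedValues=False, addCR=False)
# node == ('Doe J, 1999, J EXAMPLE',
#          {'info': 'Doe J, 1999, J EXAMPLE',
#           'fullCite': 'Doe J, 1999, J EXAMPLE, V1, P1',
#           'count': 1})
```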
19,858
networks-lab/metaknowledge
metaknowledge/recordCollection.py
expandRecs
def expandRecs(G, RecCollect, nodeType, weighted): """Expand all the citations from _RecCollect_""" for Rec in RecCollect: fullCiteList = [makeID(c, nodeType) for c in Rec.createCitation(multiCite = True)] if len(fullCiteList) > 1: for i, citeID1 in enumerate(fullCiteList): if citeID1 in G: for citeID2 in fullCiteList[i + 1:]: if citeID2 not in G: G.add_node(citeID2, **G.node[citeID1]) if weighted: G.add_edge(citeID1, citeID2, weight = 1) else: G.add_edge(citeID1, citeID2) elif weighted: try: G.edges[citeID1, citeID2]['weight'] += 1 except KeyError: G.add_edge(citeID1, citeID2, weight = 1) for e1, e2, data in G.edges(citeID1, data = True): G.add_edge(citeID2, e2, **data)
python
def expandRecs(G, RecCollect, nodeType, weighted): for Rec in RecCollect: fullCiteList = [makeID(c, nodeType) for c in Rec.createCitation(multiCite = True)] if len(fullCiteList) > 1: for i, citeID1 in enumerate(fullCiteList): if citeID1 in G: for citeID2 in fullCiteList[i + 1:]: if citeID2 not in G: G.add_node(citeID2, **G.node[citeID1]) if weighted: G.add_edge(citeID1, citeID2, weight = 1) else: G.add_edge(citeID1, citeID2) elif weighted: try: G.edges[citeID1, citeID2]['weight'] += 1 except KeyError: G.add_edge(citeID1, citeID2, weight = 1) for e1, e2, data in G.edges(citeID1, data = True): G.add_edge(citeID2, e2, **data)
[ "def", "expandRecs", "(", "G", ",", "RecCollect", ",", "nodeType", ",", "weighted", ")", ":", "for", "Rec", "in", "RecCollect", ":", "fullCiteList", "=", "[", "makeID", "(", "c", ",", "nodeType", ")", "for", "c", "in", "Rec", ".", "createCitation", "("...
Expand all the citations from _RecCollect_
[ "Expand", "all", "the", "citations", "from", "_RecCollect_" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1792-L1812
19,859
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.dropNonJournals
def dropNonJournals(self, ptVal = 'J', dropBad = True, invert = False): """Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag # Parameters _ptVal_ : `optional [str]` > Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted. _dropBad_ : `optional [bool]` > Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries _invert_ : `optional [bool]` > Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True` """ if dropBad: self.dropBadEntries() if invert: self._collection = {r for r in self._collection if r['pubType'] != ptVal.upper()} else: self._collection = {r for r in self._collection if r['pubType'] == ptVal.upper()}
python
def dropNonJournals(self, ptVal = 'J', dropBad = True, invert = False): if dropBad: self.dropBadEntries() if invert: self._collection = {r for r in self._collection if r['pubType'] != ptVal.upper()} else: self._collection = {r for r in self._collection if r['pubType'] == ptVal.upper()}
[ "def", "dropNonJournals", "(", "self", ",", "ptVal", "=", "'J'", ",", "dropBad", "=", "True", ",", "invert", "=", "False", ")", ":", "if", "dropBad", ":", "self", ".", "dropBadEntries", "(", ")", "if", "invert", ":", "self", ".", "_collection", "=", ...
Drops the non-journal-type `Records` from the collection; this is done by checking _ptVal_ against the PT tag # Parameters _ptVal_ : `optional [str]` > Default `'J'`, the value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted. _dropBad_ : `optional [bool]` > Default `True`, if `True` bad `Records` will be dropped as well as those that are not journal entries _invert_ : `optional [bool]` > Default `False`, set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True`
[ "Drops", "the", "non", "journal", "type", "Records", "from", "the", "collection", "this", "is", "done", "by", "checking", "_ptVal_", "against", "the", "PT", "tag" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L192-L214
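A usage sketch for `dropNonJournals`, assuming a collection loaded from a saved WOS export; the file path and the `'B'` (book) PT value in the inverted call are illustrative.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")    # illustrative path
RC.dropNonJournals()                         # keep only records with PT == 'J'
# Or invert: drop book-type records ('B') and keep everything else.
RC.dropNonJournals(ptVal='B', invert=True)
```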
19,860
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.writeFile
def writeFile(self, fname = None): """Writes the `RecordCollection` to a file, the written file's format is identical to those download from WOS. The order of `Records` written is random. # Parameters _fname_ : `optional [str]` > Default `None`, if given the output file will written to _fanme_, if `None` the `RecordCollection`'s name's first 200 characters are used with the suffix .isi """ if len(self._collectedTypes) < 2: recEncoding = self.peek().encoding() else: recEncoding = 'utf-8' if fname: f = open(fname, mode = 'w', encoding = recEncoding) else: f = open(self.name[:200] + '.txt', mode = 'w', encoding = recEncoding) if self._collectedTypes == {'WOSRecord'}: f.write("\ufeffFN Thomson Reuters Web of Science\u2122\n") f.write("VR 1.0\n") elif self._collectedTypes == {'MedlineRecord'}: f.write('\n') elif self._collectedTypes == {'ScopusRecord'}: f.write("\ufeff{}\n".format(','.join(scopusHeader))) for R in self._collection: R.writeRecord(f) f.write('\n') if self._collectedTypes == {'WOSRecord'}: f.write('EF') f.close()
python
def writeFile(self, fname = None): if len(self._collectedTypes) < 2: recEncoding = self.peek().encoding() else: recEncoding = 'utf-8' if fname: f = open(fname, mode = 'w', encoding = recEncoding) else: f = open(self.name[:200] + '.txt', mode = 'w', encoding = recEncoding) if self._collectedTypes == {'WOSRecord'}: f.write("\ufeffFN Thomson Reuters Web of Science\u2122\n") f.write("VR 1.0\n") elif self._collectedTypes == {'MedlineRecord'}: f.write('\n') elif self._collectedTypes == {'ScopusRecord'}: f.write("\ufeff{}\n".format(','.join(scopusHeader))) for R in self._collection: R.writeRecord(f) f.write('\n') if self._collectedTypes == {'WOSRecord'}: f.write('EF') f.close()
[ "def", "writeFile", "(", "self", ",", "fname", "=", "None", ")", ":", "if", "len", "(", "self", ".", "_collectedTypes", ")", "<", "2", ":", "recEncoding", "=", "self", ".", "peek", "(", ")", ".", "encoding", "(", ")", "else", ":", "recEncoding", "=...
Writes the `RecordCollection` to a file; the written file's format is identical to those downloaded from WOS. The order of `Records` written is random. # Parameters _fname_ : `optional [str]` > Default `None`, if given the output file will be written to _fname_, if `None` the `RecordCollection`'s name's first 200 characters are used with the suffix `.txt`
[ "Writes", "the", "RecordCollection", "to", "a", "file", "the", "written", "file", "s", "format", "is", "identical", "to", "those", "download", "from", "WOS", ".", "The", "order", "of", "Records", "written", "is", "random", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L216-L245
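A round-trip sketch for `writeFile`: clean a collection, then save it back in the WOS file format. The path is illustrative and `dropBadEntries` is the method referenced by the `dropNonJournals` record above.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")    # illustrative path
RC.dropBadEntries()                          # drop unparsable Records first
RC.writeFile("cleaned-records.txt")          # same format as the WOS download
```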
19,861
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.writeBib
def writeBib(self, fname = None, maxStringLength = 1000, wosMode = False, reducedOutput = False, niceIDs = True): """Writes a bibTex entry to _fname_ for each `Record` in the collection. If the Record is of a journal article (PT J) the bibtext type is set to `'article'`, otherwise it is set to `'misc'`. The ID of the entry is the WOS number and all the Record's fields are given as entries with their long names. **Note** This is not meant to be used directly with LaTeX none of the special characters have been escaped and there are a large number of unnecessary fields provided. _niceID_ and _maxLength_ have been provided to make conversions easier only. **Note** Record entries that are lists have their values separated with the string `' and '`, as this is the way bibTex understands # Parameters _fname_ : `optional [str]` > Default `None`, The name of the file to be written. If not given one will be derived from the collection and the file will be written to . _maxStringLength_ : `optional [int]` > Default 1000, The max length for a continuous string. Most bibTex implementation only allow string to be up to 1000 characters ([source](https://www.cs.arizona.edu/~collberg/Teaching/07.231/BibTeX/bibtex.html)), this splits them up into substrings then uses the native string concatenation (the `'#'` character) to allow for longer strings _WOSMode_ : `optional [bool]` > Default `False`, if `True` the data produced will be unprocessed and use double curly braces. This is the style WOS produces bib files in and mostly macthes that. _restrictedOutput_ : `optional [bool]` > Default `False`, if `True` the tags output will be limited to: `'AF'`, `'BF'`, `'ED'`, `'TI'`, `'SO'`, `'LA'`, `'NR'`, `'TC'`, `'Z9'`, `'PU'`, `'J9'`, `'PY'`, `'PD'`, `'VL'`, `'IS'`, `'SU'`, `'PG'`, `'DI'`, `'D2'`, and `'UT'` _niceID_ : `optional [bool]` > Default `True`, if `True` the IDs used will be derived from the authors, publishing date and title, if `False` it will be the UT tag """ if fname: f = open(fname, mode = 'w', encoding = 'utf-8') else: f = open(self.name[:200] + '.bib', mode = 'w', encoding = 'utf-8') f.write("%This file was generated by the metaknowledge Python package.\n%The contents have been automatically generated and are likely to not work with\n%LaTeX without some human intervention. This file is meant for other automatic\n%systems and not to be used directly for making citations\n") #I figure this is worth mentioning, as someone will get annoyed at none of the special characters being escaped and how terrible some of the fields look to humans for R in self: try: f.write('\n\n') f.write(R.bibString(maxLength = maxStringLength, WOSMode = wosMode, restrictedOutput = reducedOutput, niceID = niceIDs)) except BadWOSRecord: pass except AttributeError: raise RecordsNotCompatible("The Record '{}', with ID '{}' does not support writing to bibtext files.".format(R, R.id)) f.close()
python
def writeBib(self, fname = None, maxStringLength = 1000, wosMode = False, reducedOutput = False, niceIDs = True): if fname: f = open(fname, mode = 'w', encoding = 'utf-8') else: f = open(self.name[:200] + '.bib', mode = 'w', encoding = 'utf-8') f.write("%This file was generated by the metaknowledge Python package.\n%The contents have been automatically generated and are likely to not work with\n%LaTeX without some human intervention. This file is meant for other automatic\n%systems and not to be used directly for making citations\n") #I figure this is worth mentioning, as someone will get annoyed at none of the special characters being escaped and how terrible some of the fields look to humans for R in self: try: f.write('\n\n') f.write(R.bibString(maxLength = maxStringLength, WOSMode = wosMode, restrictedOutput = reducedOutput, niceID = niceIDs)) except BadWOSRecord: pass except AttributeError: raise RecordsNotCompatible("The Record '{}', with ID '{}' does not support writing to bibtext files.".format(R, R.id)) f.close()
[ "def", "writeBib", "(", "self", ",", "fname", "=", "None", ",", "maxStringLength", "=", "1000", ",", "wosMode", "=", "False", ",", "reducedOutput", "=", "False", ",", "niceIDs", "=", "True", ")", ":", "if", "fname", ":", "f", "=", "open", "(", "fname...
Writes a bibTex entry to _fname_ for each `Record` in the collection. If the Record is of a journal article (PT J) the bibTex type is set to `'article'`, otherwise it is set to `'misc'`. The ID of the entry is the WOS number and all the Record's fields are given as entries with their long names. **Note** This is not meant to be used directly with LaTeX; none of the special characters have been escaped and there are a large number of unnecessary fields provided. _niceIDs_ and _maxStringLength_ have been provided to make conversions easier only. **Note** Record entries that are lists have their values separated with the string `' and '`, as this is the way bibTex understands them # Parameters _fname_ : `optional [str]` > Default `None`, the name of the file to be written. If not given one will be derived from the collection's name and the file will be written to the current directory. _maxStringLength_ : `optional [int]` > Default 1000, the max length for a continuous string. Most bibTex implementations only allow strings to be up to 1000 characters ([source](https://www.cs.arizona.edu/~collberg/Teaching/07.231/BibTeX/bibtex.html)); this splits them up into substrings then uses bibTex's native string concatenation (the `'#'` character) to allow for longer strings _wosMode_ : `optional [bool]` > Default `False`, if `True` the data produced will be unprocessed and use double curly braces. This is the style WOS produces bib files in and this mostly matches it. _reducedOutput_ : `optional [bool]` > Default `False`, if `True` the tags output will be limited to: `'AF'`, `'BF'`, `'ED'`, `'TI'`, `'SO'`, `'LA'`, `'NR'`, `'TC'`, `'Z9'`, `'PU'`, `'J9'`, `'PY'`, `'PD'`, `'VL'`, `'IS'`, `'SU'`, `'PG'`, `'DI'`, `'D2'`, and `'UT'` _niceIDs_ : `optional [bool]` > Default `True`, if `True` the IDs used will be derived from the authors, publishing date and title, if `False` it will be the UT tag
[ "Writes", "a", "bibTex", "entry", "to", "_fname_", "for", "each", "Record", "in", "the", "collection", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L373-L418
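A usage sketch for `writeBib` with the defaults from the record's signature spelled out; the paths are illustrative.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")    # illustrative path
# Keyword values are the defaults from the record's signature; the output is
# meant for conversion tools, not for direct use with LaTeX (see the note).
RC.writeBib("records.bib", maxStringLength=1000, wosMode=False,
            reducedOutput=False, niceIDs=True)
```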
19,862
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.makeDict
def makeDict(self, onlyTheseTags = None, longNames = False, raw = False, numAuthors = True, genderCounts = True): """Returns a dict with each key a tag and the values being lists of the values for each of the Records in the collection, `None` is given when there is no value and they are in the same order across each tag. When used with pandas: `pandas.DataFrame(RC.makeDict())` returns a data frame with each column a tag and each row a Record. # Parameters _onlyTheseTags_ : `optional [iterable]` > Default `None`, if an iterable (list, tuple, etc) only the tags in _onlyTheseTags_ will be used, if not given then all tags in the records are given. > If you want to use all known tags pass [metaknowledge.knownTagsList](./ExtendedRecord.html#metaknowledge.ExtendedRecord.tagProcessingFunc). _longNames_ : `optional [bool]` > Default `False`, if `True` will convert the tags to their longer names, otherwise the short 2 character ones will be used. _cleanedVal_ : `optional [bool]` > Default `True`, if `True` the processed values for each `Record`'s field will be provided, otherwise the raw values are given. _numAuthors_ : `optional [bool]` > Default `True`, if `True` adds the number of authors as the column `'numAuthors'`. """ if onlyTheseTags: for i in range(len(onlyTheseTags)): if onlyTheseTags[i] in fullToTagDict: onlyTheseTags[i] = fullToTagDict[onlyTheseTags[i]] retrievedFields = onlyTheseTags else: retrievedFields = [] for R in self: tagsLst = [t for t in R.keys() if t not in retrievedFields] retrievedFields += tagsLst if longNames: try: retrievedFields = [tagToFullDict[t] for t in retrievedFields] except KeyError: raise KeyError("One of the tags could not be converted to a long name.") retDict = {k : [] for k in retrievedFields} if numAuthors: retDict["num-Authors"] = [] if genderCounts: retDict.update({'num-Male' : [], 'num-Female' : [], 'num-Unknown' : []}) for R in self: if numAuthors: retDict["num-Authors"].append(len(R.get('authorsShort', []))) if genderCounts: m, f, u = R.authGenders(_countsTuple = True) retDict['num-Male'].append(m) retDict['num-Female'].append(f) retDict['num-Unknown'].append(u) for k, v in R.subDict(retrievedFields, raw = raw).items(): retDict[k].append(v) return retDict
python
def makeDict(self, onlyTheseTags = None, longNames = False, raw = False, numAuthors = True, genderCounts = True): if onlyTheseTags: for i in range(len(onlyTheseTags)): if onlyTheseTags[i] in fullToTagDict: onlyTheseTags[i] = fullToTagDict[onlyTheseTags[i]] retrievedFields = onlyTheseTags else: retrievedFields = [] for R in self: tagsLst = [t for t in R.keys() if t not in retrievedFields] retrievedFields += tagsLst if longNames: try: retrievedFields = [tagToFullDict[t] for t in retrievedFields] except KeyError: raise KeyError("One of the tags could not be converted to a long name.") retDict = {k : [] for k in retrievedFields} if numAuthors: retDict["num-Authors"] = [] if genderCounts: retDict.update({'num-Male' : [], 'num-Female' : [], 'num-Unknown' : []}) for R in self: if numAuthors: retDict["num-Authors"].append(len(R.get('authorsShort', []))) if genderCounts: m, f, u = R.authGenders(_countsTuple = True) retDict['num-Male'].append(m) retDict['num-Female'].append(f) retDict['num-Unknown'].append(u) for k, v in R.subDict(retrievedFields, raw = raw).items(): retDict[k].append(v) return retDict
[ "def", "makeDict", "(", "self", ",", "onlyTheseTags", "=", "None", ",", "longNames", "=", "False", ",", "raw", "=", "False", ",", "numAuthors", "=", "True", ",", "genderCounts", "=", "True", ")", ":", "if", "onlyTheseTags", ":", "for", "i", "in", "rang...
Returns a dict with each key a tag and the values being lists of the values for each of the Records in the collection, `None` is given when there is no value and they are in the same order across each tag. When used with pandas: `pandas.DataFrame(RC.makeDict())` returns a data frame with each column a tag and each row a Record. # Parameters _onlyTheseTags_ : `optional [iterable]` > Default `None`, if an iterable (list, tuple, etc) only the tags in _onlyTheseTags_ will be used, if not given then all tags in the records are given. > If you want to use all known tags pass [metaknowledge.knownTagsList](./ExtendedRecord.html#metaknowledge.ExtendedRecord.tagProcessingFunc). _longNames_ : `optional [bool]` > Default `False`, if `True` will convert the tags to their longer names, otherwise the short 2 character ones will be used. _raw_ : `optional [bool]` > Default `False`, if `False` the processed values for each `Record`'s field will be provided, otherwise the raw values are given. _numAuthors_ : `optional [bool]` > Default `True`, if `True` adds the number of authors as the column `'num-Authors'`. _genderCounts_ : `optional [bool]` > Default `True`, if `True` adds the estimated gender counts of each `Record`'s authors as the columns `'num-Male'`, `'num-Female'` and `'num-Unknown'`.
[ "Returns", "a", "dict", "with", "each", "key", "a", "tag", "and", "the", "values", "being", "lists", "of", "the", "values", "for", "each", "of", "the", "Records", "in", "the", "collection", "None", "is", "given", "when", "there", "is", "no", "value", "...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L698-L753
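The pandas round trip mentioned in the docstring, as a sketch; the load path is illustrative.

```python
import metaknowledge as mk
import pandas as pd

RC = mk.RecordCollection("savedrecs.txt")    # illustrative path
df = pd.DataFrame(RC.makeDict(longNames=True))
# One row per Record; 'num-Authors' comes from the numAuthors option.
print(df['num-Authors'].describe())
```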
19,863
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.getCitations
def getCitations(self, field = None, values = None, pandasFriendly = True, counts = True): """Creates a pandas ready dict with each row a different citation the contained Records and columns containing the original string, year, journal, author's name and the number of times it occured. There are also options to filter the output citations with _field_ and _values_ # Parameters _field_ : `optional str` > Default `None`, if given all citations missing the named field will be dropped. _values_ : `optional str or list[str]` > Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included. > e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]` _pandasFriendly_ : `optional bool` > Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict _counts_ : `optional bool` > Default `True`, if `False` the counts columns will be removed # Returns `dict` > A pandas ready dict with all the Citations """ retCites = [] if values is not None: if isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container): values = [values] for R in self: retCites += R.getCitations(field = field, values = values, pandasFriendly = False) if pandasFriendly: return _pandasPrep(retCites, counts) else: return list(set(retCites))
python
def getCitations(self, field = None, values = None, pandasFriendly = True, counts = True): retCites = [] if values is not None: if isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container): values = [values] for R in self: retCites += R.getCitations(field = field, values = values, pandasFriendly = False) if pandasFriendly: return _pandasPrep(retCites, counts) else: return list(set(retCites))
[ "def", "getCitations", "(", "self", ",", "field", "=", "None", ",", "values", "=", "None", ",", "pandasFriendly", "=", "True", ",", "counts", "=", "True", ")", ":", "retCites", "=", "[", "]", "if", "values", "is", "not", "None", ":", "if", "isinstanc...
Creates a pandas-ready dict with each row a different citation from the contained Records and columns containing the original string, year, journal, author's name and the number of times it occurred. There are also options to filter the output citations with _field_ and _values_ # Parameters _field_ : `optional str` > Default `None`, if given all citations missing the named field will be dropped. _values_ : `optional str or list[str]` > Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included. > e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]` _pandasFriendly_ : `optional bool` > Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict _counts_ : `optional bool` > Default `True`, if `False` the counts columns will be removed # Returns `dict` > A pandas ready dict with all the Citations
[ "Creates", "a", "pandas", "ready", "dict", "with", "each", "row", "a", "different", "citation", "the", "contained", "Records", "and", "columns", "containing", "the", "original", "string", "year", "journal", "author", "s", "name", "and", "the", "number", "of", ...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L900-L940
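A sketch of `getCitations` using the docstring's own year-filter example; the load path is illustrative.

```python
import metaknowledge as mk
import pandas as pd

RC = mk.RecordCollection("savedrecs.txt")    # illustrative path
# Only citations from 1990 or 1991, per the docstring's own example.
cites = pd.DataFrame(RC.getCitations(field='year', values=[1990, 1991]))
```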
19,864
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.networkCoCitation
def networkCoCitation(self, dropAnon = True, nodeType = "full", nodeInfo = True, fullInfo = False, weighted = True, dropNonJournals = False, count = True, keyWords = None, detailedCore = True, detailedCoreAttributes = False, coreOnly = False, expandedCore = False, addCR = False): """Creates a co-citation network for the RecordCollection. # Parameters _nodeType_ : `optional [str]` > One of `"full"`, `"original"`, `"author"`, `"journal"` or `"year"`. Specifies the value of the nodes in the graph. The default `"full"` causes the citations to be compared holistically using the [metaknowledge.Citation](./Citation.html#metaknowledge.citation.Citation) builtin comparison operators. `"original"` uses the raw original strings of the citations. While `"author"`, `"journal"` and `"year"` each use the author, journal and year respectively. _dropAnon_ : `optional [bool]` > default `True`, if `True` citations labeled anonymous are removed from the network _nodeInfo_ : `optional [bool]` > default `True`, if `True` an extra piece of information is stored with each node. The extra inforamtion is detemined by _nodeType_. _fullInfo_ : `optional [bool]` > default `False`, if `True` the original citation string is added to the node as an extra value, the attribute is labeled as fullCite _weighted_ : `optional [bool]` > default `True`, wether the edges are weighted. If `True` the edges are weighted by the number of citations. _dropNonJournals_ : `optional [bool]` > default `False`, wether to drop citations of non-journals _count_ : `optional [bool]` > default `True`, causes the number of occurrences of a node to be counted _keyWords_ : `optional [str] or [list[str]]` > A string or list of strings that the citations are checked against, if they contain any of the strings they are removed from the network _detailedCore_ : `optional [bool or iterable[WOS tag Strings]]` > default `True`, if `True` all Citations from the core (those of records in the RecordCollection) and the _nodeType_ is `'full'` all nodes from the core will be given info strings composed of information from the Record objects themselves. This is Equivalent to passing the list: `['AF', 'PY', 'TI', 'SO', 'VL', 'BP']`. > If _detailedCore_ is an iterable (That evaluates to `True`) of WOS Tags (or long names) The values of those tags will be used to make the info attribute. All > The resultant string is the values of each tag, with commas removed, seperated by `', '`, just like the info given by non-core Citations. Note that for tags like `'AF'` that return lists only the first entry in the list will be used. Also a second attribute is created for all nodes called inCore wich is a boolean describing if the node is in the core or not. > Note: _detailedCore_ is not identical to the _detailedInfo_ argument of [Recordcollection.networkCoAuthor()](#metaknowledge.RecordCollection.networkCoAuthor) _coreOnly_ : `optional [bool]` > default `False`, if `True` only Citations from the RecordCollection will be included in the network _expandedCore_ : `optional [bool]` > default `False`, if `True` all citations in the ouput graph that are records in the collection will be duplicated for each author. If the nodes are `"full"`, `"original"` or `"author"` this will result in new noded being created for the other options the results are **not** defined or tested. Edges will be created between each of the nodes for each record expanded, attributes will be copied from exiting nodes. 
# Returns `Networkx Graph` > A networkx graph with hashes as ID and co-citation as edges """ allowedTypes = ["full", "original", "author", "journal", "year"] if nodeType not in allowedTypes: raise RCValueError("{} is not an allowed nodeType.".format(nodeType)) coreValues = [] if bool(detailedCore): try: for tag in detailedCore: coreValues.append(normalizeToTag(tag)) except TypeError: coreValues = ['id', 'authorsFull', 'year', 'title', 'journal', 'volume', 'beginningPage'] tmpgrph = nx.Graph() pcount = 0 progArgs = (0, "Starting to make a co-citation network") if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: if coreOnly or coreValues or expandedCore: coreCitesDict = {R.createCitation() : R for R in self} if coreOnly: coreCites = coreCitesDict.keys() else: coreCites = None else: coreCitesDict = None coreCites = None for R in self: if PBar: pcount += 1 PBar.updateVal(pcount / len(self), "Analyzing: {}".format(R)) Cites = R.get('citations') if Cites: filteredCites = filterCites(Cites, nodeType, dropAnon, dropNonJournals, keyWords, coreCites) addToNetwork(tmpgrph, filteredCites, count, weighted, nodeType, nodeInfo , fullInfo, coreCitesDict, coreValues, detailedCoreAttributes, addCR, headNd = None) if expandedCore: if PBar: PBar.updateVal(.98, "Expanding core Records") expandRecs(tmpgrph, self, nodeType, weighted) if PBar: PBar.finish("Done making a co-citation network from {}".format(self)) return tmpgrph
python
def networkCoCitation(self, dropAnon = True, nodeType = "full", nodeInfo = True, fullInfo = False, weighted = True, dropNonJournals = False, count = True, keyWords = None, detailedCore = True, detailedCoreAttributes = False, coreOnly = False, expandedCore = False, addCR = False): allowedTypes = ["full", "original", "author", "journal", "year"] if nodeType not in allowedTypes: raise RCValueError("{} is not an allowed nodeType.".format(nodeType)) coreValues = [] if bool(detailedCore): try: for tag in detailedCore: coreValues.append(normalizeToTag(tag)) except TypeError: coreValues = ['id', 'authorsFull', 'year', 'title', 'journal', 'volume', 'beginningPage'] tmpgrph = nx.Graph() pcount = 0 progArgs = (0, "Starting to make a co-citation network") if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: if coreOnly or coreValues or expandedCore: coreCitesDict = {R.createCitation() : R for R in self} if coreOnly: coreCites = coreCitesDict.keys() else: coreCites = None else: coreCitesDict = None coreCites = None for R in self: if PBar: pcount += 1 PBar.updateVal(pcount / len(self), "Analyzing: {}".format(R)) Cites = R.get('citations') if Cites: filteredCites = filterCites(Cites, nodeType, dropAnon, dropNonJournals, keyWords, coreCites) addToNetwork(tmpgrph, filteredCites, count, weighted, nodeType, nodeInfo , fullInfo, coreCitesDict, coreValues, detailedCoreAttributes, addCR, headNd = None) if expandedCore: if PBar: PBar.updateVal(.98, "Expanding core Records") expandRecs(tmpgrph, self, nodeType, weighted) if PBar: PBar.finish("Done making a co-citation network from {}".format(self)) return tmpgrph
[ "def", "networkCoCitation", "(", "self", ",", "dropAnon", "=", "True", ",", "nodeType", "=", "\"full\"", ",", "nodeInfo", "=", "True", ",", "fullInfo", "=", "False", ",", "weighted", "=", "True", ",", "dropNonJournals", "=", "False", ",", "count", "=", "...
Creates a co-citation network for the RecordCollection. # Parameters _nodeType_ : `optional [str]` > One of `"full"`, `"original"`, `"author"`, `"journal"` or `"year"`. Specifies the value of the nodes in the graph. The default `"full"` causes the citations to be compared holistically using the [metaknowledge.Citation](./Citation.html#metaknowledge.citation.Citation) builtin comparison operators. `"original"` uses the raw original strings of the citations. While `"author"`, `"journal"` and `"year"` each use the author, journal and year respectively. _dropAnon_ : `optional [bool]` > default `True`, if `True` citations labeled anonymous are removed from the network _nodeInfo_ : `optional [bool]` > default `True`, if `True` an extra piece of information is stored with each node. The extra information is determined by _nodeType_. _fullInfo_ : `optional [bool]` > default `False`, if `True` the original citation string is added to the node as an extra value, the attribute is labeled as fullCite _weighted_ : `optional [bool]` > default `True`, whether the edges are weighted. If `True` the edges are weighted by the number of citations. _dropNonJournals_ : `optional [bool]` > default `False`, whether to drop citations of non-journals _count_ : `optional [bool]` > default `True`, causes the number of occurrences of a node to be counted _keyWords_ : `optional [str] or [list[str]]` > A string or list of strings that the citations are checked against, if they contain any of the strings they are removed from the network _detailedCore_ : `optional [bool or iterable[WOS tag Strings]]` > default `True`, if `True` and the _nodeType_ is `'full'`, all nodes from the core (those of records in the RecordCollection) will be given info strings composed of information from the Record objects themselves. This is equivalent to passing the list: `['AF', 'PY', 'TI', 'SO', 'VL', 'BP']`. > If _detailedCore_ is an iterable (that evaluates to `True`) of WOS tags (or long names) the values of those tags will be used to make the info attribute. > The resultant string is the values of each tag, with commas removed, separated by `', '`, just like the info given by non-core Citations. Note that for tags like `'AF'` that return lists only the first entry in the list will be used. Also a second attribute is created for all nodes called inCore which is a boolean describing if the node is in the core or not. > Note: _detailedCore_ is not identical to the _detailedInfo_ argument of [Recordcollection.networkCoAuthor()](#metaknowledge.RecordCollection.networkCoAuthor) _coreOnly_ : `optional [bool]` > default `False`, if `True` only Citations from the RecordCollection will be included in the network _expandedCore_ : `optional [bool]` > default `False`, if `True` all citations in the output graph that are records in the collection will be duplicated for each author. If the nodes are `"full"`, `"original"` or `"author"` this will result in new nodes being created; for the other options the results are **not** defined or tested. Edges will be created between each of the nodes for each record expanded, attributes will be copied from existing nodes. # Returns `Networkx Graph` > A networkx graph with hashes as ID and co-citation as edges
[ "Creates", "a", "co", "-", "citation", "network", "for", "the", "RecordCollection", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1075-L1177
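A usage sketch for `networkCoCitation`; the load path and parameter choices are illustrative.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")    # illustrative path
coCites = RC.networkCoCitation(nodeType='journal', dropNonJournals=True)
print(coCites.number_of_nodes(), coCites.number_of_edges())
```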
19,865
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.networkBibCoupling
def networkBibCoupling(self, weighted = True, fullInfo = False, addCR = False): """Creates a bibliographic coupling network based on citations for the RecordCollection. # Parameters _weighted_ : `optional bool` > Default `True`, if `True` the weight of the edges will be added to the network _fullInfo_ : `optional bool` > Default `False`, if `True` the full citation string will be added to each of the nodes of the network. # Returns `Networkx Graph` > A graph of the bibliographic coupling """ progArgs = (0, "Make a citation network for coupling") if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: citeGrph = self.networkCitation(weighted = False, directed = True, detailedCore = True, fullInfo = fullInfo, count = False, nodeInfo = True, addCR = addCR, _quiet = True) pcount = 0 pmax = len(citeGrph) PBar.updateVal(.2, "Starting to classify nodes") workingGrph = nx.Graph() couplingSet = set() for n, d in citeGrph.nodes(data = True): pcount += 1 PBar.updateVal(.2 + .4 * (pcount / pmax), "Classifying: {}".format(n)) if d['inCore']: workingGrph.add_node(n, **d) if citeGrph.in_degree(n) > 0: couplingSet.add(n) pcount = 0 pmax = len(couplingSet) for n in couplingSet: PBar.updateVal(.6 + .4 * (pcount / pmax), "Coupling: {}".format(n)) citesLst = list(citeGrph.in_edges(n)) for i, edgeOuter in enumerate(citesLst): outerNode = edgeOuter[0] for edgeInner in citesLst[i + 1:]: innerNode = edgeInner[0] if weighted and workingGrph.has_edge(outerNode, innerNode): workingGrph.edges[outerNode, innerNode]['weight'] += 1 elif weighted: workingGrph.add_edge(outerNode, innerNode, weight = 1) else: workingGrph.add_edge(outerNode, innerNode) PBar.finish("Done making a bib-coupling network from {}".format(self)) return workingGrph
python
def networkBibCoupling(self, weighted = True, fullInfo = False, addCR = False): progArgs = (0, "Make a citation network for coupling") if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: citeGrph = self.networkCitation(weighted = False, directed = True, detailedCore = True, fullInfo = fullInfo, count = False, nodeInfo = True, addCR = addCR, _quiet = True) pcount = 0 pmax = len(citeGrph) PBar.updateVal(.2, "Starting to classify nodes") workingGrph = nx.Graph() couplingSet = set() for n, d in citeGrph.nodes(data = True): pcount += 1 PBar.updateVal(.2 + .4 * (pcount / pmax), "Classifying: {}".format(n)) if d['inCore']: workingGrph.add_node(n, **d) if citeGrph.in_degree(n) > 0: couplingSet.add(n) pcount = 0 pmax = len(couplingSet) for n in couplingSet: PBar.updateVal(.6 + .4 * (pcount / pmax), "Coupling: {}".format(n)) citesLst = list(citeGrph.in_edges(n)) for i, edgeOuter in enumerate(citesLst): outerNode = edgeOuter[0] for edgeInner in citesLst[i + 1:]: innerNode = edgeInner[0] if weighted and workingGrph.has_edge(outerNode, innerNode): workingGrph.edges[outerNode, innerNode]['weight'] += 1 elif weighted: workingGrph.add_edge(outerNode, innerNode, weight = 1) else: workingGrph.add_edge(outerNode, innerNode) PBar.finish("Done making a bib-coupling network from {}".format(self)) return workingGrph
[ "def", "networkBibCoupling", "(", "self", ",", "weighted", "=", "True", ",", "fullInfo", "=", "False", ",", "addCR", "=", "False", ")", ":", "progArgs", "=", "(", "0", ",", "\"Make a citation network for coupling\"", ")", "if", "metaknowledge", ".", "VERBOSE_M...
Creates a bibliographic coupling network based on citations for the RecordCollection. # Parameters _weighted_ : `optional bool` > Default `True`, if `True` the weight of the edges will be added to the network _fullInfo_ : `optional bool` > Default `False`, if `True` the full citation string will be added to each of the nodes of the network. # Returns `Networkx Graph` > A graph of the bibliographic coupling
[ "Creates", "a", "bibliographic", "coupling", "network", "based", "on", "citations", "for", "the", "RecordCollection", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1294-L1348
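A usage sketch for `networkBibCoupling`; the load path is illustrative, and the `max()` line assumes the resulting graph has at least one edge.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")    # illustrative path
coupling = RC.networkBibCoupling(weighted=True)
# Edge weights count the references two core records share.
u, v, w = max(coupling.edges(data='weight'), key=lambda e: e[2])
```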
19,866
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.yearSplit
def yearSplit(self, startYear, endYear, dropMissingYears = True): """Creates a RecordCollection of Records from the years between _startYear_ and _endYear_ inclusive. # Parameters _startYear_ : `int` > The smallest year to be included in the returned RecordCollection _endYear_ : `int` > The largest year to be included in the returned RecordCollection _dropMissingYears_ : `optional [bool]` > Default `True`, if `True` Records with missing years will be dropped. If `False` a `TypeError` exception will be raised # Returns `RecordCollection` > A RecordCollection of Records from _startYear_ to _endYear_ """ recordsInRange = set() for R in self: try: if R.get('year') >= startYear and R.get('year') <= endYear: recordsInRange.add(R) except TypeError: if dropMissingYears: pass else: raise RCret = RecordCollection(recordsInRange, name = "{}({}-{})".format(self.name, startYear, endYear), quietStart = True) RCret._collectedTypes = self._collectedTypes.copy() return RCret
python
def yearSplit(self, startYear, endYear, dropMissingYears = True): recordsInRange = set() for R in self: try: if R.get('year') >= startYear and R.get('year') <= endYear: recordsInRange.add(R) except TypeError: if dropMissingYears: pass else: raise RCret = RecordCollection(recordsInRange, name = "{}({}-{})".format(self.name, startYear, endYear), quietStart = True) RCret._collectedTypes = self._collectedTypes.copy() return RCret
[ "def", "yearSplit", "(", "self", ",", "startYear", ",", "endYear", ",", "dropMissingYears", "=", "True", ")", ":", "recordsInRange", "=", "set", "(", ")", "for", "R", "in", "self", ":", "try", ":", "if", "R", ".", "get", "(", "'year'", ")", ">=", "...
Creates a RecordCollection of Records from the years between _startYear_ and _endYear_ inclusive. # Parameters _startYear_ : `int` > The smallest year to be included in the returned RecordCollection _endYear_ : `int` > The largest year to be included in the returned RecordCollection _dropMissingYears_ : `optional [bool]` > Default `True`, if `True` Records with missing years will be dropped. If `False` a `TypeError` exception will be raised # Returns `RecordCollection` > A RecordCollection of Records from _startYear_ to _endYear_
[ "Creates", "a", "RecordCollection", "of", "Records", "from", "the", "years", "between", "_startYear_", "and", "_endYear_", "inclusive", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1362-L1397
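A usage sketch for `yearSplit`; the load path is illustrative, and the printed name follows the `"{}({}-{})"` format visible in the record's code.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")    # illustrative path
nineties = RC.yearSplit(1990, 1999)          # inclusive on both ends
print(nineties.name)                         # "<collection name>(1990-1999)"
```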
19,867
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.localCiteStats
def localCiteStats(self, pandasFriendly = False, keyType = "citation"): """Returns a dict with all the citations in the CR field as keys and the number of times they occur as the values # Parameters _pandasFriendly_ : `optional [bool]` > default `False`, makes the output be a dict with two keys one `'Citations'` is the citations the other is their occurrence counts as `'Counts'`. _keyType_ : `optional [str]` > default `'citation'`, the type of key to use for the dictionary, the valid strings are `'citation'`, `'journal'`, `'year'` or `'author'`. IF changed from `'citation'` all citations matching the requested option will be contracted and their counts added together. # Returns `dict[str, int or Citation : int]` > A dictionary with keys as given by _keyType_ and integers giving their rates of occurrence in the collection """ count = 0 recCount = len(self) progArgs = (0, "Starting to get the local stats on {}s.".format(keyType)) if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: keyTypesLst = ["citation", "journal", "year", "author"] citesDict = {} if keyType not in keyTypesLst: raise TypeError("{} is not a valid key type, only '{}' or '{}' are.".format(keyType, "', '".join(keyTypesLst[:-1]), keyTypesLst[-1])) for R in self: rCites = R.get('citations') if PBar: count += 1 PBar.updateVal(count / recCount, "Analysing: {}".format(R.UT)) if rCites: for c in rCites: if keyType == keyTypesLst[0]: cVal = c else: cVal = getattr(c, keyType) if cVal is None: continue if cVal in citesDict: citesDict[cVal] += 1 else: citesDict[cVal] = 1 if PBar: PBar.finish("Done, {} {} fields analysed".format(len(citesDict), keyType)) if pandasFriendly: citeLst = [] countLst = [] for cite, occ in citesDict.items(): citeLst.append(cite) countLst.append(occ) return {"Citations" : citeLst, "Counts" : countLst} else: return citesDict
python
def localCiteStats(self, pandasFriendly = False, keyType = "citation"): count = 0 recCount = len(self) progArgs = (0, "Starting to get the local stats on {}s.".format(keyType)) if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: keyTypesLst = ["citation", "journal", "year", "author"] citesDict = {} if keyType not in keyTypesLst: raise TypeError("{} is not a valid key type, only '{}' or '{}' are.".format(keyType, "', '".join(keyTypesLst[:-1]), keyTypesLst[-1])) for R in self: rCites = R.get('citations') if PBar: count += 1 PBar.updateVal(count / recCount, "Analysing: {}".format(R.UT)) if rCites: for c in rCites: if keyType == keyTypesLst[0]: cVal = c else: cVal = getattr(c, keyType) if cVal is None: continue if cVal in citesDict: citesDict[cVal] += 1 else: citesDict[cVal] = 1 if PBar: PBar.finish("Done, {} {} fields analysed".format(len(citesDict), keyType)) if pandasFriendly: citeLst = [] countLst = [] for cite, occ in citesDict.items(): citeLst.append(cite) countLst.append(occ) return {"Citations" : citeLst, "Counts" : countLst} else: return citesDict
[ "def", "localCiteStats", "(", "self", ",", "pandasFriendly", "=", "False", ",", "keyType", "=", "\"citation\"", ")", ":", "count", "=", "0", "recCount", "=", "len", "(", "self", ")", "progArgs", "=", "(", "0", ",", "\"Starting to get the local stats on {}s.\""...
Returns a dict with all the citations in the CR field as keys and the number of times they occur as the values # Parameters _pandasFriendly_ : `optional [bool]` > default `False`, makes the output a dict with two keys: `'Citations'`, containing the citations, and `'Counts'`, containing their occurrence counts. _keyType_ : `optional [str]` > default `'citation'`, the type of key to use for the dictionary; the valid strings are `'citation'`, `'journal'`, `'year'` or `'author'`. If changed from `'citation'` all citations matching the requested option will be contracted and their counts added together. # Returns `dict[str, int or Citation : int]` > A dictionary with keys as given by _keyType_ and integers giving their rates of occurrence in the collection
[ "Returns", "a", "dict", "with", "all", "the", "citations", "in", "the", "CR", "field", "as", "keys", "and", "the", "number", "of", "times", "they", "occur", "as", "the", "values" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1399-L1457
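A usage sketch for `localCiteStats`, contracting citations by journal; the load path is illustrative.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")            # illustrative path
perJournal = RC.localCiteStats(keyType='journal')    # journal -> count
topFive = sorted(perJournal.items(), key=lambda kv: kv[1], reverse=True)[:5]
```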
19,868
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.localCitesOf
def localCitesOf(self, rec): """Takes in a Record, WOS string, citation string or Citation and returns a RecordCollection of all records that cite it. # Parameters _rec_ : `Record, str or Citation` > The object that is being cited # Returns `RecordCollection` > A `RecordCollection` containing only those `Records` that cite _rec_ """ localCites = [] if isinstance(rec, Record): recCite = rec.createCitation() if isinstance(rec, str): try: recCite = self.getID(rec) except ValueError: try: recCite = Citation(rec) except AttributeError: raise ValueError("{} is not a valid WOS string or a valid citation string".format(recCite)) else: if recCite is None: return RecordCollection(inCollection = localCites, name = "Records_citing_{}".format(rec), quietStart = True) else: recCite = recCite.createCitation() elif isinstance(rec, Citation): recCite = rec else: raise ValueError("{} is not a valid input, rec must be a Record, string or Citation object.".format(rec)) for R in self: rCites = R.get('citations') if rCites: for cite in rCites: if recCite == cite: localCites.append(R) break return RecordCollection(inCollection = localCites, name = "Records_citing_'{}'".format(rec), quietStart = True)
python
def localCitesOf(self, rec): localCites = [] if isinstance(rec, Record): recCite = rec.createCitation() if isinstance(rec, str): try: recCite = self.getID(rec) except ValueError: try: recCite = Citation(rec) except AttributeError: raise ValueError("{} is not a valid WOS string or a valid citation string".format(recCite)) else: if recCite is None: return RecordCollection(inCollection = localCites, name = "Records_citing_{}".format(rec), quietStart = True) else: recCite = recCite.createCitation() elif isinstance(rec, Citation): recCite = rec else: raise ValueError("{} is not a valid input, rec must be a Record, string or Citation object.".format(rec)) for R in self: rCites = R.get('citations') if rCites: for cite in rCites: if recCite == cite: localCites.append(R) break return RecordCollection(inCollection = localCites, name = "Records_citing_'{}'".format(rec), quietStart = True)
[ "def", "localCitesOf", "(", "self", ",", "rec", ")", ":", "localCites", "=", "[", "]", "if", "isinstance", "(", "rec", ",", "Record", ")", ":", "recCite", "=", "rec", ".", "createCitation", "(", ")", "if", "isinstance", "(", "rec", ",", "str", ")", ...
Takes in a Record, WOS string, citation string or Citation and returns a RecordCollection of all records that cite it. # Parameters _rec_ : `Record, str or Citation` > The object that is being cited # Returns `RecordCollection` > A `RecordCollection` containing only those `Records` that cite _rec_
[ "Takes", "in", "a", "Record", "WOS", "string", "citation", "string", "or", "Citation", "and", "returns", "a", "RecordCollection", "of", "all", "records", "that", "cite", "it", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1459-L1501
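A usage sketch for `localCitesOf`, seeded with `peek()` (the method the `writeFile` record above also uses); the load path is illustrative.

```python
import metaknowledge as mk

RC = mk.RecordCollection("savedrecs.txt")    # illustrative path
seed = RC.peek()                             # any Record from the collection
citing = RC.localCitesOf(seed)               # Records whose CR field cites it
```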
19,869
networks-lab/metaknowledge
metaknowledge/recordCollection.py
RecordCollection.citeFilter
def citeFilter(self, keyString = '', field = 'all', reverse = False, caseSensitive = False): """Filters `Records` by some string, _keyString_, in their citations and returns all `Records` with at least one citation possessing _keyString_ in the field given by _field_. # Parameters _keyString_ : `optional [str]` > Default `''`, gives the string to be searched for, if it is is blank then all citations with the specified field will be matched _field_ : `optional [str]` > Default `'all'`, gives the component of the citation to be looked at, it can be one of a few strings. The default is `'all'` which will cause the entire original `Citation` to be searched. It can be used to search across fields, e.g. `'1970, V2'` is a valid keystring The other options are: + `'author'`, searches the author field + `'year'`, searches the year field + `'journal'`, searches the journal field + `'V'`, searches the volume field + `'P'`, searches the page field + `'misc'`, searches all the remaining uncategorized information + `'anonymous'`, searches for anonymous `Citations`, _keyString_ is not ignored + `'bad'`, searches for bad citations, keyString is not used _reverse_ : `optional [bool]` > Default `False`, being set to `True` causes all `Records` not matching the query to be returned _caseSensitive_ : `optional [bool]` > Default `False`, if `True` causes the search across the original to be case sensitive, **only** the `'all'` option can be case sensitive """ retRecs = [] keyString = str(keyString) for R in self: try: if field == 'all': for cite in R.get('citations'): if caseSensitive: if keyString in cite.original: retRecs.append(R) break else: if keyString.upper() in cite.original.upper(): retRecs.append(R) break elif field == 'author': for cite in R.get('citations'): try: if keyString.upper() in cite.author.upper(): retRecs.append(R) break except AttributeError: pass elif field == 'journal': for cite in R.get('citations'): try: if keyString.upper() in cite.journal: retRecs.append(R) break except AttributeError: pass elif field == 'year': for cite in R.get('citations'): try: if int(keyString) == cite.year: retRecs.append(R) break except AttributeError: pass elif field == 'V': for cite in R.get('citations'): try: if keyString.upper() in cite.V: retRecs.append(R) break except AttributeError: pass elif field == 'P': for cite in R.get('citations'): try: if keyString.upper() in cite.P: retRecs.append(R) break except AttributeError: pass elif field == 'misc': for cite in R.get('citations'): try: if keyString.upper() in cite.misc: retRecs.append(R) break except AttributeError: pass elif field == 'anonymous': for cite in R.get('citations'): if cite.isAnonymous(): retRecs.append(R) break elif field == 'bad': for cite in R.get('citations'): if cite.bad: retRecs.append(R) break except TypeError: pass if reverse: excluded = [] for R in self: if R not in retRecs: excluded.append(R) return RecordCollection(inCollection = excluded, name = self.name, quietStart = True) else: return RecordCollection(inCollection = retRecs, name = self.name, quietStart = True)
python
def citeFilter(self, keyString = '', field = 'all', reverse = False, caseSensitive = False): retRecs = [] keyString = str(keyString) for R in self: try: if field == 'all': for cite in R.get('citations'): if caseSensitive: if keyString in cite.original: retRecs.append(R) break else: if keyString.upper() in cite.original.upper(): retRecs.append(R) break elif field == 'author': for cite in R.get('citations'): try: if keyString.upper() in cite.author.upper(): retRecs.append(R) break except AttributeError: pass elif field == 'journal': for cite in R.get('citations'): try: if keyString.upper() in cite.journal: retRecs.append(R) break except AttributeError: pass elif field == 'year': for cite in R.get('citations'): try: if int(keyString) == cite.year: retRecs.append(R) break except AttributeError: pass elif field == 'V': for cite in R.get('citations'): try: if keyString.upper() in cite.V: retRecs.append(R) break except AttributeError: pass elif field == 'P': for cite in R.get('citations'): try: if keyString.upper() in cite.P: retRecs.append(R) break except AttributeError: pass elif field == 'misc': for cite in R.get('citations'): try: if keyString.upper() in cite.misc: retRecs.append(R) break except AttributeError: pass elif field == 'anonymous': for cite in R.get('citations'): if cite.isAnonymous(): retRecs.append(R) break elif field == 'bad': for cite in R.get('citations'): if cite.bad: retRecs.append(R) break except TypeError: pass if reverse: excluded = [] for R in self: if R not in retRecs: excluded.append(R) return RecordCollection(inCollection = excluded, name = self.name, quietStart = True) else: return RecordCollection(inCollection = retRecs, name = self.name, quietStart = True)
[ "def", "citeFilter", "(", "self", ",", "keyString", "=", "''", ",", "field", "=", "'all'", ",", "reverse", "=", "False", ",", "caseSensitive", "=", "False", ")", ":", "retRecs", "=", "[", "]", "keyString", "=", "str", "(", "keyString", ")", "for", "R...
Filters `Records` by some string, _keyString_, in their citations and returns all `Records` with at least one citation possessing _keyString_ in the field given by _field_. # Parameters _keyString_ : `optional [str]` > Default `''`, gives the string to be searched for, if it is blank then all citations with the specified field will be matched _field_ : `optional [str]` > Default `'all'`, gives the component of the citation to be looked at, it can be one of a few strings. The default is `'all'` which will cause the entire original `Citation` to be searched. It can be used to search across fields, e.g. `'1970, V2'` is a valid keystring The other options are: + `'author'`, searches the author field + `'year'`, searches the year field + `'journal'`, searches the journal field + `'V'`, searches the volume field + `'P'`, searches the page field + `'misc'`, searches all the remaining uncategorized information + `'anonymous'`, searches for anonymous `Citations`, _keyString_ is ignored + `'bad'`, searches for bad citations, _keyString_ is not used _reverse_ : `optional [bool]` > Default `False`, being set to `True` causes all `Records` not matching the query to be returned _caseSensitive_ : `optional [bool]` > Default `False`, if `True` causes the search across the original to be case sensitive; **only** the `'all'` option can be case sensitive
[ "Filters", "Records", "by", "some", "string", "_keyString_", "in", "their", "citations", "and", "returns", "all", "Records", "with", "at", "least", "one", "citation", "possessing", "_keyString_", "in", "the", "field", "given", "by", "_field_", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L1503-L1615
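A minimal usage sketch for citeFilter above; the file path and search values are illustrative assumptions, not part of the library.

import metaknowledge as mk

RC = mk.RecordCollection('./savedrecs/')  # hypothetical directory of WOS export files

natureCiters = RC.citeFilter('NATURE', field = 'journal')   # Records citing anything in Nature
from1990 = RC.citeFilter('1990', field = 'year')            # Records citing work from 1990
nonNature = RC.citeFilter('NATURE', field = 'journal', reverse = True)  # everything else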
19,870
networks-lab/metaknowledge
metaknowledge/citation.py
filterNonJournals
def filterNonJournals(citesLst, invert = False): """Removes the `Citations` from _citesLst_ that are not journals # Parameters _citesLst_ : `list [Citation]` > A list of citations to be filtered _invert_ : `optional [bool]` > Default `False`, if `True` non-journals will be kept instead of journals # Returns `list [Citation]` > A filtered list of Citations from _citesLst_ """ retCites = [] for c in citesLst: if c.isJournal(): if not invert: retCites.append(c) elif invert: retCites.append(c) return retCites
python
def filterNonJournals(citesLst, invert = False): retCites = [] for c in citesLst: if c.isJournal(): if not invert: retCites.append(c) elif invert: retCites.append(c) return retCites
[ "def", "filterNonJournals", "(", "citesLst", ",", "invert", "=", "False", ")", ":", "retCites", "=", "[", "]", "for", "c", "in", "citesLst", ":", "if", "c", ".", "isJournal", "(", ")", ":", "if", "not", "invert", ":", "retCites", ".", "append", "(", ...
Removes the `Citations` from _citesLst_ that are not journals # Parameters _citesLst_ : `list [Citation]` > A list of citations to be filtered _invert_ : `optional [bool]` > Default `False`, if `True` non-journals will be kept instead of journals # Returns `list [Citation]` > A filtered list of Citations from _citesLst_
[ "Removes", "the", "Citations", "from", "_citesLst_", "that", "are", "not", "journals" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/citation.py#L364-L391
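A sketch of filterNonJournals in use, assuming it is exported at the package level (it lives in metaknowledge/citation.py); the collection path reuses the hypothetical one from the earlier sketch.

import metaknowledge as mk

RC = mk.RecordCollection('./savedrecs/')   # hypothetical path
R = next(iter(RC))                         # any single Record
cites = R.get('citations') or []
journalCites = mk.filterNonJournals(cites)               # keep only journal citations
otherCites = mk.filterNonJournals(cites, invert = True)  # keep only non-journal citations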
19,871
networks-lab/metaknowledge
metaknowledge/mkCollection.py
Collection.add
def add(self, elem): """ Adds _elem_ to the collection. # Parameters _elem_ : `object` > The object to be added """ if isinstance(elem, self._allowedTypes): self._collection.add(elem) self._collectedTypes.add(type(elem).__name__) else: raise CollectionTypeError("{} can only contain '{}', '{}' is not allowed.".format(type(self).__name__, self._allowedTypes, elem))
python
def add(self, elem): if isinstance(elem, self._allowedTypes): self._collection.add(elem) self._collectedTypes.add(type(elem).__name__) else: raise CollectionTypeError("{} can only contain '{}', '{}' is not allowed.".format(type(self).__name__, self._allowedTypes, elem))
[ "def", "add", "(", "self", ",", "elem", ")", ":", "if", "isinstance", "(", "elem", ",", "self", ".", "_allowedTypes", ")", ":", "self", ".", "_collection", ".", "add", "(", "elem", ")", "self", ".", "_collectedTypes", ".", "add", "(", "type", "(", ...
Adds _elem_ to the collection. # Parameters _elem_ : `object` > The object to be added
[ "Adds", "_elem_", "to", "the", "collection", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L120-L133
19,872
networks-lab/metaknowledge
metaknowledge/mkCollection.py
Collection.remove
def remove(self, elem):
    """Removes _elem_ from the collection, will raise a KeyError if _elem_ is missing

    # Parameters

    _elem_ : `object`

    > The object to be removed
    """
    try:
        return self._collection.remove(elem)
    except KeyError:
        raise KeyError("'{}' was not found in the {}: '{}'.".format(elem, type(self).__name__, self)) from None
python
def remove(self, elem): try: return self._collection.remove(elem) except KeyError: raise KeyError("'{}' was not found in the {}: '{}'.".format(elem, type(self).__name__, self)) from None
[ "def", "remove", "(", "self", ",", "elem", ")", ":", "try", ":", "return", "self", ".", "_collection", ".", "remove", "(", "elem", ")", "except", "KeyError", ":", "raise", "KeyError", "(", "\"'{}' was not found in the {}: '{}'.\"", ".", "format", "(", "elem"...
Removes _elem_ from the collection, will raise a KeyError if _elem_ is missing

# Parameters

_elem_ : `object`

> The object to be removed
[ "Removes", "_elem_", "from", "the", "collection", "will", "raise", "a", "KeyError", "if", "_elem_", "is", "missing" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L147-L159
19,873
networks-lab/metaknowledge
metaknowledge/mkCollection.py
Collection.clear
def clear(self):
    """Removes all elements from the collection and resets the error handling
    """
    self.bad = False
    self.errors = {}
    self._collection.clear()
python
def clear(self):
    self.bad = False
    self.errors = {}
    self._collection.clear()
[ "def", "clear", "(", "self", ")", ":", "self", ".", "bad", "=", "False", "self", ".", "errors", "=", "{", "}", "self", ".", "_collection", ".", "clear", "(", ")" ]
Removes all elements from the collection and resets the error handling
[ "Removes", "all", "elements", "from", "the", "collection", "and", "resets", "the", "error", "handling" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L161-L166
19,874
networks-lab/metaknowledge
metaknowledge/mkCollection.py
Collection.pop
def pop(self): """Removes a random element from the collection and returns it # Returns `object` > A random object from the collection """ try: return self._collection.pop() except KeyError: raise KeyError("Nothing left in the {}: '{}'.".format(type(self).__name__, self)) from None
python
def pop(self): try: return self._collection.pop() except KeyError: raise KeyError("Nothing left in the {}: '{}'.".format(type(self).__name__, self)) from None
[ "def", "pop", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_collection", ".", "pop", "(", ")", "except", "KeyError", ":", "raise", "KeyError", "(", "\"Nothing left in the {}: '{}'.\"", ".", "format", "(", "type", "(", "self", ")", ".", "__n...
Removes a random element from the collection and returns it # Returns `object` > A random object from the collection
[ "Removes", "a", "random", "element", "from", "the", "collection", "and", "returns", "it" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L168-L180
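Taken together, add, remove and pop above give Collection its set-like interface; a sketch, with C standing in for any concrete Collection instance and R for an element of an allowed type (both hypothetical here):

C.add(R)         # raises CollectionTypeError if R is not an allowed type
C.remove(R)      # raises KeyError if R is absent
item = C.pop()   # removes and returns an arbitrary element; KeyError when empty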
19,875
networks-lab/metaknowledge
metaknowledge/mkCollection.py
Collection.copy
def copy(self):
    """Creates a shallow copy of the collection

    # Returns

    `Collection`

    > A copy of the `Collection`
    """
    collectedCopy = copy.copy(self)
    # Re-copy the mutable attributes onto the copy (not back onto self)
    # so the two collections are fully decoupled
    collectedCopy._collection = copy.copy(collectedCopy._collection)
    collectedCopy._collectedTypes = copy.copy(self._collectedTypes)
    collectedCopy._allowedTypes = copy.copy(self._allowedTypes)
    collectedCopy.errors = copy.copy(collectedCopy.errors)
    return collectedCopy
python
def copy(self):
    collectedCopy = copy.copy(self)
    # Re-copy the mutable attributes onto the copy (not back onto self)
    # so the two collections are fully decoupled
    collectedCopy._collection = copy.copy(collectedCopy._collection)
    collectedCopy._collectedTypes = copy.copy(self._collectedTypes)
    collectedCopy._allowedTypes = copy.copy(self._allowedTypes)
    collectedCopy.errors = copy.copy(collectedCopy.errors)
    return collectedCopy
[ "def", "copy", "(", "self", ")", ":", "collectedCopy", "=", "copy", ".", "copy", "(", "self", ")", "collectedCopy", ".", "_collection", "=", "copy", ".", "copy", "(", "collectedCopy", ".", "_collection", ")", "self", ".", "_collectedTypes", "=", "copy", ...
Creates a shallow copy of the collection # Returns `Collection` > A copy of the `Collection`
[ "Creates", "a", "shallow", "copy", "of", "the", "collection" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L279-L293
19,876
networks-lab/metaknowledge
metaknowledge/mkCollection.py
Collection.chunk
def chunk(self, maxSize):
    """Splits the `Collection` into _maxSize_ size or smaller `Collections`

    # Parameters

    _maxSize_ : `int`

    > The maximum number of elements in a returned `Collection`

    # Returns

    `list [Collection]`

    > A list of `Collections` that if all merged (`|` operator) would create the original
    """
    chunks = []
    currentSize = maxSize + 1
    for i in self:
        if currentSize >= maxSize:
            # The new chunk already holds one element, so start the count at 1
            # (starting at 0 would let chunks grow to maxSize + 1 elements)
            currentSize = 1
            chunks.append(type(self)({i}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
        else:
            chunks[-1].add(i)
            currentSize += 1
    return chunks
python
def chunk(self, maxSize):
    chunks = []
    currentSize = maxSize + 1
    for i in self:
        if currentSize >= maxSize:
            # The new chunk already holds one element, so start the count at 1
            # (starting at 0 would let chunks grow to maxSize + 1 elements)
            currentSize = 1
            chunks.append(type(self)({i}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
        else:
            chunks[-1].add(i)
            currentSize += 1
    return chunks
[ "def", "chunk", "(", "self", ",", "maxSize", ")", ":", "chunks", "=", "[", "]", "currentSize", "=", "maxSize", "+", "1", "for", "i", "in", "self", ":", "if", "currentSize", ">=", "maxSize", ":", "currentSize", "=", "0", "chunks", ".", "append", "(", ...
Splits the `Collection` into _maxSize_ size or smaller `Collections`

# Parameters

_maxSize_ : `int`

> The maximum number of elements in a returned `Collection`

# Returns

`list [Collection]`

> A list of `Collections` that if all merged (`|` operator) would create the original
[ "Splits", "the", "Collection", "into", "_maxSize_", "size", "or", "smaller", "Collections" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L309-L334
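A sketch of chunk, reusing the hypothetical RC from the earlier sketches and assuming it holds 250 Records:

pieces = RC.chunk(100)   # non-destructive: RC keeps all 250 Records
# pieces is a list of Collections of at most 100 Records each;
# merging them all with the | operator recreates the original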
19,877
networks-lab/metaknowledge
metaknowledge/mkCollection.py
Collection.split
def split(self, maxSize):
    """Destructively splits the `Collection` into _maxSize_ size or smaller `Collections`. The source `Collection` will be empty after this operation

    # Parameters

    _maxSize_ : `int`

    > The maximum number of elements in a returned `Collection`

    # Returns

    `list [Collection]`

    > A list of `Collections` that if all merged (`|` operator) would create the original
    """
    chunks = []
    currentSize = maxSize + 1
    try:
        while True:
            if currentSize >= maxSize:
                # Start the count at 1 for the element used to create the chunk
                currentSize = 1
                chunks.append(type(self)({self.pop()}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
            else:
                chunks[-1].add(self.pop())
                currentSize += 1
    except KeyError:
        self.clear()
        self.name = 'Emptied-{}'.format(self.name)
    return chunks
python
def split(self, maxSize):
    chunks = []
    currentSize = maxSize + 1
    try:
        while True:
            if currentSize >= maxSize:
                # Start the count at 1 for the element used to create the chunk
                currentSize = 1
                chunks.append(type(self)({self.pop()}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
            else:
                chunks[-1].add(self.pop())
                currentSize += 1
    except KeyError:
        self.clear()
        self.name = 'Emptied-{}'.format(self.name)
    return chunks
[ "def", "split", "(", "self", ",", "maxSize", ")", ":", "chunks", "=", "[", "]", "currentSize", "=", "maxSize", "+", "1", "try", ":", "while", "True", ":", "if", "currentSize", ">=", "maxSize", ":", "currentSize", "=", "0", "chunks", ".", "append", "(...
Destructively splits the `Collection` into _maxSize_ size or smaller `Collections`. The source `Collection` will be empty after this operation

# Parameters

_maxSize_ : `int`

> The maximum number of elements in a returned `Collection`

# Returns

`list [Collection]`

> A list of `Collections` that if all merged (`|` operator) would create the original
[ "Destructively", "splits", "the", "Collection", "into", "_maxSize_", "size", "or", "smaller", "Collections", ".", "The", "source", "Collection", "will", "be", "empty", "after", "this", "operation" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L336-L364
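split is the destructive counterpart to chunk above; a sketch on the same hypothetical RC:

pieces = RC.split(100)   # RC is emptied and renamed 'Emptied-...'
assert len(RC) == 0      # all Records now live only in the returned pieces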
19,878
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.containsID
def containsID(self, idVal):
    """Checks if the collected items contain the given _idVal_

    # Parameters

    _idVal_ : `str`

    > The queried id string

    # Returns

    `bool`

    > `True` if the item is in the collection
    """
    for i in self:
        if i.id == idVal:
            return True
    return False
python
def containsID(self, idVal): for i in self: if i.id == idVal: return True return False
[ "def", "containsID", "(", "self", ",", "idVal", ")", ":", "for", "i", "in", "self", ":", "if", "i", ".", "id", "==", "idVal", ":", "return", "True", "return", "False" ]
Checks if the collected items contain the given _idVal_

# Parameters

_idVal_ : `str`

> The queried id string

# Returns

`bool`

> `True` if the item is in the collection
[ "Checks", "if", "the", "collected", "items", "contain", "the", "given", "_idVal_" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L420-L438
19,879
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.discardID
def discardID(self, idVal):
    """Checks if the collected items contain the given _idVal_ and discards it if it is found, will not raise an exception if the item is not found

    # Parameters

    _idVal_ : `str`

    > The discarded id string
    """
    for i in self:
        if i.id == idVal:
            self._collection.discard(i)
            return
python
def discardID(self, idVal): for i in self: if i.id == idVal: self._collection.discard(i) return
[ "def", "discardID", "(", "self", ",", "idVal", ")", ":", "for", "i", "in", "self", ":", "if", "i", ".", "id", "==", "idVal", ":", "self", ".", "_collection", ".", "discard", "(", "i", ")", "return" ]
Checks if the collected items contain the given _idVal_ and discards it if it is found, will not raise an exception if the item is not found

# Parameters

_idVal_ : `str`

> The discarded id string
[ "Checks", "if", "the", "collected", "items", "contain", "the", "given", "_idVal_", "and", "discards", "it", "if", "it", "is", "found", "will", "not", "raise", "an", "exception", "if", "the", "item", "is", "not", "found" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L440-L452
19,880
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.removeID
def removeID(self, idVal):
    """Checks if the collected items contain the given _idVal_ and removes it if it is found, will raise a `KeyError` if the item is not found

    # Parameters

    _idVal_ : `str`

    > The removed id string
    """
    for i in self:
        if i.id == idVal:
            self._collection.remove(i)
            return
    raise KeyError("A Record with the ID '{}' was not found in the RecordCollection: '{}'.".format(idVal, self))
python
def removeID(self, idVal): for i in self: if i.id == idVal: self._collection.remove(i) return raise KeyError("A Record with the ID '{}' was not found in the RecordCollection: '{}'.".format(idVal, self))
[ "def", "removeID", "(", "self", ",", "idVal", ")", ":", "for", "i", "in", "self", ":", "if", "i", ".", "id", "==", "idVal", ":", "self", ".", "_collection", ".", "remove", "(", "i", ")", "return", "raise", "KeyError", "(", "\"A Record with the ID '{}' ...
Checks if the collected items contain the given _idVal_ and removes it if it is found, will raise a `KeyError` if the item is not found

# Parameters

_idVal_ : `str`

> The removed id string
[ "Checks", "if", "the", "collected", "items", "contain", "the", "given", "_idVal_", "and", "removes", "it", "if", "it", "is", "found", "will", "raise", "a", "KeyError", "if", "the", "item", "is", "not", "found" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L454-L467
19,881
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.badEntries
def badEntries(self): """Creates a new collection of the same type with only the bad entries # Returns `CollectionWithIDs` > A collection of only the bad entries """ badEntries = set() for i in self: if i.bad: badEntries.add(i) return type(self)(badEntries, quietStart = True)
python
def badEntries(self): badEntries = set() for i in self: if i.bad: badEntries.add(i) return type(self)(badEntries, quietStart = True)
[ "def", "badEntries", "(", "self", ")", ":", "badEntries", "=", "set", "(", ")", "for", "i", "in", "self", ":", "if", "i", ".", "bad", ":", "badEntries", ".", "add", "(", "i", ")", "return", "type", "(", "self", ")", "(", "badEntries", ",", "quiet...
Creates a new collection of the same type with only the bad entries # Returns `CollectionWithIDs` > A collection of only the bad entries
[ "Creates", "a", "new", "collection", "of", "the", "same", "type", "with", "only", "the", "bad", "entries" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L489-L502
19,882
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.dropBadEntries
def dropBadEntries(self): """Removes all the bad entries from the collection """ self._collection = set((i for i in self if not i.bad)) self.bad = False self.errors = {}
python
def dropBadEntries(self): self._collection = set((i for i in self if not i.bad)) self.bad = False self.errors = {}
[ "def", "dropBadEntries", "(", "self", ")", ":", "self", ".", "_collection", "=", "set", "(", "(", "i", "for", "i", "in", "self", "if", "not", "i", ".", "bad", ")", ")", "self", ".", "bad", "=", "False", "self", ".", "errors", "=", "{", "}" ]
Removes all the bad entries from the collection
[ "Removes", "all", "the", "bad", "entries", "from", "the", "collection" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L504-L509
19,883
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.tags
def tags(self):
    """Creates a set of all the tags of the contained items

    # Returns

    `set [str]`

    > A set of all the tags
    """
    tags = set()
    for i in self:
        tags |= set(i.keys())
    return tags
python
def tags(self): tags = set() for i in self: tags |= set(i.keys()) return tags
[ "def", "tags", "(", "self", ")", ":", "tags", "=", "set", "(", ")", "for", "i", "in", "self", ":", "tags", "|=", "set", "(", "i", ".", "keys", "(", ")", ")", "return", "tags" ]
Creates a set of all the tags of the contained items

# Returns

`set [str]`

> A set of all the tags
[ "Creates", "a", "set", "of", "all", "the", "tags", "of", "the", "contained", "items" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L511-L523
19,884
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.rankedSeries
def rankedSeries(self, tag, outputFile = None, giveCounts = True, giveRanks = False, greatestFirst = True, pandasMode = True, limitTo = None):
    """Creates a pandas-ready dict of the ordered list of all the values of _tag_, ranked by their number of occurrences. A list can also be returned with the counts or ranks added, or it can be written to a file.

    # Parameters

    _tag_ : `str`

    > The tag to be ranked

    _outputFile_ : `optional str`

    > A file path to write a csv with 2 columns, one the tag values the other their counts

    _giveCounts_ : `optional bool`

    > Default `True`, if `True` the returned list will be composed of tuples, the first values being the tag value and the second their counts. This supersedes _giveRanks_.

    _giveRanks_ : `optional bool`

    > Default `False`, if `True` and _giveCounts_ is `False`, the returned list will be composed of tuples, the first values being the tag value and the second their ranks. This is superseded by _giveCounts_.

    _greatestFirst_ : `optional bool`

    > Default `True`, if `True` the returned list will be ordered with the highest ranked value first, otherwise the lowest ranked will be first.

    _pandasMode_ : `optional bool`

    > Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list

    _limitTo_ : `optional list[values]`

    > Default `None`, if a list is provided only those values in the list will be counted or returned

    # Returns

    `dict[str:list[value]] or list[str]`

    > A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
    """
    if giveRanks and giveCounts:
        raise mkException("rankedSeries cannot return counts and ranks; only one of giveRanks or giveCounts can be True.")
    seriesDict = {}
    for R in self:
        #This should be faster than using get, since get is a wrapper for __getitem__
        try:
            val = R[tag]
        except KeyError:
            continue
        if not isinstance(val, list):
            val = [val]
        for entry in val:
            if limitTo and entry not in limitTo:
                continue
            if entry in seriesDict:
                seriesDict[entry] += 1
            else:
                seriesDict[entry] = 1
    seriesList = sorted(seriesDict.items(), key = lambda x: x[1], reverse = greatestFirst)
    if outputFile is not None:
        with open(outputFile, 'w') as f:
            writer = csv.writer(f, dialect = 'excel')
            writer.writerow((str(tag), 'count'))
            writer.writerows(seriesList)
    if giveCounts and not pandasMode:
        return seriesList
    elif giveRanks or pandasMode:
        if not greatestFirst:
            seriesList.reverse()
        currentRank = 1
        retList = []
        panDict = {'entry' : [], 'count' : [], 'rank' : []}
        try:
            currentCount = seriesList[0][1]
        except IndexError:
            #Empty series so no need to loop
            pass
        else:
            for valString, count in seriesList:
                if currentCount > count:
                    currentRank += 1
                    currentCount = count
                if pandasMode:
                    panDict['entry'].append(valString)
                    panDict['count'].append(count)
                    panDict['rank'].append(currentRank)
                else:
                    retList.append((valString, currentRank))
        if not greatestFirst:
            retList.reverse()
        if pandasMode:
            return panDict
        else:
            return retList
    else:
        return [e for e,c in seriesList]
python
def rankedSeries(self, tag, outputFile = None, giveCounts = True, giveRanks = False, greatestFirst = True, pandasMode = True, limitTo = None): if giveRanks and giveCounts: raise mkException("rankedSeries cannot return counts and ranks only one of giveRanks or giveCounts can be True.") seriesDict = {} for R in self: #This should be faster than using get, since get is a wrapper for __getitem__ try: val = R[tag] except KeyError: continue if not isinstance(val, list): val = [val] for entry in val: if limitTo and entry not in limitTo: continue if entry in seriesDict: seriesDict[entry] += 1 else: seriesDict[entry] = 1 seriesList = sorted(seriesDict.items(), key = lambda x: x[1], reverse = greatestFirst) if outputFile is not None: with open(outputFile, 'w') as f: writer = csv.writer(f, dialect = 'excel') writer.writerow((str(tag), 'count')) writer.writerows(seriesList) if giveCounts and not pandasMode: return seriesList elif giveRanks or pandasMode: if not greatestFirst: seriesList.reverse() currentRank = 1 retList = [] panDict = {'entry' : [], 'count' : [], 'rank' : []} try: currentCount = seriesList[0][1] except IndexError: #Empty series so no need to loop pass else: for valString, count in seriesList: if currentCount > count: currentRank += 1 currentCount = count if pandasMode: panDict['entry'].append(valString) panDict['count'].append(count) panDict['rank'].append(currentRank) else: retList.append((valString, currentRank)) if not greatestFirst: retList.reverse() if pandasMode: return panDict else: return retList else: return [e for e,c in seriesList]
[ "def", "rankedSeries", "(", "self", ",", "tag", ",", "outputFile", "=", "None", ",", "giveCounts", "=", "True", ",", "giveRanks", "=", "False", ",", "greatestFirst", "=", "True", ",", "pandasMode", "=", "True", ",", "limitTo", "=", "None", ")", ":", "i...
Creates a pandas-ready dict of the ordered list of all the values of _tag_, ranked by their number of occurrences. A list can also be returned with the counts or ranks added, or it can be written to a file.

# Parameters

_tag_ : `str`

> The tag to be ranked

_outputFile_ : `optional str`

> A file path to write a csv with 2 columns, one the tag values the other their counts

_giveCounts_ : `optional bool`

> Default `True`, if `True` the returned list will be composed of tuples, the first values being the tag value and the second their counts. This supersedes _giveRanks_.

_giveRanks_ : `optional bool`

> Default `False`, if `True` and _giveCounts_ is `False`, the returned list will be composed of tuples, the first values being the tag value and the second their ranks. This is superseded by _giveCounts_.

_greatestFirst_ : `optional bool`

> Default `True`, if `True` the returned list will be ordered with the highest ranked value first, otherwise the lowest ranked will be first.

_pandasMode_ : `optional bool`

> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list

_limitTo_ : `optional list[values]`

> Default `None`, if a list is provided only those values in the list will be counted or returned

# Returns

`dict[str:list[value]] or list[str]`

> A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
[ "Creates", "an", "pandas", "dict", "of", "the", "ordered", "list", "of", "all", "the", "values", "of", "_tag_", "with", "and", "ranked", "by", "their", "number", "of", "occurrences", ".", "A", "list", "can", "also", "be", "returned", "with", "the", "the"...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L569-L663
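A rankedSeries sketch on the hypothetical RC from the earlier sketches; 'journal' is an assumed tag name for illustration:

import pandas

ranked = RC.rankedSeries('journal')        # pandasMode dict with keys: entry, count, rank
df = pandas.DataFrame(ranked)
topTen = RC.rankedSeries('journal', pandasMode = False)[:10]   # list of (value, count) tuples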
19,885
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.timeSeries
def timeSeries(self, tag = None, outputFile = None, giveYears = True, greatestFirst = True, limitTo = False, pandasMode = True):
    """Creates a pandas-ready dict of the ordered list of all the values of _tag_, ranked by the year they occurred in; multiple year occurrences will create multiple entries. A list can also be returned with the years added, or it can be written to a file. If no _tag_ is given the `Records` in the collection will be used

    # Parameters

    _tag_ : `optional str`

    > Default `None`, if provided the tag will be ordered

    _outputFile_ : `optional str`

    > A file path to write a csv with 2 columns, one the tag values the other their years

    _giveYears_ : `optional bool`

    > Default `True`, if `True` the returned list will be composed of tuples, the first values being the tag value and the second their years.

    _greatestFirst_ : `optional bool`

    > Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.

    _pandasMode_ : `optional bool`

    > Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list

    _limitTo_ : `optional list[values]`

    > Default `False`, if a list is provided only those values in the list will be counted or returned

    # Returns

    `dict[str:list[value]] or list[str]`

    > A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
    """
    seriesDict = {}
    for R in self:
        #This should be faster than using get, since get is a wrapper for __getitem__
        try:
            year = R['year']
        except KeyError:
            continue
        if tag is None:
            seriesDict[R] = {year : 1}
        else:
            try:
                val = R[tag]
            except KeyError:
                continue
            if not isinstance(val, list):
                val = [val]
            for entry in val:
                if limitTo and entry not in limitTo:
                    continue
                if entry in seriesDict:
                    try:
                        seriesDict[entry][year] += 1
                    except KeyError:
                        seriesDict[entry][year] = 1
                else:
                    seriesDict[entry] = {year : 1}
    seriesList = []
    for e, yd in seriesDict.items():
        seriesList += [(e, y) for y in yd.keys()]
    seriesList = sorted(seriesList, key = lambda x: x[1], reverse = greatestFirst)
    if outputFile is not None:
        with open(outputFile, 'w') as f:
            writer = csv.writer(f, dialect = 'excel')
            writer.writerow((str(tag), 'years'))
            writer.writerows(((k,'|'.join((str(y) for y in v))) for k,v in seriesDict.items()))
    if pandasMode:
        panDict = {'entry' : [], 'count' : [], 'year' : []}
        for entry, year in seriesList:
            panDict['entry'].append(entry)
            panDict['year'].append(year)
            panDict['count'].append(seriesDict[entry][year])
        return panDict
    elif giveYears:
        return seriesList
    else:
        return [e for e,c in seriesList]
python
def timeSeries(self, tag = None, outputFile = None, giveYears = True, greatestFirst = True, limitTo = False, pandasMode = True): seriesDict = {} for R in self: #This should be faster than using get, since get is a wrapper for __getitem__ try: year = R['year'] except KeyError: continue if tag is None: seriesDict[R] = {year : 1} else: try: val = R[tag] except KeyError: continue if not isinstance(val, list): val = [val] for entry in val: if limitTo and entry not in limitTo: continue if entry in seriesDict: try: seriesDict[entry][year] += 1 except KeyError: seriesDict[entry][year] = 1 else: seriesDict[entry] = {year : 1} seriesList = [] for e, yd in seriesDict.items(): seriesList += [(e, y) for y in yd.keys()] seriesList = sorted(seriesList, key = lambda x: x[1], reverse = greatestFirst) if outputFile is not None: with open(outputFile, 'w') as f: writer = csv.writer(f, dialect = 'excel') writer.writerow((str(tag), 'years')) writer.writerows(((k,'|'.join((str(y) for y in v))) for k,v in seriesDict.items())) if pandasMode: panDict = {'entry' : [], 'count' : [], 'year' : []} for entry, year in seriesList: panDict['entry'].append(entry) panDict['year'].append(year) panDict['count'].append(seriesDict[entry][year]) return panDict elif giveYears: return seriesList else: return [e for e,c in seriesList]
[ "def", "timeSeries", "(", "self", ",", "tag", "=", "None", ",", "outputFile", "=", "None", ",", "giveYears", "=", "True", ",", "greatestFirst", "=", "True", ",", "limitTo", "=", "False", ",", "pandasMode", "=", "True", ")", ":", "seriesDict", "=", "{",...
Creates a pandas-ready dict of the ordered list of all the values of _tag_, ranked by the year they occurred in; multiple year occurrences will create multiple entries. A list can also be returned with the years added, or it can be written to a file. If no _tag_ is given the `Records` in the collection will be used

# Parameters

_tag_ : `optional str`

> Default `None`, if provided the tag will be ordered

_outputFile_ : `optional str`

> A file path to write a csv with 2 columns, one the tag values the other their years

_giveYears_ : `optional bool`

> Default `True`, if `True` the returned list will be composed of tuples, the first values being the tag value and the second their years.

_greatestFirst_ : `optional bool`

> Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.

_pandasMode_ : `optional bool`

> Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list

_limitTo_ : `optional list[values]`

> Default `False`, if a list is provided only those values in the list will be counted or returned

# Returns

`dict[str:list[value]] or list[str]`

> A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
[ "Creates", "an", "pandas", "dict", "of", "the", "ordered", "list", "of", "all", "the", "values", "of", "_tag_", "with", "and", "ranked", "by", "the", "year", "the", "occurred", "in", "multiple", "year", "occurrences", "will", "create", "multiple", "entries",...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L665-L747
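A timeSeries sketch, again with an assumed tag name and the hypothetical RC:

import pandas

ts = pandas.DataFrame(RC.timeSeries('journal'))
# one row per (value, year) pair, columns: entry, year, count
counts1990 = ts[ts['year'] == 1990]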
19,886
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.cooccurrenceCounts
def cooccurrenceCounts(self, keyTag, *countedTags):
    """Counts the number of times values from any of the _countedTags_ occurs with _keyTag_. The counts are returned as a dictionary with the values of _keyTag_ mapping to dictionaries with each of the _countedTags_ values mapping to their counts.

    # Parameters

    _keyTag_ : `str`

    > The tag used as the key for the returned dictionary

    _*countedTags_ : `str, str, str, ...`

    > The tags used as the key for the returned dictionary's values

    # Returns

    `dict[str:dict[str:int]]`

    > The dictionary of counts
    """
    if not isinstance(keyTag, str):
        raise TagError("'{}' is not a string it cannot be used as a tag.".format(keyTag))
    if len(countedTags) < 1:
        # The error must be raised, not just constructed, to actually signal it
        raise TagError("You need to provide at least one tag")
    for tag in countedTags:
        if not isinstance(tag, str):
            raise TagError("'{}' is not a string it cannot be used as a tag.".format(tag))
    occurenceDict = {}
    progArgs = (0, "Starting to count the co-occurrences of '{}' and '{}'".format(keyTag, "','".join(countedTags)))
    if metaknowledge.VERBOSE_MODE:
        progKwargs = {'dummy' : False}
    else:
        progKwargs = {'dummy' : True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        for i, R in enumerate(self):
            PBar.updateVal(i / len(self), "Analyzing {}".format(R))
            keyVal = R.get(keyTag)
            if keyVal is None:
                continue
            if not isinstance(keyVal, list):
                keyVal = [keyVal]
            for key in keyVal:
                if key not in occurenceDict:
                    occurenceDict[key] = {}
            for tag in countedTags:
                tagval = R.get(tag)
                if tagval is None:
                    continue
                if not isinstance(tagval, list):
                    tagval = [tagval]
                for val in tagval:
                    for key in keyVal:
                        try:
                            occurenceDict[key][val] += 1
                        except KeyError:
                            occurenceDict[key][val] = 1
        PBar.finish("Done extracting the co-occurrences of '{}' and '{}'".format(keyTag, "','".join(countedTags)))
    return occurenceDict
python
def cooccurrenceCounts(self, keyTag, *countedTags):
    if not isinstance(keyTag, str):
        raise TagError("'{}' is not a string it cannot be used as a tag.".format(keyTag))
    if len(countedTags) < 1:
        # The error must be raised, not just constructed, to actually signal it
        raise TagError("You need to provide at least one tag")
    for tag in countedTags:
        if not isinstance(tag, str):
            raise TagError("'{}' is not a string it cannot be used as a tag.".format(tag))
    occurenceDict = {}
    progArgs = (0, "Starting to count the co-occurrences of '{}' and '{}'".format(keyTag, "','".join(countedTags)))
    if metaknowledge.VERBOSE_MODE:
        progKwargs = {'dummy' : False}
    else:
        progKwargs = {'dummy' : True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        for i, R in enumerate(self):
            PBar.updateVal(i / len(self), "Analyzing {}".format(R))
            keyVal = R.get(keyTag)
            if keyVal is None:
                continue
            if not isinstance(keyVal, list):
                keyVal = [keyVal]
            for key in keyVal:
                if key not in occurenceDict:
                    occurenceDict[key] = {}
            for tag in countedTags:
                tagval = R.get(tag)
                if tagval is None:
                    continue
                if not isinstance(tagval, list):
                    tagval = [tagval]
                for val in tagval:
                    for key in keyVal:
                        try:
                            occurenceDict[key][val] += 1
                        except KeyError:
                            occurenceDict[key][val] = 1
        PBar.finish("Done extracting the co-occurrences of '{}' and '{}'".format(keyTag, "','".join(countedTags)))
    return occurenceDict
[ "def", "cooccurrenceCounts", "(", "self", ",", "keyTag", ",", "*", "countedTags", ")", ":", "if", "not", "isinstance", "(", "keyTag", ",", "str", ")", ":", "raise", "TagError", "(", "\"'{}' is not a string it cannot be used as a tag.\"", ".", "format", "(", "key...
Counts the number of times values from any of the _countedTags_ occurs with _keyTag_. The counts are returned as a dictionary with the values of _keyTag_ mapping to dictionaries with each of the _countedTags_ values mapping to their counts.

# Parameters

_keyTag_ : `str`

> The tag used as the key for the returned dictionary

_*countedTags_ : `str, str, str, ...`

> The tags used as the key for the returned dictionary's values

# Returns

`dict[str:dict[str:int]]`

> The dictionary of counts
[ "Counts", "the", "number", "of", "times", "values", "from", "any", "of", "the", "_countedTags_", "occurs", "with", "_keyTag_", ".", "The", "counts", "are", "retuned", "as", "a", "dictionary", "with", "the", "values", "of", "_keyTag_", "mapping", "to", "dicti...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L749-L806
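A cooccurrenceCounts sketch; the tag names are assumptions and RC is the hypothetical collection from earlier:

counts = RC.cooccurrenceCounts('journal', 'keywords', 'authorsFull')
# counts maps each journal to a dict of co-occurring keyword/author counts, e.g.
# counts['NATURE']['some keyword'] -> int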
19,887
networks-lab/metaknowledge
metaknowledge/diffusion.py
makeNodeID
def makeNodeID(Rec, ndType, extras = None):
    """Helper to make a node ID, extras is currently not used"""
    if ndType == 'raw':
        recID = Rec
    else:
        recID = Rec.get(ndType)
    # A list cannot be a node ID, so convert it to a hashable tuple
    if isinstance(recID, list):
        recID = tuple(recID)
    extraDict = {}
    if extras:
        for tag in extras:
            if tag == "raw":
                extraDict['Tag'] = Rec
            else:
                extraDict['Tag'] = Rec.get(tag)
    return recID, extraDict
python
def makeNodeID(Rec, ndType, extras = None):
    if ndType == 'raw':
        recID = Rec
    else:
        recID = Rec.get(ndType)
    # A list cannot be a node ID, so convert it to a hashable tuple
    if isinstance(recID, list):
        recID = tuple(recID)
    extraDict = {}
    if extras:
        for tag in extras:
            if tag == "raw":
                extraDict['Tag'] = Rec
            else:
                extraDict['Tag'] = Rec.get(tag)
    return recID, extraDict
[ "def", "makeNodeID", "(", "Rec", ",", "ndType", ",", "extras", "=", "None", ")", ":", "if", "ndType", "==", "'raw'", ":", "recID", "=", "Rec", "else", ":", "recID", "=", "Rec", ".", "get", "(", "ndType", ")", "if", "recID", "is", "None", ":", "pa...
Helper to make a node ID, extras is currently not used
[ "Helper", "to", "make", "a", "node", "ID", "extras", "is", "currently", "not", "used" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/diffusion.py#L351-L370
19,888
networks-lab/metaknowledge
docs/mkdsupport.py
pandoc_process
def pandoc_process(app, what, name, obj, options, lines):
    """Convert docstrings in Markdown into reStructuredText using pandoc
    """
    if not lines:
        return None

    input_format = app.config.mkdsupport_use_parser
    output_format = 'rst'

    # Since the default encoding for sphinx.ext.autodoc is unicode and pypandoc.convert_text, which will always return a
    # unicode string, expects a unicode or utf-8 encoded string, there is no need for dealing with encoding
    text = SEP.join(lines)
    text = pypandoc.convert_text(text, output_format, format=input_format)

    # 'lines' in Sphinx is a list of strings and must be modified in place
    del lines[:]
    lines.extend(text.split(SEP))
python
def pandoc_process(app, what, name, obj, options, lines):
    if not lines:
        return None

    input_format = app.config.mkdsupport_use_parser
    output_format = 'rst'

    # Since the default encoding for sphinx.ext.autodoc is unicode and pypandoc.convert_text, which will always return a
    # unicode string, expects a unicode or utf-8 encoded string, there is no need for dealing with encoding
    text = SEP.join(lines)
    text = pypandoc.convert_text(text, output_format, format=input_format)

    # 'lines' in Sphinx is a list of strings and must be modified in place
    del lines[:]
    lines.extend(text.split(SEP))
[ "def", "pandoc_process", "(", "app", ",", "what", ",", "name", ",", "obj", ",", "options", ",", "lines", ")", ":", "if", "not", "lines", ":", "return", "None", "input_format", "=", "app", ".", "config", ".", "mkdsupport_use_parser", "output_format", "=", ...
Convert docstrings in Markdown into reStructuredText using pandoc
[ "Convert", "docstrings", "in", "Markdown", "into", "reStructuredText", "using", "pandoc" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/docs/mkdsupport.py#L26-L43
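A sketch of wiring pandoc_process above into a Sphinx build; the config value name comes from the function itself, the rest is standard Sphinx extension boilerplate:

# in conf.py
def setup(app):
    app.add_config_value('mkdsupport_use_parser', 'markdown', 'env')
    app.connect('autodoc-process-docstring', pandoc_process)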
19,889
networks-lab/metaknowledge
metaknowledge/medline/tagProcessing/specialFunctions.py
beginningPage
def beginningPage(R):
    """As pages may not be given as numbers this is as accurate as this function can be"""
    p = R['PG']
    if p.startswith('suppl '):
        p = p[6:]
    return p.split(' ')[0].split('-')[0].replace(';', '')
python
def beginningPage(R): p = R['PG'] if p.startswith('suppl '): p = p[6:] return p.split(' ')[0].split('-')[0].replace(';', '')
[ "def", "beginningPage", "(", "R", ")", ":", "p", "=", "R", "[", "'PG'", "]", "if", "p", ".", "startswith", "(", "'suppl '", ")", ":", "p", "=", "p", "[", "6", ":", "]", "return", "p", ".", "split", "(", "' '", ")", "[", "0", "]", ".", "spli...
As pages may not be given as numbers this is as accurate as this function can be
[ "As", "pages", "may", "not", "be", "given", "as", "numbers", "this", "is", "as", "accurate", "as", "this", "function", "can", "be" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/tagProcessing/specialFunctions.py#L27-L32
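A worked example of beginningPage, passing a plain dict in place of a MEDLINE record for illustration:

beginningPage({'PG': 'suppl 12-9'})   # 'suppl ' stripped, range and ';' removed -> '12'
beginningPage({'PG': '101-110'})      # -> '101'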
19,890
networks-lab/metaknowledge
metaknowledge/mkRecord.py
Record.copy
def copy(self): """Correctly copies the `Record` # Returns `Record` > A completely decoupled copy of the original """ c = copy.copy(self) c._fieldDict = c._fieldDict.copy() return c
python
def copy(self): c = copy.copy(self) c._fieldDict = c._fieldDict.copy() return c
[ "def", "copy", "(", "self", ")", ":", "c", "=", "copy", ".", "copy", "(", "self", ")", "c", ".", "_fieldDict", "=", "c", ".", "_fieldDict", ".", "copy", "(", ")", "return", "c" ]
Correctly copies the `Record` # Returns `Record` > A completely decoupled copy of the original
[ "Correctly", "copies", "the", "Record" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L202-L213
19,891
networks-lab/metaknowledge
metaknowledge/mkRecord.py
ExtendedRecord.values
def values(self, raw = False): """Like `values` for dicts but with a `raw` option # Parameters _raw_ : `optional [bool]` > Default `False`, if `True` the `ValuesView` contains the raw values # Returns `ValuesView` > The values of the record """ if raw: return self._fieldDict.values() else: return collections.abc.Mapping.values(self)
python
def values(self, raw = False): if raw: return self._fieldDict.values() else: return collections.abc.Mapping.values(self)
[ "def", "values", "(", "self", ",", "raw", "=", "False", ")", ":", "if", "raw", ":", "return", "self", ".", "_fieldDict", ".", "values", "(", ")", "else", ":", "return", "collections", ".", "abc", ".", "Mapping", ".", "values", "(", "self", ")" ]
Like `values` for dicts but with a `raw` option # Parameters _raw_ : `optional [bool]` > Default `False`, if `True` the `ValuesView` contains the raw values # Returns `ValuesView` > The values of the record
[ "Like", "values", "for", "dicts", "but", "with", "a", "raw", "option" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L402-L420
19,892
networks-lab/metaknowledge
metaknowledge/mkRecord.py
ExtendedRecord.items
def items(self, raw = False):
    """Like `items` for dicts but with a `raw` option

    # Parameters

    _raw_ : `optional [bool]`

    > Default `False`, if `True` the `ItemsView` contains the raw values as the values

    # Returns

    `ItemsView`

    > The key-value pairs of the record
    """
    if raw:
        return self._fieldDict.items()
    else:
        return collections.abc.Mapping.items(self)
python
def items(self, raw = False): if raw: return self._fieldDict.items() else: return collections.abc.Mapping.items(self)
[ "def", "items", "(", "self", ",", "raw", "=", "False", ")", ":", "if", "raw", ":", "return", "self", ".", "_fieldDict", ".", "items", "(", ")", "else", ":", "return", "collections", ".", "abc", ".", "Mapping", ".", "items", "(", "self", ")" ]
Like `items` for dicts but with a `raw` option

# Parameters

_raw_ : `optional [bool]`

> Default `False`, if `True` the `ItemsView` contains the raw values as the values

# Returns

`ItemsView`

> The key-value pairs of the record
[ "Like", "items", "for", "dicts", "but", "with", "a", "raw", "option" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L424-L442
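The raw flag on values and items above exposes the unparsed field strings; a sketch on a hypothetical Record R:

dict(R.items())             # processed values (lists, ints, Citations, ...)
dict(R.items(raw = True))   # the raw strings as read from the source file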
19,893
networks-lab/metaknowledge
metaknowledge/mkRecord.py
ExtendedRecord.getCitations
def getCitations(self, field = None, values = None, pandasFriendly = True): """Creates a pandas ready dict with each row a different citation and columns containing the original string, year, journal and author's name. There are also options to filter the output citations with _field_ and _values_ # Parameters _field_ : `optional str` > Default `None`, if given all citations missing the named field will be dropped. _values_ : `optional str or list[str]` > Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included. > e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]` _pandasFriendly_ : `optional bool` > Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict # Returns `dict` > A pandas ready dict with all the citations """ retCites = [] if values is not None: if isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container): values = [values] if field is not None: for cite in self.get('citations', []): try: targetVal = getattr(cite, field) if values is None or targetVal in values: retCites.append(cite) except AttributeError: pass else: retCites = self.get('citations', []) if pandasFriendly: return _pandasPrep(retCites, False) return retCites
python
def getCitations(self, field = None, values = None, pandasFriendly = True): retCites = [] if values is not None: if isinstance(values, (str, int, float)) or not isinstance(values, collections.abc.Container): values = [values] if field is not None: for cite in self.get('citations', []): try: targetVal = getattr(cite, field) if values is None or targetVal in values: retCites.append(cite) except AttributeError: pass else: retCites = self.get('citations', []) if pandasFriendly: return _pandasPrep(retCites, False) return retCites
[ "def", "getCitations", "(", "self", ",", "field", "=", "None", ",", "values", "=", "None", ",", "pandasFriendly", "=", "True", ")", ":", "retCites", "=", "[", "]", "if", "values", "is", "not", "None", ":", "if", "isinstance", "(", "values", ",", "(",...
Creates a pandas ready dict with each row a different citation and columns containing the original string, year, journal and author's name. There are also options to filter the output citations with _field_ and _values_ # Parameters _field_ : `optional str` > Default `None`, if given all citations missing the named field will be dropped. _values_ : `optional str or list[str]` > Default `None`, if _field_ is also given only those citations with one of the strings given in _values_ will be included. > e.g. to get only citations from 1990 or 1991: `field = year, values = [1991, 1990]` _pandasFriendly_ : `optional bool` > Default `True`, if `False` a list of the citations will be returned instead of the more complicated pandas dict # Returns `dict` > A pandas ready dict with all the citations
[ "Creates", "a", "pandas", "ready", "dict", "with", "each", "row", "a", "different", "citation", "and", "columns", "containing", "the", "original", "string", "year", "journal", "and", "author", "s", "name", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L546-L589
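A getCitations sketch on a single hypothetical Record R:

import pandas

df = pandas.DataFrame(R.getCitations())   # one row per citation
early = R.getCitations(field = 'year', values = [1990, 1991], pandasFriendly = False)
# early is a plain list of Citation objects from 1990 or 1991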
19,894
networks-lab/metaknowledge
metaknowledge/mkRecord.py
ExtendedRecord.subDict
def subDict(self, tags, raw = False):
    """Creates a dict of values of _tags_ from the Record. The tags are the keys and the values are the values. If the tag is missing the value will be `None`.

    # Parameters

    _tags_ : `list[str]`

    > The list of tags requested

    _raw_ : `optional [bool]`

    > Default `False`, if `True` the returned values of the dict will be unprocessed

    # Returns

    `dict`

    > A dictionary with the keys _tags_ and the values from the record
    """
    retDict = {}
    for tag in tags:
        retDict[tag] = self.get(tag, raw = raw)
    return retDict
python
def subDict(self, tags, raw = False): retDict = {} for tag in tags: retDict[tag] = self.get(tag, raw = raw) return retDict
[ "def", "subDict", "(", "self", ",", "tags", ",", "raw", "=", "False", ")", ":", "retDict", "=", "{", "}", "for", "tag", "in", "tags", ":", "retDict", "[", "tag", "]", "=", "self", ".", "get", "(", "tag", ",", "raw", "=", "raw", ")", "return", ...
Creates a dict of values of _tags_ from the Record. The tags are the keys and the values are the values. If the tag is missing the value will be `None`.

# Parameters

_tags_ : `list[str]`

> The list of tags requested

_raw_ : `optional [bool]`

> Default `False`, if `True` the returned values of the dict will be unprocessed

# Returns

`dict`

> A dictionary with the keys _tags_ and the values from the record
[ "Creates", "a", "dict", "of", "values", "of", "_tags_", "from", "the", "Record", ".", "The", "tags", "are", "the", "keys", "and", "the", "values", "are", "the", "values", ".", "If", "the", "tag", "is", "missing", "the", "value", "will", "be", "None", ...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L591-L613
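A subDict sketch; the tag names are assumptions:

R.subDict(['title', 'year'])         # {'title': ..., 'year': ...}, None where missing
R.subDict(['PT', 'AF'], raw = True)  # same keys, unprocessed values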
19,895
networks-lab/metaknowledge
metaknowledge/mkRecord.py
ExtendedRecord.authGenders
def authGenders(self, countsOnly = False, fractionsMode = False, _countsTuple = False):
    """Creates a dict mapping the names of all the authors to `'Male'`, `'Female'` or `'Unknown'`.

    # Parameters

    _countsOnly_ : `optional bool`

    > Default `False`, if `True` the counts of each gender will be given instead of the name-to-gender mapping

    _fractionsMode_ : `optional bool`

    > Default `False`, if `True` the fractions (counts divided by the total number of authors) will be given instead of the name-to-gender mapping. This supersedes _countsOnly_

    # Returns

    `dict[str:str or int]`

    > The mapping of author's names to genders, or of genders to counts
    """
    authDict = recordGenders(self)
    if _countsTuple or countsOnly or fractionsMode:
        rawList = list(authDict.values())
        countsList = []
        for k in ('Male','Female','Unknown'):
            countsList.append(rawList.count(k))
        if fractionsMode:
            tot = sum(countsList)
            for i in range(3):
                countsList.append(countsList.pop(0) / tot)
        if _countsTuple:
            return tuple(countsList)
        else:
            return {'Male' : countsList[0], 'Female' : countsList[1], 'Unknown' : countsList[2]}
    else:
        return authDict
python
def authGenders(self, countsOnly = False, fractionsMode = False, _countsTuple = False): authDict = recordGenders(self) if _countsTuple or countsOnly or fractionsMode: rawList = list(authDict.values()) countsList = [] for k in ('Male','Female','Unknown'): countsList.append(rawList.count(k)) if fractionsMode: tot = sum(countsList) for i in range(3): countsList.append(countsList.pop(0) / tot) if _countsTuple: return tuple(countsList) else: return {'Male' : countsList[0], 'Female' : countsList[1], 'Unknown' : countsList[2]} else: return authDict
[ "def", "authGenders", "(", "self", ",", "countsOnly", "=", "False", ",", "fractionsMode", "=", "False", ",", "_countsTuple", "=", "False", ")", ":", "authDict", "=", "recordGenders", "(", "self", ")", "if", "_countsTuple", "or", "countsOnly", "or", "fraction...
Creates a dict mapping the names of all the authors to `'Male'`, `'Female'` or `'Unknown'`.

# Parameters

_countsOnly_ : `optional bool`

> Default `False`, if `True` the counts of each gender will be given instead of the name-to-gender mapping

_fractionsMode_ : `optional bool`

> Default `False`, if `True` the fractions (counts divided by the total number of authors) will be given instead of the name-to-gender mapping. This supersedes _countsOnly_

# Returns

`dict[str:str or int]`

> The mapping of author's names to genders, or of genders to counts
[ "Creates", "a", "dict", "mapping", "the", "names", "of", "all", "the", "authors", "to", "Male", "Female", "or", "Unknown", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkRecord.py#L660-L695
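An authGenders sketch on a hypothetical Record R; the names shown are placeholders and results depend on the bundled name-to-gender mapping:

R.authGenders()                      # {'Doe, Jane': 'Female', 'Roe, R': 'Unknown', ...}
R.authGenders(countsOnly = True)     # {'Male': 1, 'Female': 1, 'Unknown': 1}
R.authGenders(fractionsMode = True)  # same keys, counts divided by the author total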
19,896
networks-lab/metaknowledge
metaknowledge/proquest/proQuestHandlers.py
proQuestParser
def proQuestParser(proFile):
    """Parses a ProQuest file, _proFile_, to extract the individual entries.

    A ProQuest file has three sections, first a list of the contained entries, second the full metadata and finally a bibtex formatted entry for the record. This parser only uses the first two as the bibtex contains no information that the second section does not. Also, the first section is only used to verify the second section. The returned [ProQuestRecord](../classes/ProQuestRecord.html#metaknowledge.proquest.ProQuestRecord) contains the data from the second section, with the same key strings as ProQuest uses and the unlabeled sections are called in order, `'Name'`, `'Author'` and `'url'`.

    # Parameters

    _proFile_ : `str`

    > A path to a valid ProQuest file, use [isProQuestFile](#metaknowledge.proquest.proQuestHandlers.isProQuestFile) to verify

    # Returns

    `set[ProQuestRecord]`

    > Records for each of the entries
    """
    #assumes the file is ProQuest
    nameDict = {}
    recSet = set()
    error = None
    lineNum = 0
    try:
        with open(proFile, 'r', encoding = 'utf-8') as openfile:
            f = enumerate(openfile, start = 1)
            for i in range(12):
                lineNum, line = next(f)
            # f is a file so it *should* end, or at least cause a parser error eventually
            while True:
                lineNum, line = next(f)
                lineNum, line = next(f)
                if line == 'Bibliography\n':
                    for i in range(3):
                        lineNum, line = next(f)
                    break
                else:
                    s = line.split('. ')
                    nameDict[int(s[0])] = '. '.join(s[1:])[:-1]
            while True:
                lineNum, line = next(f)
                if line == 'Bibliography\n':
                    break
                elif line.startswith('Document '):
                    n = int(line[9:].split(' of ')[0])
                    R = ProQuestRecord(f, sFile = proFile, sLine = lineNum)
                    if R.get('Title') != nameDict[n]:
                        error = BadProQuestFile("The numbering of the titles at the beginning of the file does not match the records inside. Line {} has a record titled '{}' with number {}, the name should be '{}'.".format(lineNum, R.get('Title', "TITLE MISSING"), n, nameDict[n]))
                        raise StopIteration
                    recSet.add(R)
                    lineNum, line = next(f)
                else:
                    #Parsing failed
                    error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}. It is likely that the separators between the records are incorrect".format(proFile, lineNum))
                    raise StopIteration
    except (UnicodeDecodeError, StopIteration, ValueError) as e:
        if error is None:
            error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}.\nThe error was: '{}'".format(proFile, lineNum, e))
    return recSet, error
python
def proQuestParser(proFile):
    #assumes the file is ProQuest
    nameDict = {}
    recSet = set()
    error = None
    lineNum = 0
    try:
        with open(proFile, 'r', encoding = 'utf-8') as openfile:
            f = enumerate(openfile, start = 1)
            for i in range(12):
                lineNum, line = next(f)
            # f is a file so it *should* end, or at least cause a parser error eventually
            while True:
                lineNum, line = next(f)
                lineNum, line = next(f)
                if line == 'Bibliography\n':
                    for i in range(3):
                        lineNum, line = next(f)
                    break
                else:
                    s = line.split('. ')
                    nameDict[int(s[0])] = '. '.join(s[1:])[:-1]
            while True:
                lineNum, line = next(f)
                if line == 'Bibliography\n':
                    break
                elif line.startswith('Document '):
                    n = int(line[9:].split(' of ')[0])
                    R = ProQuestRecord(f, sFile = proFile, sLine = lineNum)
                    if R.get('Title') != nameDict[n]:
                        error = BadProQuestFile("The numbering of the titles at the beginning of the file does not match the records inside. Line {} has a record titled '{}' with number {}, the name should be '{}'.".format(lineNum, R.get('Title', "TITLE MISSING"), n, nameDict[n]))
                        raise StopIteration
                    recSet.add(R)
                    lineNum, line = next(f)
                else:
                    #Parsing failed
                    error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}. It is likely that the separators between the records are incorrect".format(proFile, lineNum))
                    raise StopIteration
    except (UnicodeDecodeError, StopIteration, ValueError) as e:
        if error is None:
            error = BadProQuestFile("The file '{}' has parts of it that are unparsable starting at line: {}.\nThe error was: '{}'".format(proFile, lineNum, e))
    return recSet, error
[ "def", "proQuestParser", "(", "proFile", ")", ":", "#assumes the file is ProQuest", "nameDict", "=", "{", "}", "recSet", "=", "set", "(", ")", "error", "=", "None", "lineNum", "=", "0", "try", ":", "with", "open", "(", "proFile", ",", "'r'", ",", "encodi...
Parses a ProQuest file, _proFile_, to extract the individual entries.

A ProQuest file has three sections, first a list of the contained entries, second the full metadata and finally a bibtex formatted entry for the record. This parser only uses the first two as the bibtex contains no information that the second section does not. Also, the first section is only used to verify the second section. The returned [ProQuestRecord](../classes/ProQuestRecord.html#metaknowledge.proquest.ProQuestRecord) contains the data from the second section, with the same key strings as ProQuest uses and the unlabeled sections are called in order, `'Name'`, `'Author'` and `'url'`.

# Parameters

_proFile_ : `str`

> A path to a valid ProQuest file, use [isProQuestFile](#metaknowledge.proquest.proQuestHandlers.isProQuestFile) to verify

# Returns

`set[ProQuestRecord]`

> Records for each of the entries
[ "Parses", "a", "ProQuest", "file", "_proFile_", "to", "extract", "the", "individual", "entries", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/proquest/proQuestHandlers.py#L42-L100
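A minimal usage sketch for this parser, assuming the import path shown in the record's path field and using 'export.txt' as a placeholder file name; because the function returns a (records, error) pair instead of raising, callers should check the error before trusting the set.

# Hypothetical usage; the import path and file name are assumptions.
from metaknowledge.proquest.proQuestHandlers import proQuestParser

recSet, error = proQuestParser('export.txt')
if error is not None:
    # error is a BadProQuestFile describing where parsing broke down
    raise error
for rec in recSet:
    print(rec.get('Title'))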
19,897
networks-lab/metaknowledge
metaknowledge/grants/nsfGrant.py
NSFGrant.getInvestigators
def getInvestigators(self, tags = None, seperator = ";", _getTag = False): """Returns a list of the names of investigators. The tag `'Investigator'` is always searched; any given _tags_ are searched in addition to it, with the other optional arguments passed unchanged to the parent class.

# Returns

`list [str]`

> A list of all the found investigators' names
""" if tags is None: tags = ['Investigator'] elif isinstance(tags, str): tags = ['Investigator', tags] else: tags.append('Investigator') return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag)
python
def getInvestigators(self, tags = None, seperator = ";", _getTag = False): if tags is None: tags = ['Investigator'] elif isinstance(tags, str): tags = ['Investigator', tags] else: tags.append('Investigator') return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag)
[ "def", "getInvestigators", "(", "self", ",", "tags", "=", "None", ",", "seperator", "=", "\";\"", ",", "_getTag", "=", "False", ")", ":", "if", "tags", "is", "None", ":", "tags", "=", "[", "'Investigator'", "]", "elif", "isinstance", "(", "tags", ",", ...
Returns a list of the names of investigators. The tag `'Investigator'` is always searched; any given _tags_ are searched in addition to it, with the other optional arguments passed unchanged to the parent class.

# Returns

`list [str]`

> A list of all the found investigators' names
[ "Returns", "a", "list", "of", "the", "names", "of", "investigators", ".", "The", "optional", "arguments", "are", "ignored", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/grants/nsfGrant.py#L22-L37
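A short sketch of the tag-merging behaviour, assuming `grant` is an NSFGrant instance obtained elsewhere; note that the list branch calls tags.append, so a caller's list is mutated in place.

# Hypothetical usage; `grant` is assumed to be an NSFGrant instance.
grant.getInvestigators()                     # searches ['Investigator']
grant.getInvestigators(tags='PI')            # searches ['Investigator', 'PI']
myTags = ['PI', 'CoPI']
grant.getInvestigators(tags=myTags)          # searches ['PI', 'CoPI', 'Investigator']
# Side effect: myTags is now ['PI', 'CoPI', 'Investigator']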
19,898
networks-lab/metaknowledge
metaknowledge/genders/nameGender.py
nameStringGender
def nameStringGender(s, noExcept = False):
    """Expects a `last, first` formatted name string"""
    global mappingDict
    try:
        first = s.split(', ')[1].split(' ')[0].title()
    except IndexError:
        if noExcept:
            return 'Unknown'
        else:
            raise GenderException("The given string: '{}' does not contain a last name, first name pair separated by ', '.".format(s))
    if mappingDict is None:
        mappingDict = getMapping()
    return mappingDict.get(first, 'Unknown')
python
def nameStringGender(s, noExcept = False):
    global mappingDict
    try:
        first = s.split(', ')[1].split(' ')[0].title()
    except IndexError:
        if noExcept:
            return 'Unknown'
        else:
            raise GenderException("The given string: '{}' does not contain a last name, first name pair separated by ', '.".format(s))
    if mappingDict is None:
        mappingDict = getMapping()
    return mappingDict.get(first, 'Unknown')
[ "def", "nameStringGender", "(", "s", ",", "noExcept", "=", "False", ")", ":", "global", "mappingDict", "try", ":", "first", "=", "s", ".", "split", "(", "', '", ")", "[", "1", "]", ".", "split", "(", "' '", ")", "[", "0", "]", ".", "title", "(", ...
Expects a `last, first` formatted name string
[ "Expects", "first", "last" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/genders/nameGender.py#L54-L66
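A brief sketch of the call pattern; the input names are placeholders, and the returned labels depend on whatever getMapping loads into the module-level mappingDict.

# Hypothetical usage; input must be a 'last, first' string.
nameStringGender('Curie, Marie')           # the mapping's label for 'Marie', else 'Unknown'
nameStringGender('Cher', noExcept=True)    # no ', ' pair, so 'Unknown' instead of an exception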
19,899
networks-lab/metaknowledge
metaknowledge/journalAbbreviations/backend.py
j9urlGenerator
def j9urlGenerator(nameDict = False):
    """How to get all the URLs for the WOS Journal Title Abbreviations. Each varies by only a few characters. These are the URLs currently in use; they may change. They are of the form:

    > "https://images.webofknowledge.com/images/help/WOS/{VAL}_abrvjt.html"

    > Where {VAL} is a capital letter or the string "0-9"

    # Returns

    `list[str]`

    > A list of all the URL strings
    """
    start = "https://images.webofknowledge.com/images/help/WOS/"
    end = "_abrvjt.html"
    if nameDict:
        urls = {"0-9" : start + "0-9" + end}
        for c in string.ascii_uppercase:
            urls[c] = start + c + end
    else:
        urls = [start + "0-9" + end]
        for c in string.ascii_uppercase:
            urls.append(start + c + end)
    return urls
python
def j9urlGenerator(nameDict = False): start = "https://images.webofknowledge.com/images/help/WOS/" end = "_abrvjt.html" if nameDict: urls = {"0-9" : start + "0-9" + end} for c in string.ascii_uppercase: urls[c] = start + c + end else: urls = [start + "0-9" + end] for c in string.ascii_uppercase: urls.append(start + c + end) return urls
[ "def", "j9urlGenerator", "(", "nameDict", "=", "False", ")", ":", "start", "=", "\"https://images.webofknowledge.com/images/help/WOS/\"", "end", "=", "\"_abrvjt.html\"", "if", "nameDict", ":", "urls", "=", "{", "\"0-9\"", ":", "start", "+", "\"0-9\"", "+", "end", ...
How to get all the URLs for the WOS Journal Title Abbreviations. Each varies by only a few characters. These are the URLs currently in use; they may change. They are of the form:

> "https://images.webofknowledge.com/images/help/WOS/{VAL}_abrvjt.html"

> Where {VAL} is a capital letter or the string "0-9"

# Returns

`list[str]`

> A list of all the URL strings
[ "How", "to", "get", "all", "the", "urls", "for", "the", "WOS", "Journal", "Title", "Abbreviations", ".", "Each", "is", "varies", "by", "only", "a", "few", "characters", ".", "These", "are", "the", "currently", "in", "use", "urls", "they", "may", "change"...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L14-L38
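A sketch showing both return shapes; the only assumption is that the enclosing module has imported string, which the use of string.ascii_uppercase implies.

# Hypothetical usage; output shapes follow directly from the code above.
urls = j9urlGenerator()                  # list of 27 strings: '0-9' plus 'A' through 'Z'
byLetter = j9urlGenerator(nameDict=True)
byLetter['A']
# 'https://images.webofknowledge.com/images/help/WOS/A_abrvjt.html'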