hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c47596b8a5035d0ebdff520ba15dc9448d843dc | 7,887 | py | Python | sphinx/builders/singlehtml.py | choldgraf/sphinx | 97d2f9fbf8eab478908af981c1a36aed1d75a4ce | [
"BSD-2-Clause"
] | null | null | null | sphinx/builders/singlehtml.py | choldgraf/sphinx | 97d2f9fbf8eab478908af981c1a36aed1d75a4ce | [
"BSD-2-Clause"
] | null | null | null | sphinx/builders/singlehtml.py | choldgraf/sphinx | 97d2f9fbf8eab478908af981c1a36aed1d75a4ce | [
"BSD-2-Clause"
] | null | null | null | """
sphinx.builders.singlehtml
~~~~~~~~~~~~~~~~~~~~~~~~~~
Single HTML builders.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from typing import Any, Dict, List, Tuple, Union
from docutils import nodes
from docutils.nodes import Node
from sphinx.application import Sphinx
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.deprecation import RemovedInSphinx40Warning, deprecated_alias
from sphinx.environment.adapters.toctree import TocTree
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util import progress_message
from sphinx.util.console import darkgreen # type: ignore
from sphinx.util.nodes import inline_all_toctrees
logger = logging.getLogger(__name__)
class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
    """
    A StandaloneHTMLBuilder subclass that puts the whole document tree on one
    HTML page.
    """
    name = 'singlehtml'
    epilog = __('The HTML page is in %(outdir)s.')
    # Sources are not copied alongside the output; everything lives on the
    # single rendered page.
    copysource = False
    def get_outdated_docs(self) -> Union[str, List[str]]:  # type: ignore
        # Any change to any document affects the single output page, so a
        # full rebuild is always required.
        return 'all documents'
    def get_target_uri(self, docname: str, typ: str = None) -> str:
        """Return the URI used to reach *docname* from the single page."""
        if docname in self.env.all_docs:
            # all references are on the same page...
            return self.config.master_doc + self.out_suffix + \
                '#document-' + docname
        else:
            # chances are this is a html_additional_page
            return docname + self.out_suffix
    def get_relative_uri(self, from_: str, to: str, typ: str = None) -> str:
        # ignore source; all targets resolve the same way regardless of the
        # referring document because there is only one page
        return self.get_target_uri(to, typ)
    def fix_refuris(self, tree: Node) -> None:
        """Rewrite reference URIs carrying a double anchor so only the final
        anchor remains, prefixed with the single page's file name."""
        # fix refuris with double anchor
        fname = self.config.master_doc + self.out_suffix
        for refnode in tree.traverse(nodes.reference):
            if 'refuri' not in refnode:
                continue
            refuri = refnode['refuri']
            hashindex = refuri.find('#')
            if hashindex < 0:
                continue
            hashindex = refuri.find('#', hashindex + 1)
            if hashindex >= 0:
                refnode['refuri'] = fname + refuri[hashindex:]
    def _get_local_toctree(self, docname: str, collapse: bool = True, **kwds: Any) -> str:
        """Render the local toctree of *docname* to an HTML fragment."""
        if 'includehidden' not in kwds:
            kwds['includehidden'] = False
        toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwds)
        if toctree is not None:
            self.fix_refuris(toctree)
        return self.render_partial(toctree)['fragment']
    def assemble_doctree(self) -> nodes.document:
        """Inline every toctree into the master doctree and resolve all
        cross-references against the combined tree."""
        master = self.config.master_doc
        tree = self.env.get_doctree(master)
        tree = inline_all_toctrees(self, set(), master, tree, darkgreen, [master])
        tree['docname'] = master
        self.env.resolve_references(tree, master, self)
        self.fix_refuris(tree)
        return tree
    def assemble_toc_secnumbers(self) -> Dict[str, Dict[str, Tuple[int, ...]]]:
        # Assemble toc_secnumbers to resolve section numbers on SingleHTML.
        # Merge all secnumbers to single secnumber.
        #
        # Note: current Sphinx has refid conflicts in singlehtml mode.
        # To avoid the problem, the secnumber keys are replaced with a
        # "docname/refid" alias.
        #
        # There is related code in inline_all_toctrees() and
        # HTMLTranslator#add_secnumber().
        new_secnumbers = {}  # type: Dict[str, Tuple[int, ...]]
        for docname, secnums in self.env.toc_secnumbers.items():
            for id, secnum in secnums.items():
                alias = "%s/%s" % (docname, id)
                new_secnumbers[alias] = secnum
        return {self.config.master_doc: new_secnumbers}
    def assemble_toc_fignumbers(self) -> Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]:
        # Assemble toc_fignumbers to resolve figure numbers on SingleHTML.
        # Merge all fignumbers to single fignumber.
        #
        # Note: current Sphinx has refid conflicts in singlehtml mode.
        # To avoid the problem, the fignumber keys are replaced with a
        # "docname/figtype" alias.
        #
        # There is related code in inline_all_toctrees() and
        # HTMLTranslator#add_fignumber().
        new_fignumbers = {}  # type: Dict[str, Dict[str, Tuple[int, ...]]]
        # {'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, 'bar': {'figure': {'id1': (3,)}}}
        for docname, fignumlist in self.env.toc_fignumbers.items():
            for figtype, fignums in fignumlist.items():
                alias = "%s/%s" % (docname, figtype)
                new_fignumbers.setdefault(alias, {})
                for id, fignum in fignums.items():
                    new_fignumbers[alias][id] = fignum
        return {self.config.master_doc: new_fignumbers}
    def get_doc_context(self, docname: str, body: str, metatags: str) -> Dict:
        """Build the HTML template context for the single output page."""
        # no relation links...
        toctree = TocTree(self.env).get_toctree_for(self.config.master_doc, self, False)
        # if there is no toctree, toc is None
        if toctree:
            self.fix_refuris(toctree)
            toc = self.render_partial(toctree)['fragment']
            display_toc = True
        else:
            toc = ''
            display_toc = False
        return {
            'parents': [],
            'prev': None,
            'next': None,
            'docstitle': None,
            'title': self.config.html_title,
            'meta': None,
            'body': body,
            'metatags': metatags,
            'rellinks': [],
            'sourcename': '',
            'toc': toc,
            'display_toc': display_toc,
        }
    def write(self, *ignored: Any) -> None:
        # Assemble everything into one doctree, then emit the master doc only.
        docnames = self.env.all_docs
        with progress_message(__('preparing documents')):
            self.prepare_writing(docnames)  # type: ignore
        with progress_message(__('assembling single document')):
            doctree = self.assemble_doctree()
            self.env.toc_secnumbers = self.assemble_toc_secnumbers()
            self.env.toc_fignumbers = self.assemble_toc_fignumbers()
        with progress_message(__('writing')):
            self.write_doc_serialized(self.config.master_doc, doctree)
            self.write_doc(self.config.master_doc, doctree)
    def finish(self) -> None:
        # Emit everything that is not the main page (static files, images, ...).
        self.write_additional_files()
        self.copy_image_files()
        self.copy_download_files()
        self.copy_static_files()
        self.copy_extra_files()
        self.write_buildinfo()
        self.dump_inventory()
    @progress_message(__('writing additional files'))
    def write_additional_files(self) -> None:
        # no indices or search pages are supported
        # additional pages from conf.py
        for pagename, template in self.config.html_additional_pages.items():
            logger.info(' ' + pagename, nonl=True)
            self.handle_page(pagename, {}, template)
        if self.config.html_use_opensearch:
            logger.info(' opensearch', nonl=True)
            fn = path.join(self.outdir, '_static', 'opensearch.xml')
            self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
# for compatibility: SingleFileHTMLBuilder used to live in
# sphinx.builders.html; keep the old import path importable until the
# deprecation period ends.
deprecated_alias('sphinx.builders.html',
                 {
                     'SingleFileHTMLBuilder': SingleFileHTMLBuilder,
                 },
                 RemovedInSphinx40Warning)
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the singlehtml builder and its configuration with Sphinx."""
    # The singlehtml builder reuses most machinery of the standard HTML
    # builder, so that extension must be loaded first.
    app.setup_extension('sphinx.builders.html')
    app.add_builder(SingleFileHTMLBuilder)
    # By default the single page uses the same sidebar setup as regular HTML.
    app.add_config_value('singlehtml_sidebars', lambda self: self.html_sidebars, 'html')
    extension_metadata = {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }  # type: Dict[str, Any]
    return extension_metadata
| 37.557143 | 91 | 0.613034 |
from os import path
from typing import Any, Dict, List, Tuple, Union
from docutils import nodes
from docutils.nodes import Node
from sphinx.application import Sphinx
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.deprecation import RemovedInSphinx40Warning, deprecated_alias
from sphinx.environment.adapters.toctree import TocTree
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util import progress_message
from sphinx.util.console import darkgreen
from sphinx.util.nodes import inline_all_toctrees
logger = logging.getLogger(__name__)
class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
name = 'singlehtml'
epilog = __('The HTML page is in %(outdir)s.')
copysource = False
def get_outdated_docs(self) -> Union[str, List[str]]:
return 'all documents'
def get_target_uri(self, docname: str, typ: str = None) -> str:
if docname in self.env.all_docs:
return self.config.master_doc + self.out_suffix + \
'#document-' + docname
else:
return docname + self.out_suffix
def get_relative_uri(self, from_: str, to: str, typ: str = None) -> str:
return self.get_target_uri(to, typ)
def fix_refuris(self, tree: Node) -> None:
fname = self.config.master_doc + self.out_suffix
for refnode in tree.traverse(nodes.reference):
if 'refuri' not in refnode:
continue
refuri = refnode['refuri']
hashindex = refuri.find('#')
if hashindex < 0:
continue
hashindex = refuri.find('#', hashindex + 1)
if hashindex >= 0:
refnode['refuri'] = fname + refuri[hashindex:]
def _get_local_toctree(self, docname: str, collapse: bool = True, **kwds: Any) -> str:
if 'includehidden' not in kwds:
kwds['includehidden'] = False
toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwds)
if toctree is not None:
self.fix_refuris(toctree)
return self.render_partial(toctree)['fragment']
def assemble_doctree(self) -> nodes.document:
master = self.config.master_doc
tree = self.env.get_doctree(master)
tree = inline_all_toctrees(self, set(), master, tree, darkgreen, [master])
tree['docname'] = master
self.env.resolve_references(tree, master, self)
self.fix_refuris(tree)
return tree
def assemble_toc_secnumbers(self) -> Dict[str, Dict[str, Tuple[int, ...]]]:
umbers = {}
for docname, secnums in self.env.toc_secnumbers.items():
for id, secnum in secnums.items():
alias = "%s/%s" % (docname, id)
new_secnumbers[alias] = secnum
return {self.config.master_doc: new_secnumbers}
def assemble_toc_fignumbers(self) -> Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]:
umbers = {}
for docname, fignumlist in self.env.toc_fignumbers.items():
for figtype, fignums in fignumlist.items():
alias = "%s/%s" % (docname, figtype)
new_fignumbers.setdefault(alias, {})
for id, fignum in fignums.items():
new_fignumbers[alias][id] = fignum
return {self.config.master_doc: new_fignumbers}
def get_doc_context(self, docname: str, body: str, metatags: str) -> Dict:
toctree = TocTree(self.env).get_toctree_for(self.config.master_doc, self, False)
if toctree:
self.fix_refuris(toctree)
toc = self.render_partial(toctree)['fragment']
display_toc = True
else:
toc = ''
display_toc = False
return {
'parents': [],
'prev': None,
'next': None,
'docstitle': None,
'title': self.config.html_title,
'meta': None,
'body': body,
'metatags': metatags,
'rellinks': [],
'sourcename': '',
'toc': toc,
'display_toc': display_toc,
}
def write(self, *ignored: Any) -> None:
docnames = self.env.all_docs
with progress_message(__('preparing documents')):
self.prepare_writing(docnames)
with progress_message(__('assembling single document')):
doctree = self.assemble_doctree()
self.env.toc_secnumbers = self.assemble_toc_secnumbers()
self.env.toc_fignumbers = self.assemble_toc_fignumbers()
with progress_message(__('writing')):
self.write_doc_serialized(self.config.master_doc, doctree)
self.write_doc(self.config.master_doc, doctree)
def finish(self) -> None:
self.write_additional_files()
self.copy_image_files()
self.copy_download_files()
self.copy_static_files()
self.copy_extra_files()
self.write_buildinfo()
self.dump_inventory()
@progress_message(__('writing additional files'))
def write_additional_files(self) -> None:
for pagename, template in self.config.html_additional_pages.items():
logger.info(' ' + pagename, nonl=True)
self.handle_page(pagename, {}, template)
if self.config.html_use_opensearch:
logger.info(' opensearch', nonl=True)
fn = path.join(self.outdir, '_static', 'opensearch.xml')
self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
deprecated_alias('sphinx.builders.html',
{
'SingleFileHTMLBuilder': SingleFileHTMLBuilder,
},
RemovedInSphinx40Warning)
def setup(app: Sphinx) -> Dict[str, Any]:
app.setup_extension('sphinx.builders.html')
app.add_builder(SingleFileHTMLBuilder)
app.add_config_value('singlehtml_sidebars', lambda self: self.html_sidebars, 'html')
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| true | true |
1c4759c0cc109175a0ac69b07dc02aafad9b54f6 | 26,867 | py | Python | src/test/isolation2/sql_isolation_testcase.py | kalensk/gpdb | 52d17ad2057c0b74360e4693f683cc537178d86a | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | src/test/isolation2/sql_isolation_testcase.py | kalensk/gpdb | 52d17ad2057c0b74360e4693f683cc537178d86a | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | src/test/isolation2/sql_isolation_testcase.py | kalensk/gpdb | 52d17ad2057c0b74360e4693f683cc537178d86a | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pygresql.pg
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
    """Return True when *n* parses as a (possibly signed) integer."""
    try:
        int(n)
    except ValueError:
        # Not an integer literal (e.g. "abc", "1.5", "").
        return False
    return True
def load_helper_file(helper_file):
    """Return the whole contents of *helper_file* with surrounding
    whitespace stripped."""
    with open(helper_file) as fh:
        contents = fh.read()
    return contents.strip()
def parse_include_statement(sql):
    """
    Parse an "include: <path>;" statement and return <path>.

    Raises SyntaxError when the statement does not end with a semicolon.
    """
    include_statement, command = sql.split(None, 1)
    stripped_command = command.strip()
    if stripped_command.endswith(";"):
        # Remove only the trailing terminator.  The old implementation used
        # replace(";", ""), which also mangled any ';' embedded in the path.
        return stripped_command[:-1].rstrip()
    else:
        raise SyntaxError("expected 'include: %s' to end with a semicolon." % stripped_command)
class SQLIsolationExecutor(object):
    """
    Drives an isolation test file: parses session-prefixed commands and
    executes them against per-session worker processes, each holding its
    own database connection.
    """
    def __init__(self, dbname=''):
        # Map of (session name, connection mode) -> SQLConnection.
        self.processes = {}
        # The re.S flag makes the "." in the regex match newlines.
        # When matched against a command in process_command(), all
        # lines in the command are matched and sent as SQL query.
        self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>USIq]*?)\:(.*)", re.S)
        if dbname:
            self.dbname = dbname
        else:
            self.dbname = os.environ.get('PGDATABASE')
    class SQLConnection(object):
        """
        Parent-side handle for one test session.  The actual database
        connection lives in a child process (SQLSessionProcess) reached
        through a multiprocessing pipe, so that blocking queries do not
        stall the main test driver.
        """
        def __init__(self, out_file, name, mode, dbname):
            self.name = name
            self.mode = mode
            self.out_file = out_file
            self.dbname = dbname
            parent_conn, child_conn = multiprocessing.Pipe(True)
            self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
            self.pipe = parent_conn
            # True while a forked (background) command is awaiting join.
            self.has_open = False
            self.p.start()
            # Close "our" copy of the child's handle, so that if the child dies,
            # recv() on the pipe will fail.
            child_conn.close();
            self.out_file = out_file
        def session_process(self, pipe):
            # Child-process entry point: run the session loop until told to quit.
            sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
                self.mode, pipe, self.dbname)
            sp.do()
        def query(self, command):
            # Send one command to the session and print its result synchronously.
            print >>self.out_file
            self.out_file.flush()
            if len(command.strip()) == 0:
                return
            if self.has_open:
                raise Exception("Cannot query command while waiting for results")
            self.pipe.send((command, False))
            r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            print >>self.out_file, r.rstrip()
        def fork(self, command, blocking):
            # Run a command in the background.  If blocking=True, the command
            # is expected to block on the server (e.g. on a lock); getting a
            # result back quickly is therefore an error.
            print >>self.out_file, " <waiting ...>"
            self.pipe.send((command, True))
            if blocking:
                time.sleep(0.5)
                if self.pipe.poll(0):
                    p = self.pipe.recv()
                    raise Exception("Forked command is not blocking; got output: %s" % p.strip())
            self.has_open = True
        def join(self):
            # Wait for a previously forked command and print its result.
            r = None
            print >>self.out_file, " <... completed>"
            if self.has_open:
                r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            print >>self.out_file, r.rstrip()
            self.has_open = False
        def stop(self):
            # Orderly shutdown: an empty command makes the child loop exit.
            self.pipe.send(("", False))
            self.p.join()
            if self.has_open:
                raise Exception("Should not finish test case while waiting for results")
        def quit(self):
            # Explicit user-requested quit ("1q:" syntax) — echoed to output.
            print >>self.out_file, "... <quitting>"
            self.stop()
        def terminate(self):
            # Hard kill, used for cleanup on errors.
            self.pipe.close()
            self.p.terminate()
    class SQLSessionProcess(object):
        """
        Child-process side of a session: owns the pygresql connection and
        executes commands received over the pipe.
        """
        def __init__(self, name, mode, pipe, dbname):
            """
            Constructor
            """
            self.name = name
            self.mode = mode
            self.pipe = pipe
            self.dbname = dbname
            if self.mode == "utility":
                # Utility-mode connection straight to the primary with the
                # given content id, bypassing the dispatcher.
                (hostname, port) = self.get_hostname_port(name, 'p')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port,
                                          given_opt="-c gp_session_role=utility")
            elif self.mode == "standby":
                # Connect to standby even when it's role is recorded
                # as mirror. This is useful for scenarios where a
                # test needs to promote a standby without using
                # gpactivatestandby.
                (hostname, port) = self.get_hostname_port(name, 'm')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port)
            else:
                self.con = self.connectdb(self.dbname)
        def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
            """
            Connect to the given database, retrying (up to ~100 seconds)
            while the server reports that it is starting up or in recovery.
            """
            con = None
            retry = 1000
            while retry:
                try:
                    if (given_port is None):
                        con = pygresql.pg.connect(host= given_host,
                                                  opt= given_opt,
                                                  dbname= given_dbname)
                    else:
                        con = pygresql.pg.connect(host= given_host,
                                                  port= given_port,
                                                  opt= given_opt,
                                                  dbname= given_dbname)
                    break
                except Exception as e:
                    if (("the database system is starting up" in str(e) or
                         "the database system is in recovery mode" in str(e)) and
                        retry > 1):
                        retry -= 1
                        time.sleep(0.1)
                    else:
                        raise
            return con
        def get_hostname_port(self, contentid, role):
            """
            Gets the port number/hostname combination of the
            contentid and role
            """
            query = ("SELECT hostname, port FROM gp_segment_configuration WHERE"
                     " content = %s AND role = '%s'") % (contentid, role)
            con = self.connectdb(self.dbname)
            r = con.query(query).getresult()
            if len(r) == 0:
                raise Exception("Invalid content %s" % contentid)
            if r[0][0] == socket.gethostname():
                # Local segment: connect over the default (local) host.
                return (None, int(r[0][1]))
            return (r[0][0], int(r[0][1]))
        # Print out a pygresql result set (a Query object, after the query
        # has been executed), in a format that imitates the default
        # formatting of psql. This isn't a perfect imitation: we left-justify
        # all the fields and headers, whereas psql centers the header, and
        # right-justifies numeric fields. But this is close enough, to make
        # gpdiff.pl recognize the result sets as such. (We used to just call
        # str(r), and let PyGreSQL do the formatting. But even though
        # PyGreSQL's default formatting is close to psql's, it's not close
        # enough.)
        def printout_result(self, r):
            widths = []
            # Figure out the widths of each column.
            fields = r.listfields()
            for f in fields:
                widths.append(len(str(f)))
            rset = r.getresult()
            for row in rset:
                colno = 0
                for col in row:
                    if col is None:
                        col = ""
                    widths[colno] = max(widths[colno], len(str(col)))
                    colno = colno + 1
            # Start printing. Header first.
            result = ""
            colno = 0
            for f in fields:
                if colno > 0:
                    result += "|"
                result += " " + f.ljust(widths[colno]) + " "
                colno = colno + 1
            result += "\n"
            # Then the bar ("----+----")
            colno = 0
            for f in fields:
                if colno > 0:
                    result += "+"
                result += "".ljust(widths[colno] + 2, "-")
                colno = colno + 1
            result += "\n"
            # Then the result set itself
            for row in rset:
                colno = 0
                for col in row:
                    if colno > 0:
                        result += "|"
                    if col is None:
                        col = ""
                    result += " " + str(col).ljust(widths[colno]) + " "
                    colno = colno + 1
                result += "\n"
            # Finally, the row count
            if len(rset) == 1:
                result += "(1 row)\n"
            else:
                result += "(" + str(len(rset)) +" rows)\n"
            return result
        def execute_command(self, command):
            """
            Executes a given command
            """
            try:
                r = self.con.query(command)
                if r and type(r) == str:
                    # e.g. an INSERT returns a string row count; echo the
                    # command verb ("INSERT 1") like psql would.
                    echo_content = command[:-1].partition(" ")[0].upper()
                    return "%s %s" % (echo_content, r)
                elif r:
                    return self.printout_result(r)
                else:
                    echo_content = command[:-1].partition(" ")[0].upper()
                    return echo_content
            except Exception as e:
                # Error text becomes the session's visible output.
                return str(e)
        def do(self):
            """
            Process loop.
            Ends when the command None is received
            """
            (c, wait) = self.pipe.recv()
            while c:
                if wait:
                    time.sleep(0.1)
                r = self.execute_command(c)
                self.pipe.send(r)
                r = None
                (c, wait) = self.pipe.recv()
    def get_process(self, out_file, name, mode="", dbname=""):
        """
        Gets or creates the process by the given name
        """
        if len(name) > 0 and not is_digit(name):
            raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
        if not (name, mode) in self.processes:
            if not dbname:
                dbname = self.dbname
            self.processes[(name, mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, mode, dbname)
        return self.processes[(name, mode)]
    def quit_process(self, out_file, name, mode="", dbname=""):
        """
        Quits a process with the given name
        """
        if len(name) > 0 and not is_digit(name):
            raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
        if not (name, mode) in self.processes:
            raise Exception("Sessions not started cannot be quit")
        self.processes[(name, mode)].quit()
        del self.processes[(name, mode)]
    def get_all_primary_contentids(self, dbname):
        """
        Retrieves all primary content IDs (including the master). Intended for
        use by *U queries.
        """
        if not dbname:
            dbname = self.dbname
        con = pygresql.pg.connect(dbname=dbname)
        result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
        if len(result) == 0:
            raise Exception("Invalid gp_segment_configuration contents")
        return [int(content[0]) for content in result]
    def process_command(self, command, output_file):
        """
        Processes the given command.
        The command at this point still includes the isolation behavior
        flags, e.g. which session to use.
        """
        process_name = ""
        sql = command
        flag = ""
        con_mode = ""
        dbname = ""
        m = self.command_pattern.match(command)
        if m:
            process_name = m.groups()[0]
            flag = m.groups()[1]
            if flag and flag[0] == "U":
                con_mode = "utility"
            elif flag and flag[0] == "S":
                # Consume the leading "S"; any remaining character (q, <, &)
                # keeps its usual meaning for the standby session.
                if len(flag) > 1:
                    flag = flag[1:]
                con_mode = "standby"
            sql = m.groups()[2]
            sql = sql.lstrip()
            # If db_name is specifed , it should be of the following syntax:
            # 1:@db_name <db_name>: <sql>
            if sql.startswith('@db_name'):
                sql_parts = sql.split(':', 2)
                if not len(sql_parts) == 2:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                if not sql_parts[0].startswith('@db_name'):
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                if not len(sql_parts[0].split()) == 2:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                dbname = sql_parts[0].split()[1].strip()
                if not dbname:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                sql = sql_parts[1]
        if not flag:
            if sql.startswith('!'):
                sql = sql[1:]
                # Check for execution mode. E.g.
                # !\retcode path/to/executable --option1 --option2 ...
                #
                # At the moment, we only recognize the \retcode mode, which
                # ignores all program output in the diff (it's still printed)
                # and adds the return code.
                mode = None
                if sql.startswith('\\'):
                    mode, sql = sql.split(None, 1)
                    if mode != '\\retcode':
                        raise Exception('Invalid execution mode: {}'.format(mode))
                cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
                stdout, _ = cmd_output.communicate()
                print >> output_file
                if mode == '\\retcode':
                    print >> output_file, '-- start_ignore'
                print >> output_file, stdout
                if mode == '\\retcode':
                    print >> output_file, '-- end_ignore'
                    print >> output_file, '(exited with code {})'.format(cmd_output.returncode)
            elif sql.startswith('include:'):
                # Inline the referenced file's SQL into this session.
                helper_file = parse_include_statement(sql)
                self.get_process(
                    output_file,
                    process_name,
                    dbname=dbname
                ).query(
                    load_helper_file(helper_file)
                )
            else:
                self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
        elif flag == "&":
            # Background command that is expected to block.
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
        elif flag == ">":
            # Background command that is expected NOT to block.
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False)
        elif flag == "<":
            if len(sql) > 0:
                raise Exception("No query should be given on join")
            self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
        elif flag == "q":
            if len(sql) > 0:
                raise Exception("No query should be given on quit")
            self.quit_process(output_file, process_name, con_mode, dbname=dbname)
        elif flag == "U":
            # "*U" fans the query out to the master and every primary.
            if process_name == '*':
                process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
            else:
                process_names = [process_name]
            for name in process_names:
                self.get_process(output_file, name, con_mode, dbname=dbname).query(sql.strip())
        elif flag == "U&":
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
        elif flag == "U<":
            if len(sql) > 0:
                raise Exception("No query should be given on join")
            self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
        elif flag == "Uq":
            if len(sql) > 0:
                raise Exception("No query should be given on quit")
            self.quit_process(output_file, process_name, con_mode, dbname=dbname)
        elif flag == "S":
            self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
        else:
            raise Exception("Invalid isolation flag")
    def process_isolation_file(self, sql_file, output_file):
        """
        Processes the given sql file and writes the output
        to output file
        """
        try:
            command = ""
            for line in sql_file:
                #tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
                print >>output_file, line.strip(),
                if line[0] == "!":
                    command_part = line # shell commands can use -- for multichar options like --include
                else:
                    command_part = line.partition("--")[0] # remove comment from line
                if command_part == "" or command_part == "\n":
                    print >>output_file
                elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+[SU][q\\<]:$", line):
                    # A complete command: either terminated by ';' or a bare
                    # join/quit line like "2<:" / "1q:".
                    command += command_part
                    try:
                        self.process_command(command, output_file)
                    except Exception as e:
                        print >>output_file, "FAILED: ", e
                    command = ""
                else:
                    # Multi-line statement still being accumulated.
                    command += command_part
            for process in self.processes.values():
                process.stop()
        except:
            for process in self.processes.values():
                process.terminate()
            raise
        finally:
            for process in self.processes.values():
                process.terminate()
class SQLIsolationTestCase:
    """
    The isolation test case allows a fine grained control of interleaved
    executing transactions. This is mainly used to test isolation behavior.
    [<#>[flag]:] <sql> | ! <shell scripts or command>
    #: either an integer indicating a unique session, or a content-id if
       followed by U (for utility-mode connections). In 'U' mode, the
       content-id can alternatively be an asterisk '*' to perform a
       utility-mode query on the master and all primaries.
    flag:
        &: expect blocking behavior
        >: running in background without blocking
        <: join an existing session
        q: quit the given session
        U: connect in utility mode to primary contentid from gp_segment_configuration
        U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
        U<: join an existing utility mode session (does not currently support an asterisk target)
        I: include a file of sql statements (useful for loading reusable functions)
    An example is:
    Execute BEGIN in transaction 1
    Execute BEGIN in transaction 2
    Execute INSERT in transaction 2
    Execute SELECT in transaction 1
    Execute COMMIT in transaction 2
    Execute SELECT in transaction 1
    The isolation tests are specified identical to sql-scripts in normal
    SQLTestCases. However, it is possible to prefix a SQL line with
    a transaction identifier followed by a colon (":").
    The above example would be defined by
    1: BEGIN;
    2: BEGIN;
    2: INSERT INTO a VALUES (1);
    1: SELECT * FROM a;
    2: COMMIT;
    1: SELECT * FROM a;
    Blocking behavior can be tested by forking and joining.
    1: BEGIN;
    2: BEGIN;
    1: DELETE FROM foo WHERE a = 4;
    2&: DELETE FROM foo WHERE a = 4;
    1: COMMIT;
    2<:
    2: COMMIT;
    2& forks the command. It is executed in the background. If the
    command is NOT blocking at this point, it is considered an error.
    2< joins the background command and outputs the result of the
    command execution.
    Session ids should be smaller than 1024.
    2U: Executes a utility command connected to port 40000.
    One difference to SQLTestCase is the output of INSERT.
    SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
    SQLIsolationTestCase would output "INSERT 1". As the
    SQLIsolationTestCase needs to have a more fine-grained control
    over the execution order than possible with PSQL, it uses
    the pygresql python library instead.
    Connecting to a specific database:
    1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
    2. If you want a specific session to be connected to a specific database , specify the sql as follows:
    1:@db_name testdb: <sql>
    2:@db_name test2db: <sql>
    1: <sql>
    2: <sql>
    etc
    Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of the session. For eg:, following would error out:
    1:@db_name testdb: <sql>
    2:@db_name test2db: <sql>
    1: @db_name testdb: <sql>
    2: <sql>
    etc
    Quitting sessions:
    By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
    in the middle of the test execution, you can specify a flag 'q' with the session identifier. For eg:
    1:@db_name testdb: <sql>
    2:@db_name test2db: <sql>
    1: <sql>
    2: <sql>
    1q:
    2: <sql>
    3: <sql>
    2q:
    3: <sql>
    2: @db_name test: <sql>
    1q: ---> Will quit the session established with testdb.
    2q: ---> Will quit the session established with test2db.
    The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
    Catalog Modification:
    Some tests are easier to write if it's possible to modify a system
    catalog across the *entire* cluster. To perform a utility-mode query on
    all segments and the master, you can use *U commands:
    *U: SET allow_system_table_mods = true;
    *U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;
    Since the number of query results returned by a *U command depends on
    the developer's cluster configuration, it can be useful to wrap them in
    a start_/end_ignore block. (Unfortunately, this also hides legitimate
    failures; a better long-term solution is needed.)
    Block/join flags are not currently supported with *U.
    Including files:
    -- example contents for file.sql: create function some_test_function() returning void ...
    include: path/to/some/file.sql;
    select some_helper_function();
    """
    def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
        """
        Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file , runs the sql
        against the test case database (self.db_name) and verifies the output with the ans file.
        If an 'init_file' exists in the same location as the sql_file, this will be used
        while doing gpdiff.
        """
        # NOTE(review): TINCSystem, self.get_out_dir(), self.gucs,
        # self._add_gucs_to_sql_file() etc. come from the TINC test framework
        # base class — not defined in this file.
        # Add gucs to the test sql and form the actual sql file to be run
        if not out_dir:
            out_dir = self.get_out_dir()
        if not os.path.exists(out_dir):
            TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
        if optimizer is None:
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
        else:
            # sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
        self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
        self.test_artifacts.append(gucs_sql_file)
        if not out_file:
            if optimizer is None:
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
            else:
                # out file will be *_opt.out or *_planner.out based on optimizer
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
        self.test_artifacts.append(out_file)
        executor = SQLIsolationExecutor(dbname=self.db_name)
        with open(out_file, "w") as f:
            executor.process_isolation_file(open(sql_file), f)
            f.flush()
        # Trim a trailing ".t" marker from the output name, if present.
        if out_file[-2:] == '.t':
            out_file = out_file[:-2]
        return out_file
if __name__ == "__main__":
    # Stand-alone entry point: read an isolation spec from stdin and write
    # the annotated results to stdout.
    cli = OptionParser()
    cli.add_option("--dbname", dest="dbname",
                   help="connect to database DBNAME", metavar="DBNAME")
    opts, _args = cli.parse_args()
    SQLIsolationExecutor(dbname=opts.dbname).process_isolation_file(
        sys.stdin, sys.stdout)
| 40.1 | 193 | 0.544274 |
import pygresql.pg
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
    """Return True when *n* parses as a base-10 integer, else False."""
    try:
        int(n)
    except ValueError:
        return False
    return True
def load_helper_file(helper_file):
    """Return the whitespace-stripped contents of *helper_file*.

    Used by the ``include:`` directive to splice a shared SQL helper file
    into an isolation test.

    :param helper_file: path to the file to read.
    :returns: the file contents with leading/trailing whitespace removed.
    """
    # read() replaces the original "".join(readlines()) idiom; also avoids
    # shadowing the (Python 2) builtin name `file`.
    with open(helper_file) as fh:
        return fh.read().strip()
def parse_include_statement(sql):
    """Parse an ``include: <path>;`` directive and return ``<path>``.

    :param sql: the full directive text, e.g. ``"include: a/b.sql;"``.
    :returns: the path portion with the trailing semicolon removed.
    :raises SyntaxError: if the directive does not end with a semicolon.
    """
    _include_keyword, command = sql.split(None, 1)
    stripped_command = command.strip()
    if not stripped_command.endswith(";"):
        raise SyntaxError("expected 'include: %s' to end with a semicolon." % stripped_command)
    # Strip only the trailing semicolon.  The previous implementation used
    # replace(";", ""), which also deleted semicolons *inside* the path.
    return stripped_command[:-1].rstrip()
class SQLIsolationExecutor(object):
    """Executes an SQL isolation spec: each command line of the form
    ``<session-id>[flags]: <sql>`` is dispatched to a per-session child
    process holding its own database connection, so transactions from
    multiple sessions can be interleaved deterministically.

    Flags (see command_pattern): ``&``/``>`` fork a (blocking/non-blocking)
    statement, ``<`` joins it, ``q`` quits the session, ``U`` runs in
    utility mode against a segment, ``S`` targets the standby.
    """
    def __init__(self, dbname=''):
        # Maps (session name, connection mode) -> SQLConnection.
        self.processes = {}
        # group 1: session id (or '*'), group 2: flag characters,
        # group 3: the SQL text (re.S so the SQL may span lines).
        self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>USIq]*?)\:(.*)", re.S)
        if dbname:
            self.dbname = dbname
        else:
            self.dbname = os.environ.get('PGDATABASE')

    class SQLConnection(object):
        """Parent-side handle for one session: owns the child process and
        the pipe used to send (sql, wait) tuples and receive result text."""
        def __init__(self, out_file, name, mode, dbname):
            self.name = name
            self.mode = mode
            self.out_file = out_file
            self.dbname = dbname

            parent_conn, child_conn = multiprocessing.Pipe(True)
            self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
            self.pipe = parent_conn
            # True while a forked statement is outstanding and un-joined.
            self.has_open = False
            self.p.start()

            # Close the child end in the parent so that, if the child dies,
            # recv() on the pipe will fail.
            child_conn.close();

            self.out_file = out_file

        def session_process(self, pipe):
            # Runs in the child process: service commands until told to stop.
            sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
                self.mode, pipe, self.dbname)
            sp.do()

        def query(self, command):
            """Send *command* to the session and print its result synchronously."""
            print >>self.out_file
            self.out_file.flush()
            if len(command.strip()) == 0:
                return
            if self.has_open:
                raise Exception("Cannot query command while waiting for results")
            self.pipe.send((command, False))
            r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            print >>self.out_file, r.rstrip()

        def fork(self, command, blocking):
            """Send *command* without waiting for the result.

            When *blocking* is True, verify after a short delay that the
            statement really is blocked (no output yet).
            """
            print >>self.out_file, " <waiting ...>"
            self.pipe.send((command, True))

            if blocking:
                time.sleep(0.5)
                if self.pipe.poll(0):
                    p = self.pipe.recv()
                    raise Exception("Forked command is not blocking; got output: %s" % p.strip())
            self.has_open = True

        def join(self):
            """Wait for the forked statement's result and print it."""
            r = None
            print >>self.out_file, " <... completed>"
            if self.has_open:
                r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            print >>self.out_file, r.rstrip()
            self.has_open = False

        def stop(self):
            """Ask the child to exit cleanly and reap it."""
            self.pipe.send(("", False))
            self.p.join()
            if self.has_open:
                raise Exception("Should not finish test case while waiting for results")

        def quit(self):
            """Explicit mid-test quit (the 'q' flag)."""
            print >>self.out_file, "... <quitting>"
            self.stop()

        def terminate(self):
            # Hard kill; used on error paths where stop() may hang.
            self.pipe.close()
            self.p.terminate()

    class SQLSessionProcess(object):
        """Child-side worker: holds one pygresql connection and executes
        the SQL commands received over the pipe."""
        def __init__(self, name, mode, pipe, dbname):
            """
            Constructor. Initializes the database connection according to
            *mode* ('utility', 'standby', or normal).
            """
            self.name = name
            self.mode = mode
            self.pipe = pipe
            self.dbname = dbname
            if self.mode == "utility":
                # Direct utility-mode connection to the primary segment whose
                # content id equals *name*.
                (hostname, port) = self.get_hostname_port(name, 'p')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port,
                                          given_opt="-c gp_session_role=utility")
            elif self.mode == "standby":
                # Connect to standby even when its role is recorded as mirror.
                (hostname, port) = self.get_hostname_port(name, 'm')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port)
            else:
                self.con = self.connectdb(self.dbname)

        def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
            """Open a pygresql connection, retrying (up to 1000 times at
            0.1s intervals) while the server is starting up or in recovery."""
            con = None
            retry = 1000
            while retry:
                try:
                    if (given_port is None):
                        con = pygresql.pg.connect(host=given_host,
                                                  opt=given_opt,
                                                  dbname=given_dbname)
                    else:
                        con = pygresql.pg.connect(host=given_host,
                                                  port=given_port,
                                                  opt=given_opt,
                                                  dbname=given_dbname)
                    break
                except Exception as e:
                    if (("the database system is starting up" in str(e) or
                         "the database system is in recovery mode" in str(e)) and
                            retry > 1):
                        retry -= 1
                        time.sleep(0.1)
                    else:
                        raise
            return con

        def get_hostname_port(self, contentid, role):
            """Look up (hostname, port) for the segment with *contentid* and
            *role* in gp_segment_configuration; hostname is None when local."""
            query = ("SELECT hostname, port FROM gp_segment_configuration WHERE"
                     " content = %s AND role = '%s'") % (contentid, role)
            con = self.connectdb(self.dbname)
            r = con.query(query).getresult()
            if len(r) == 0:
                raise Exception("Invalid content %s" % contentid)
            if r[0][0] == socket.gethostname():
                # Use a local (unix-socket) connection for the local host.
                return (None, int(r[0][1]))
            return (r[0][0], int(r[0][1]))

        # Render a result set in a psql-like layout: left-justify all the
        # fields and headers, whereas psql centers the header and
        # right-justifies numeric fields.  This is close enough for
        # gpdiff.pl to recognize the result sets as such.
        def printout_result(self, r):
            """Format a pygresql query result *r* as a psql-style table string."""
            widths = []

            # Start with the widths of the column headers.
            fields = r.listfields()
            for f in fields:
                widths.append(len(str(f)))

            # Widen each column to fit its widest value.
            rset = r.getresult()
            for row in rset:
                colno = 0
                for col in row:
                    if col is None:
                        col = ""
                    widths[colno] = max(widths[colno], len(str(col)))
                    colno = colno + 1

            # Header row.
            result = ""
            colno = 0
            for f in fields:
                if colno > 0:
                    result += "|"
                result += " " + f.ljust(widths[colno]) + " "
                colno = colno + 1
            result += "\n"

            # Separator row of dashes.
            colno = 0
            for f in fields:
                if colno > 0:
                    result += "+"
                result += "".ljust(widths[colno] + 2, "-")
                colno = colno + 1
            result += "\n"

            # Data rows.
            for row in rset:
                colno = 0
                for col in row:
                    if colno > 0:
                        result += "|"
                    if col is None:
                        col = ""
                    result += " " + str(col).ljust(widths[colno]) + " "
                    colno = colno + 1
                result += "\n"

            # psql-style row-count footer.
            if len(rset) == 1:
                result += "(1 row)\n"
            else:
                result += "(" + str(len(rset)) +" rows)\n"

            return result

        def execute_command(self, command):
            """Execute *command*; return the formatted result set, a command
            tag (e.g. 'BEGIN'), or the exception text on failure."""
            try:
                r = self.con.query(command)
                # NOTE(review): pygresql returns a plain string (row count /
                # status) for non-SELECT commands; isinstance would be more
                # idiomatic than type(r) == str.
                if r and type(r) == str:
                    echo_content = command[:-1].partition(" ")[0].upper()
                    return "%s %s" % (echo_content, r)
                elif r:
                    return self.printout_result(r)
                else:
                    # Commands with no result (BEGIN, SET, ...): echo the verb.
                    echo_content = command[:-1].partition(" ")[0].upper()
                    return echo_content
            except Exception as e:
                return str(e)

        def do(self):
            """Command loop: receive (sql, wait) until an empty sql arrives."""
            (c, wait) = self.pipe.recv()
            while c:
                if wait:
                    # Small delay so the parent can verify a blocking fork.
                    time.sleep(0.1)
                r = self.execute_command(c)
                self.pipe.send(r)
                r = None

                (c, wait) = self.pipe.recv()

    def get_process(self, out_file, name, mode="", dbname=""):
        """Return the session for (name, mode), creating it on first use."""
        if len(name) > 0 and not is_digit(name):
            raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is utility mode number")

        if not (name, mode) in self.processes:
            if not dbname:
                dbname = self.dbname
            self.processes[(name, mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, mode, dbname)
        return self.processes[(name, mode)]

    def quit_process(self, out_file, name, mode="", dbname=""):
        """Quit and forget the session for (name, mode); it must exist."""
        if len(name) > 0 and not is_digit(name):
            raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is utility mode number")

        if not (name, mode) in self.processes:
            raise Exception("Sessions not started cannot be quit")

        self.processes[(name, mode)].quit()
        del self.processes[(name, mode)]

    def get_all_primary_contentids(self, dbname):
        """Return the content ids of all primary segments (and the master),
        used to expand '*U' commands."""
        if not dbname:
            dbname = self.dbname

        con = pygresql.pg.connect(dbname=dbname)
        result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
        if len(result) == 0:
            raise Exception("Invalid gp_segment_configuration contents")
        return [int(content[0]) for content in result]

    def process_command(self, command, output_file):
        """Parse one spec command (session prefix, flags, optional @db_name,
        '!' shell escape, 'include:' directive) and dispatch it."""
        process_name = ""
        sql = command
        flag = ""
        con_mode = ""
        dbname = ""
        m = self.command_pattern.match(command)
        if m:
            process_name = m.groups()[0]
            flag = m.groups()[1]
            if flag and flag[0] == "U":
                con_mode = "utility"
            elif flag and flag[0] == "S":
                # Strip the leading 'S'; any remaining character (q/</&) is
                # the real flag for the standby session.
                if len(flag) > 1:
                    flag = flag[1:]
                con_mode = "standby"
            sql = m.groups()[2]
            sql = sql.lstrip()
            # An optional "@db_name <db>:" prefix binds this session to a
            # specific database; only legal when the session is first opened.
            if sql.startswith('@db_name'):
                sql_parts = sql.split(':', 2)
                if not len(sql_parts) == 2:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                if not sql_parts[0].startswith('@db_name'):
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                if not len(sql_parts[0].split()) == 2:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                dbname = sql_parts[0].split()[1].strip()
                if not dbname:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                sql = sql_parts[1]
        if not flag:
            if sql.startswith('!'):
                sql = sql[1:]

                # Check for a shell execution mode modifier; '\retcode' wraps
                # the output in an ignore block and adds the return code.
                mode = None
                if sql.startswith('\\'):
                    mode, sql = sql.split(None, 1)
                    if mode != '\\retcode':
                        raise Exception('Invalid execution mode: {}'.format(mode))

                cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
                stdout, _ = cmd_output.communicate()
                print >> output_file
                if mode == '\\retcode':
                    print >> output_file, '-- start_ignore'
                print >> output_file, stdout
                if mode == '\\retcode':
                    print >> output_file, '-- end_ignore'
                    print >> output_file, '(exited with code {})'.format(cmd_output.returncode)
            elif sql.startswith('include:'):
                # Splice an external SQL helper file into this session.
                helper_file = parse_include_statement(sql)
                self.get_process(
                    output_file,
                    process_name,
                    dbname=dbname
                ).query(
                    load_helper_file(helper_file)
                )
            else:
                self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
        elif flag == "&":
            # Fork a statement expected to block.
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
        elif flag == ">":
            # Fork a statement that is allowed to complete immediately.
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False)
        elif flag == "<":
            if len(sql) > 0:
                raise Exception("No query should be given on join")
            self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
        elif flag == "q":
            if len(sql) > 0:
                raise Exception("No query should be given on quit")
            self.quit_process(output_file, process_name, con_mode, dbname=dbname)
        elif flag == "U":
            # '*U' fans the query out to every primary segment.
            if process_name == '*':
                process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
            else:
                process_names = [process_name]

            for name in process_names:
                self.get_process(output_file, name, con_mode, dbname=dbname).query(sql.strip())
        elif flag == "U&":
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
        elif flag == "U<":
            if len(sql) > 0:
                raise Exception("No query should be given on join")
            self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
        elif flag == "Uq":
            if len(sql) > 0:
                raise Exception("No query should be given on quit")
            self.quit_process(output_file, process_name, con_mode, dbname=dbname)
        elif flag == "S":
            self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
        else:
            raise Exception("Invalid isolation flag")

    def process_isolation_file(self, sql_file, output_file):
        """
        Read the isolation spec from *sql_file* line by line, accumulate
        multi-line commands until a terminator is seen, and execute each
        command, echoing input and results to *output_file*.  All sessions
        are stopped at EOF (or terminated on error).
        """
        try:
            command = ""
            for line in sql_file:
                #tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
                print >>output_file, line.strip(),
                if line[0] == "!":
                    command_part = line # shell commands can use -- for multichar options like --include
                else:
                    command_part = line.partition("--")[0] # remove comment from line
                if command_part == "" or command_part == "\n":
                    print >>output_file
                elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+[SU][q\\<]:$", line):
                    # A ';' terminator or a bare join/quit line completes
                    # the pending command.
                    command += command_part
                    try:
                        self.process_command(command, output_file)
                    except Exception as e:
                        print >>output_file, "FAILED: ", e
                    command = ""
                else:
                    command += command_part

            for process in self.processes.values():
                process.stop()
        except:
            for process in self.processes.values():
                process.terminate()
            raise
        finally:
            for process in self.processes.values():
                process.terminate()
class SQLIsolationTestCase:
    """TINC test case driving the SQL isolation framework: runs interleaved
    multi-session SQL specs (see SQLIsolationExecutor) and records the
    produced artifacts for later diffing against an answer file.

    NOTE(review): TINCSystem and the self.* members used below (gucs,
    db_name, get_out_dir, _optimizer_suffix, _add_gucs_to_sql_file,
    test_artifacts) are presumably provided by the TINC framework base
    class — not visible in this file; verify against the framework.
    """
    def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
        """Run *sql_file* against self.db_name through the isolation
        executor, writing results to *out_file* and returning its path.

        A gucs-augmented copy of the sql file is produced in *out_dir*
        (default self.get_out_dir()); both it and the output file are
        appended to self.test_artifacts.  When *optimizer* is given, file
        names gain an optimizer-specific suffix.
        """
        # Add gucs to the test sql and form the actual sql file to be run
        if not out_dir:
            out_dir = self.get_out_dir()
        if not os.path.exists(out_dir):
            TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
        if optimizer is None:
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
        else:
            # sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
        self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
        self.test_artifacts.append(gucs_sql_file)
        if not out_file:
            if optimizer is None:
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
            else:
                # out file will be *_opt.out or *_planner.out based on optimizer
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
        self.test_artifacts.append(out_file)

        executor = SQLIsolationExecutor(dbname=self.db_name)
        with open(out_file, "w") as f:
            executor.process_isolation_file(open(sql_file), f)
            f.flush()

        # Drop a trailing '.t' suffix from the returned output path.
        if out_file[-2:] == '.t':
            out_file = out_file[:-2]

        return out_file
if __name__ == "__main__":
    # Command-line entry point: reads an isolation spec on stdin and writes
    # the annotated results to stdout, connecting to --dbname (or $PGDATABASE).
    parser = OptionParser()
    parser.add_option("--dbname", dest="dbname",
                      help="connect to database DBNAME", metavar="DBNAME")
    (options, args) = parser.parse_args()

    executor = SQLIsolationExecutor(dbname=options.dbname)
    executor.process_isolation_file(sys.stdin, sys.stdout)
| true | true |
1c475a28b1d83edba4b3c614df0405e3f55f79f0 | 53,813 | py | Python | lib/sqlalchemy/sql/sqltypes.py | mjpieters/sqlalchemy | a8efeb6c052330b7b8d44960132d638b08d42d18 | [
"MIT"
] | null | null | null | lib/sqlalchemy/sql/sqltypes.py | mjpieters/sqlalchemy | a8efeb6c052330b7b8d44960132d638b08d42d18 | [
"MIT"
] | null | null | null | lib/sqlalchemy/sql/sqltypes.py | mjpieters/sqlalchemy | a8efeb6c052330b7b8d44960132d638b08d42d18 | [
"MIT"
] | null | null | null | # sql/sqltypes.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL specific types.
"""
import datetime as dt
import codecs
from .type_api import TypeEngine, TypeDecorator, to_instance
from .elements import quoted_name, type_coerce
from .default_comparator import _DefaultColumnComparator
from .. import exc, util, processors
from .base import _bind_or_error, SchemaEventTarget
from . import operators
from .. import event
from ..util import pickle
import decimal
if util.jython:
import array
class _DateAffinity(object):
    """Mixin providing date/time specific expression adaptations.

    The adaptation rules are implemented within Date, Time, Interval,
    DateTime, Numeric and Integer, modeled on the PostgreSQL date/time
    function documentation
    (http://www.postgresql.org/docs/current/static/functions-datetime.html).
    """

    @property
    def _expression_adaptations(self):
        # Subclasses supply {operator: {other_type_affinity: result_type}}.
        raise NotImplementedError()

    class Comparator(TypeEngine.Comparator):
        _blank_dict = util.immutabledict()

        def _adapt_expression(self, op, other_comparator):
            # Determine the result type for (operator, other operand's
            # affinity); fall back to NULLTYPE when no rule is defined.
            other_affinity = other_comparator.type._type_affinity
            rules = self.type._expression_adaptations.get(op, self._blank_dict)
            return op, to_instance(rules.get(other_affinity, NULLTYPE))

    comparator_factory = Comparator
class Concatenable(object):
    """A mixin marking a type as supporting 'concatenation' —
    typically string types."""

    class Comparator(TypeEngine.Comparator):
        def _adapt_expression(self, op, other_comparator):
            # '+' between two concatenable (or NULL-typed) operands becomes
            # the SQL concatenation operator; anything else passes through.
            other_is_concatenable = isinstance(
                other_comparator,
                (Concatenable.Comparator, NullType.Comparator))
            if op is operators.add and other_is_concatenable:
                return operators.concat_op, self.expr.type
            return op, self.expr.type

    comparator_factory = Comparator
class String(Concatenable, TypeEngine):
    """The base for all string and character types.

    In SQL, corresponds to VARCHAR.  Can also take Python unicode objects
    and encode to the database's encoding in bind params (and the reverse for
    result sets.)

    The `length` field is usually required when the `String` type is
    used within a CREATE TABLE statement, as VARCHAR requires a length
    on most databases.

    """

    __visit_name__ = 'string'

    def __init__(self, length=None, collation=None,
                 convert_unicode=False,
                 unicode_error=None,
                 _warn_on_bytestring=False
                 ):
        """
        Create a string-holding type.

        :param length: optional, a length for the column for use in
          DDL and CAST expressions.  May be safely omitted if no ``CREATE
          TABLE`` will be issued.  Certain databases may require a
          ``length`` for use in DDL, and will raise an exception when
          the ``CREATE TABLE`` DDL is issued if a ``VARCHAR``
          with no length is included.  Whether the value is
          interpreted as bytes or characters is database specific.

        :param collation: Optional, a column-level collation for
          use in DDL and CAST expressions.  Renders using the
          COLLATE keyword supported by SQLite, MySQL, and Postgresql.
          E.g.::

            >>> from sqlalchemy import cast, select, String
            >>> print select([cast('some string', String(collation='utf8'))])
            SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1

          .. versionadded:: 0.8 Added support for COLLATE to all
             string types.

        :param convert_unicode: When set to ``True``, the
          :class:`.String` type will assume that
          input is to be passed as Python ``unicode`` objects,
          and results returned as Python ``unicode`` objects.
          If the DBAPI in use does not support Python unicode
          (which is fewer and fewer these days), SQLAlchemy
          will encode/decode the value, using the
          value of the ``encoding`` parameter passed to
          :func:`.create_engine` as the encoding.

          When using a DBAPI that natively supports Python
          unicode objects, this flag generally does not
          need to be set.  For columns that are explicitly
          intended to store non-ASCII data, the :class:`.Unicode`
          or :class:`.UnicodeText`
          types should be used regardless, which feature
          the same behavior of ``convert_unicode`` but
          also indicate an underlying column type that
          directly supports unicode, such as ``NVARCHAR``.

          For the extremely rare case that Python ``unicode``
          is to be encoded/decoded by SQLAlchemy on a backend
          that does natively support Python ``unicode``,
          the value ``force`` can be passed here which will
          cause SQLAlchemy's encode/decode services to be
          used unconditionally.

        :param unicode_error: Optional, a method to use to handle Unicode
          conversion errors. Behaves like the ``errors`` keyword argument to
          the standard library's ``string.decode()`` functions.   This flag
          requires that ``convert_unicode`` is set to ``force`` - otherwise,
          SQLAlchemy is not guaranteed to handle the task of unicode
          conversion.   Note that this flag adds significant performance
          overhead to row-fetching operations for backends that already
          return unicode objects natively (which most DBAPIs do).  This
          flag should only be used as a last resort for reading
          strings from a column with varied or corrupted encodings.

        """
        if unicode_error is not None and convert_unicode != 'force':
            raise exc.ArgumentError("convert_unicode must be 'force' "
                                    "when unicode_error is set.")

        self.length = length
        self.collation = collation
        self.convert_unicode = convert_unicode
        self.unicode_error = unicode_error
        self._warn_on_bytestring = _warn_on_bytestring

    def literal_processor(self, dialect):
        # Render an inline SQL string literal, doubling embedded quotes.
        def process(value):
            value = value.replace("'", "''")
            return "'%s'" % value
        return process

    def bind_processor(self, dialect):
        # Return a bind-param conversion function (or None when no
        # conversion is needed), depending on whether the dialect handles
        # unicode natively and whether conversion is forced.
        if self.convert_unicode or dialect.convert_unicode:
            if dialect.supports_unicode_binds and \
                    self.convert_unicode != 'force':
                if self._warn_on_bytestring:
                    def process(value):
                        if isinstance(value, util.binary_type):
                            util.warn("Unicode type received non-unicode bind "
                                      "param value.")
                        return value
                    return process
                else:
                    return None
            else:
                # Dialect can't bind unicode (or conversion is forced):
                # encode via the configured dialect encoding.
                encoder = codecs.getencoder(dialect.encoding)
                warn_on_bytestring = self._warn_on_bytestring

                def process(value):
                    if isinstance(value, util.text_type):
                        return encoder(value, self.unicode_error)[0]
                    elif warn_on_bytestring and value is not None:
                        util.warn("Unicode type received non-unicode bind "
                                  "param value")
                    return value
                return process
        else:
            return None

    def result_processor(self, dialect, coltype):
        # Return a result-row conversion function decoding to unicode when
        # required, or None when the driver already returns unicode.
        wants_unicode = self.convert_unicode or dialect.convert_unicode
        needs_convert = wants_unicode and \
            (dialect.returns_unicode_strings is not True or
             self.convert_unicode == 'force')

        if needs_convert:
            to_unicode = processors.to_unicode_processor_factory(
                dialect.encoding, self.unicode_error)

            if dialect.returns_unicode_strings:
                # we wouldn't be here unless convert_unicode='force'
                # was specified, or the driver has erratic unicode-returning
                # habits.  since we will be getting back unicode
                # in most cases, we check for it (decode will fail).
                def process(value):
                    if isinstance(value, util.text_type):
                        return value
                    else:
                        return to_unicode(value)
                return process
            else:
                # here, we assume that the object is not unicode,
                # avoiding expensive isinstance() check.
                return to_unicode
        else:
            return None

    @property
    def python_type(self):
        if self.convert_unicode:
            return util.text_type
        else:
            return str

    def get_dbapi_type(self, dbapi):
        return dbapi.STRING
class Text(String):
    """A variably sized string type.

    Usually corresponds to CLOB or TEXT in SQL.  Like :class:`.String`,
    it can accept Python unicode objects and encode to the database's
    encoding in bind params (and the reverse for result sets).  TEXT
    objects generally do not carry a length; some databases accept a
    length argument here, but it will be rejected by others.

    """

    __visit_name__ = 'text'
class Unicode(String):
    """A variable length Unicode string type.

    :class:`.Unicode` is a :class:`.String` subclass that assumes input
    and output as Python ``unicode`` data — equivalent to using the
    ``convert_unicode`` flag on :class:`.String` — but additionally
    implies an underlying column type that explicitly supports non-ASCII
    data, such as ``NVARCHAR`` on Oracle and SQL Server.  This can affect
    the output of ``CREATE TABLE`` statements and ``CAST`` functions at
    the dialect level, as well as bound-parameter handling in some DBAPI
    scenarios.

    The encoding used is usually determined by the DBAPI itself; most
    modern DBAPIs handle Python ``unicode`` natively (see the notes for
    the target DBAPI in :ref:`dialect_toplevel`).  For DBAPIs that do
    not, SQLAlchemy encodes/decodes outside the DBAPI using the
    ``encoding`` flag passed to :func:`.create_engine`.

    Only Python ``unicode`` objects should be passed for this type; a
    plain ``str`` under Python 2 triggers a warning.  To locate the
    source of such warnings, the standard ``warnings`` filter
    (http://docs.python.org/library/warnings.html) can turn them into
    exceptions with a stack trace::

        import warnings
        warnings.simplefilter('error')

    An application that wishes to pass bytestrings must decode them to
    unicode first; see the recipe at :ref:`coerce_to_unicode`.

    See also:

        :class:`.UnicodeText` - unlengthed textual counterpart
        to :class:`.Unicode`.

    """

    __visit_name__ = 'unicode'

    def __init__(self, length=None, **kwargs):
        """
        Create a :class:`.Unicode` object.

        Parameters are the same as that of :class:`.String`,
        with the exception that ``convert_unicode``
        defaults to ``True``.

        """
        # Same construction as String, defaulting to unicode conversion
        # and warning when a bytestring is passed.
        for key, default in (('convert_unicode', True),
                             ('_warn_on_bytestring', True)):
            kwargs.setdefault(key, default)
        super(Unicode, self).__init__(length=length, **kwargs)
class UnicodeText(Text):
    """An unbounded-length Unicode string type.

    See :class:`.Unicode` for details on the unicode behavior of this
    object.  Like :class:`.Unicode`, using :class:`.UnicodeText` implies
    a unicode-capable backend type such as ``NCLOB`` or ``NTEXT``.

    """

    __visit_name__ = 'unicode_text'

    def __init__(self, length=None, **kwargs):
        """
        Create a Unicode-converting Text type.

        Parameters are the same as that of :class:`.Text`,
        with the exception that ``convert_unicode``
        defaults to ``True``.

        """
        # Same construction as Text, defaulting to unicode conversion
        # and warning when a bytestring is passed.
        for key, default in (('convert_unicode', True),
                             ('_warn_on_bytestring', True)):
            kwargs.setdefault(key, default)
        super(UnicodeText, self).__init__(length=length, **kwargs)
class Integer(_DateAffinity, TypeEngine):
    """A type for ``int`` integers."""

    __visit_name__ = 'integer'

    def get_dbapi_type(self, dbapi):
        # Integer-affinity types map to the DBAPI's generic NUMBER type.
        return dbapi.NUMBER

    @property
    def python_type(self):
        return int

    def literal_processor(self, dialect):
        # Integers render inline as their plain decimal string form.
        def render(value):
            return str(value)
        return render

    @util.memoized_property
    def _expression_adaptations(self):
        # TODO: need a dictionary object that will
        # handle operators generically here, this is incomplete
        same = self.__class__
        return {
            operators.add: {
                Date: Date,
                Integer: same,
                Numeric: Numeric,
            },
            operators.mul: {
                Interval: Interval,
                Integer: same,
                Numeric: Numeric,
            },
            operators.div: {
                Integer: same,
                Numeric: Numeric,
            },
            operators.truediv: {
                Integer: same,
                Numeric: Numeric,
            },
            operators.sub: {
                Integer: same,
                Numeric: Numeric,
            },
        }
class SmallInteger(Integer):
    """A type for smaller ``int`` integers.

    Typically renders as ``SMALLINT`` in DDL; otherwise behaves like a
    normal :class:`.Integer` on the Python side.

    """

    __visit_name__ = 'small_integer'
class BigInteger(Integer):
    """A type for bigger ``int`` integers.

    Typically renders as ``BIGINT`` in DDL; otherwise behaves like a
    normal :class:`.Integer` on the Python side.

    """

    __visit_name__ = 'big_integer'
class Numeric(_DateAffinity, TypeEngine):
"""A type for fixed precision numbers.
Typically generates DECIMAL or NUMERIC. Returns
``decimal.Decimal`` objects by default, applying
conversion as needed.
.. note::
The `cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library
is a high performing alternative to Python's built-in
``decimal.Decimal`` type, which performs very poorly in high volume
situations. SQLAlchemy 0.7 is tested against ``cdecimal`` and supports
it fully. The type is not necessarily supported by DBAPI
implementations however, most of which contain an import for plain
``decimal`` in their source code, even though some such as psycopg2
provide hooks for alternate adapters. SQLAlchemy imports ``decimal``
globally as well. The most straightforward and
foolproof way to use "cdecimal" given current DBAPI and Python support
is to patch it directly into sys.modules before anything else is
imported::
import sys
import cdecimal
sys.modules["decimal"] = cdecimal
While the global patch is a little ugly, it's particularly
important to use just one decimal library at a time since
Python Decimal and cdecimal Decimal objects
are not currently compatible *with each other*::
>>> import cdecimal
>>> import decimal
>>> decimal.Decimal("10") == cdecimal.Decimal("10")
False
SQLAlchemy will provide more natural support of
cdecimal if and when it becomes a standard part of Python
installations and is supported by all DBAPIs.
"""
__visit_name__ = 'numeric'
_default_decimal_return_scale = 10
def __init__(self, precision=None, scale=None,
decimal_return_scale=None, asdecimal=True):
"""
Construct a Numeric.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
:param asdecimal: default True. Return whether or not
values should be sent as Python Decimal objects, or
as floats. Different DBAPIs send one or the other based on
datatypes - the Numeric type will ensure that return values
are one or the other across DBAPIs consistently.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
Specfiying this value will override that length. Types which
do include an explicit ".scale" value, such as the base :class:`.Numeric`
as well as the MySQL float types, will use the value of ".scale"
as the default for decimal_return_scale, if not otherwise specified.
.. versionadded:: 0.9.0
When using the ``Numeric`` type, care should be taken to ensure
that the asdecimal setting is apppropriate for the DBAPI in use -
when Numeric applies a conversion from Decimal->float or float->
Decimal, this conversion incurs an additional performance overhead
for all result columns received.
DBAPIs that return Decimal natively (e.g. psycopg2) will have
better accuracy and higher performance with a setting of ``True``,
as the native translation to Decimal reduces the amount of floating-
point issues at play, and the Numeric type itself doesn't need
to apply any further conversions. However, another DBAPI which
returns floats natively *will* incur an additional conversion
overhead, and is still subject to floating point data loss - in
which case ``asdecimal=False`` will at least remove the extra
conversion overhead.
"""
self.precision = precision
self.scale = scale
self.decimal_return_scale = decimal_return_scale
self.asdecimal = asdecimal
@property
def _effective_decimal_return_scale(self):
if self.decimal_return_scale is not None:
return self.decimal_return_scale
elif getattr(self, "scale", None) is not None:
return self.scale
else:
return self._default_decimal_return_scale
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@property
def python_type(self):
if self.asdecimal:
return decimal.Decimal
else:
return float
def bind_processor(self, dialect):
if dialect.supports_native_decimal:
return None
else:
return processors.to_float
def result_processor(self, dialect, coltype):
if self.asdecimal:
if dialect.supports_native_decimal:
# we're a "numeric", DBAPI will give us Decimal directly
return None
else:
util.warn('Dialect %s+%s does *not* support Decimal '
'objects natively, and SQLAlchemy must '
'convert from floating point - rounding '
'errors and other issues may occur. Please '
'consider storing Decimal numbers as strings '
'or integers on this platform for lossless '
'storage.' % (dialect.name, dialect.driver))
# we're a "numeric", DBAPI returns floats, convert.
return processors.to_decimal_processor_factory(
decimal.Decimal,
self.scale if self.scale is not None
else self._default_decimal_return_scale)
else:
if dialect.supports_native_decimal:
return processors.to_float
else:
return None
    @util.memoized_property
    def _expression_adaptations(self):
        # operator -> {affinity of the other operand -> type of the result};
        # consulted by _DateAffinity.Comparator._adapt_expression when
        # determining the type of a binary expression
        return {
            operators.mul: {
                Interval: Interval,
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.div: {
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.truediv: {
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.add: {
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.sub: {
                Numeric: self.__class__,
                Integer: self.__class__,
            }
        }
class Float(Numeric):
    """A type for ``float`` numbers.

    Returns Python ``float`` objects by default, applying
    conversion as needed.

    """

    __visit_name__ = 'float'

    scale = None

    def __init__(self, precision=None, asdecimal=False,
                 decimal_return_scale=None, **kwargs):
        # raw docstring: the ``\**kwargs`` markup below is an invalid
        # escape sequence in a non-raw string literal
        r"""
        Construct a Float.

        :param precision: the numeric precision for use in DDL ``CREATE
           TABLE``.

        :param asdecimal: the same flag as that of :class:`.Numeric`, but
          defaults to ``False``.   Note that setting this flag to ``True``
          results in floating point conversion.

        :param decimal_return_scale: Default scale to use when converting
         from floats to Python decimals.  Floating point values will typically
         be much longer due to decimal inaccuracy, and most floating point
         database types don't have a notion of "scale", so by default the
         float type looks for the first ten decimal places when converting.
         Specifying this value will override that length.  Note that the
         MySQL float types, which do include "scale", will use "scale"
         as the default for decimal_return_scale, if not otherwise specified.

         .. versionadded:: 0.9.0

        :param \**kwargs: deprecated.  Additional arguments here are ignored
         by the default :class:`.Float` type.  For database specific
         floats that support additional arguments, see that dialect's
         documentation for details, such as
         :class:`sqlalchemy.dialects.mysql.FLOAT`.

        """
        self.precision = precision
        self.asdecimal = asdecimal
        self.decimal_return_scale = decimal_return_scale
        if kwargs:
            util.warn_deprecated("Additional keyword arguments "
                                 "passed to Float ignored.")

    def result_processor(self, dialect, coltype):
        # unlike Numeric, a Float passes DBAPI floats through untouched;
        # conversion to Decimal happens only on explicit asdecimal=True
        if self.asdecimal:
            return processors.to_decimal_processor_factory(
                decimal.Decimal,
                self._effective_decimal_return_scale)
        else:
            return None

    @util.memoized_property
    def _expression_adaptations(self):
        # operator -> {affinity of the other operand -> result type}
        return {
            operators.mul: {
                Interval: Interval,
                Numeric: self.__class__,
            },
            operators.div: {
                Numeric: self.__class__,
            },
            operators.truediv: {
                Numeric: self.__class__,
            },
            operators.add: {
                Numeric: self.__class__,
            },
            operators.sub: {
                Numeric: self.__class__,
            }
        }
class DateTime(_DateAffinity, TypeEngine):
    """A type for ``datetime.datetime()`` objects.

    Date and time types return objects from the Python ``datetime``
    module.  Most DBAPIs have built in support for the datetime
    module, with the noted exception of SQLite.  In the case of
    SQLite, date and time types are stored as strings which are then
    converted back to datetime objects when rows are returned.

    """

    __visit_name__ = 'datetime'

    def __init__(self, timezone=False):
        """Construct a new :class:`.DateTime`.

        :param timezone: boolean.  If True, and supported by the
          backend, will produce 'TIMESTAMP WITH TIMEZONE'. For backends
          that don't support timezone aware timestamps, has no
          effect.

        """
        self.timezone = timezone

    def get_dbapi_type(self, dbapi):
        """Return the DBAPI's DATETIME type symbol."""
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.datetime

    @util.memoized_property
    def _expression_adaptations(self):
        # datetime +/- interval -> datetime; datetime - datetime -> interval
        return {
            operators.add: {
                Interval: self.__class__,
            },
            operators.sub: {
                Interval: self.__class__,
                DateTime: Interval,
            },
        }
class Date(_DateAffinity, TypeEngine):
    """A type for ``datetime.date()`` objects."""

    __visit_name__ = 'date'

    def get_dbapi_type(self, dbapi):
        """Return the DBAPI's DATETIME type symbol."""
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.date

    @util.memoized_property
    def _expression_adaptations(self):
        # operator -> {affinity of the other operand -> result type}
        return {
            operators.add: {
                Integer: self.__class__,
                Interval: DateTime,
                Time: DateTime,
            },
            operators.sub: {
                # date - integer = date
                Integer: self.__class__,
                # date - date = integer.
                Date: Integer,
                Interval: DateTime,
                # date - datetime = interval,
                # this one is not in the PG docs
                # but works
                DateTime: Interval,
            },
        }
class Time(_DateAffinity, TypeEngine):
    """A type for ``datetime.time()`` objects."""

    __visit_name__ = 'time'

    def __init__(self, timezone=False):
        """Construct a new :class:`.Time`.

        :param timezone: boolean.  The same flag as on
          :class:`.DateTime`; takes effect only on backends supporting
          timezone-aware time values.
        """
        self.timezone = timezone

    def get_dbapi_type(self, dbapi):
        """Return the DBAPI's DATETIME type symbol."""
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.time

    @util.memoized_property
    def _expression_adaptations(self):
        # time + date -> datetime; time - time -> interval
        return {
            operators.add: {
                Date: DateTime,
                Interval: self.__class__
            },
            operators.sub: {
                Time: Interval,
                Interval: self.__class__,
            },
        }
class _Binary(TypeEngine):
    """Define base behavior for binary types."""

    def __init__(self, length=None):
        # optional length for DDL rendering
        self.length = length

    def literal_processor(self, dialect):
        """Return an inline-literal renderer for binary values.

        Bytes are decoded using the dialect's configured encoding, then
        single quotes are doubled for SQL quoting.
        """
        def process(value):
            # bugfix: formerly read ``self.dialect.encoding`` -- the type
            # object has no ``dialect`` attribute, so rendering a binary
            # literal raised AttributeError.  The dialect in use is the
            # argument passed to this method.
            value = value.decode(dialect.encoding).replace("'", "''")
            return "'%s'" % value
        return process

    @property
    def python_type(self):
        return util.binary_type

    # Python 3 - sqlite3 doesn't need the `Binary` conversion
    # here, though pg8000 does to indicate "bytea"
    def bind_processor(self, dialect):
        DBAPIBinary = dialect.dbapi.Binary

        def process(value):
            if value is not None:
                return DBAPIBinary(value)
            else:
                return None
        return process

    # Python 3 has native bytes() type
    # both sqlite3 and pg8000 seem to return it,
    # psycopg2 as of 2.5 returns 'memoryview'
    if util.py2k:
        def result_processor(self, dialect, coltype):
            if util.jython:
                def process(value):
                    if value is not None:
                        if isinstance(value, array.array):
                            return value.tostring()
                        return str(value)
                    else:
                        return None
            else:
                process = processors.to_str
            return process
    else:
        def result_processor(self, dialect, coltype):
            def process(value):
                if value is not None:
                    value = bytes(value)
                return value
            return process

    def coerce_compared_value(self, op, value):
        """See :meth:`.TypeEngine.coerce_compared_value` for a description."""

        if isinstance(value, util.string_types):
            return self
        else:
            return super(_Binary, self).coerce_compared_value(op, value)

    def get_dbapi_type(self, dbapi):
        return dbapi.BINARY
class LargeBinary(_Binary):
    """A type for large binary byte data.

    The Binary type generates BLOB or BYTEA when tables are created,
    and also converts incoming values using the ``Binary`` callable
    provided by each DB-API.

    """

    __visit_name__ = 'large_binary'

    def __init__(self, length=None):
        """
        Construct a LargeBinary type.

        :param length: optional, a length for the column for use in
          DDL statements, for those BLOB types that accept a length
          (i.e. MySQL).  It does *not* produce a small BINARY/VARBINARY
          type - use the BINARY/VARBINARY types specifically for those.
          May be safely omitted if no ``CREATE
          TABLE`` will be issued.  Certain databases may require a
          *length* for use in DDL, and will raise an exception when
          the ``CREATE TABLE`` DDL is issued.

        """
        # all runtime behavior lives in _Binary; this subclass carries
        # the distinct visit name and public documentation
        _Binary.__init__(self, length=length)
class Binary(LargeBinary):
    """Deprecated.  Renamed to LargeBinary."""

    def __init__(self, *arg, **kw):
        # emit a deprecation warning, then behave exactly as LargeBinary
        util.warn_deprecated('The Binary type has been renamed to '
                             'LargeBinary.')
        LargeBinary.__init__(self, *arg, **kw)
class SchemaType(SchemaEventTarget):
    """Mark a type as possibly requiring schema-level DDL for usage.

    Supports types that must be explicitly created/dropped (i.e. PG ENUM type)
    as well as types that are complimented by table or schema level
    constraints, triggers, and other rules.

    :class:`.SchemaType` classes can also be targets for the
    :meth:`.DDLEvents.before_parent_attach` and
    :meth:`.DDLEvents.after_parent_attach` events, where the events fire off
    surrounding the association of the type object with a parent
    :class:`.Column`.

    .. seealso::

        :class:`.Enum`

        :class:`.Boolean`

    """

    def __init__(self, **kw):
        """Consume schema-related keyword arguments.

        Recognized keys: ``name`` (with optional ``quote``), ``schema``,
        ``metadata`` and ``inherit_schema``.  When ``metadata`` is given,
        create/drop hooks are registered against it immediately.
        """
        name = kw.pop('name', None)
        if name is not None:
            self.name = quoted_name(name, kw.pop('quote', None))
        else:
            self.name = None
        self.schema = kw.pop('schema', None)
        self.metadata = kw.pop('metadata', None)
        self.inherit_schema = kw.pop('inherit_schema', False)
        if self.metadata:
            event.listen(
                self.metadata,
                "before_create",
                util.portable_instancemethod(self._on_metadata_create)
            )
            event.listen(
                self.metadata,
                "after_drop",
                util.portable_instancemethod(self._on_metadata_drop)
            )

    def _set_parent(self, column):
        # defer setup until the parent column is itself attached to a Table
        column._on_table_attach(util.portable_instancemethod(self._set_table))

    def _set_table(self, column, table):
        """Register create/drop hooks once the owning Table is known."""
        if self.inherit_schema:
            # adopt the owning table's schema
            self.schema = table.schema
        event.listen(
            table,
            "before_create",
            util.portable_instancemethod(
                self._on_table_create)
        )
        event.listen(
            table,
            "after_drop",
            util.portable_instancemethod(self._on_table_drop)
        )
        if self.metadata is None:
            # TODO: what's the difference between self.metadata
            # and table.metadata here ?
            event.listen(
                table.metadata,
                "before_create",
                util.portable_instancemethod(self._on_metadata_create)
            )
            event.listen(
                table.metadata,
                "after_drop",
                util.portable_instancemethod(self._on_metadata_drop)
            )

    def copy(self, **kw):
        """Produce a copy of this type via :meth:`.adapt`."""
        return self.adapt(self.__class__)

    def adapt(self, impltype, **kw):
        """Produce an ``impltype`` instance, carrying over name, schema,
        metadata and inherit_schema settings."""
        schema = kw.pop('schema', self.schema)
        metadata = kw.pop('metadata', self.metadata)
        return impltype(name=self.name,
                        schema=schema,
                        metadata=metadata,
                        inherit_schema=self.inherit_schema,
                        **kw
                        )

    @property
    def bind(self):
        # engine bound to the associated MetaData, if any
        return self.metadata and self.metadata.bind or None

    def create(self, bind=None, checkfirst=False):
        """Issue CREATE ddl for this type, if applicable."""
        if bind is None:
            bind = _bind_or_error(self)
        t = self.dialect_impl(bind.dialect)
        # forward only when the dialect supplies a distinct SchemaType
        # implementation for this type
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t.create(bind=bind, checkfirst=checkfirst)

    def drop(self, bind=None, checkfirst=False):
        """Issue DROP ddl for this type, if applicable."""
        if bind is None:
            bind = _bind_or_error(self)
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t.drop(bind=bind, checkfirst=checkfirst)

    def _on_table_create(self, target, bind, **kw):
        """Forward the table before_create event to the dialect impl."""
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_table_create(target, bind, **kw)

    def _on_table_drop(self, target, bind, **kw):
        """Forward the table after_drop event to the dialect impl."""
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_table_drop(target, bind, **kw)

    def _on_metadata_create(self, target, bind, **kw):
        """Forward the metadata before_create event to the dialect impl."""
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_metadata_create(target, bind, **kw)

    def _on_metadata_drop(self, target, bind, **kw):
        """Forward the metadata after_drop event to the dialect impl."""
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_metadata_drop(target, bind, **kw)
class Enum(String, SchemaType):
    """Generic Enum Type.

    The Enum type provides a set of possible string values which the
    column is constrained towards.

    By default, uses the backend's native ENUM type if available,
    else uses VARCHAR + a CHECK constraint.

    .. seealso::

        :class:`~.postgresql.ENUM` - PostgreSQL-specific type,
        which has additional functionality.

    """

    __visit_name__ = 'enum'

    def __init__(self, *enums, **kw):
        # raw docstring: ``\*enums`` below is an invalid escape sequence
        # in a non-raw string literal
        r"""Construct an enum.

        Keyword arguments which don't apply to a specific backend are ignored
        by that backend.

        :param \*enums: string or unicode enumeration labels. If unicode
           labels are present, the `convert_unicode` flag is auto-enabled.

        :param convert_unicode: Enable unicode-aware bind parameter and
           result-set processing for this Enum's data. This is set
           automatically based on the presence of unicode label strings.

        :param metadata: Associate this type directly with a ``MetaData``
           object. For types that exist on the target database as an
           independent schema construct (Postgresql), this type will be
           created and dropped within ``create_all()`` and ``drop_all()``
           operations. If the type is not associated with any ``MetaData``
           object, it will associate itself with each ``Table`` in which it is
           used, and will be created when any of those individual tables are
           created, after a check is performed for its existence. The type is
           only dropped when ``drop_all()`` is called for that ``Table``
           object's metadata, however.

        :param name: The name of this type. This is required for Postgresql
           and any future supported database which requires an explicitly
           named type, or an explicitly named constraint in order to generate
           the type and/or a table that uses it.

        :param native_enum: Use the database's native ENUM type when
           available. Defaults to True. When False, uses VARCHAR + check
           constraint for all backends.

        :param schema: Schema name of this type. For types that exist on the
           target database as an independent schema construct (Postgresql),
           this parameter specifies the named schema in which the type is
           present.

           .. note::

              The ``schema`` of the :class:`.Enum` type does not
              by default make use of the ``schema`` established on the
              owning :class:`.Table`.  If this behavior is desired,
              set the ``inherit_schema`` flag to ``True``.

        :param quote: Set explicit quoting preferences for the type's name.

        :param inherit_schema: When ``True``, the "schema" from the owning
           :class:`.Table` will be copied to the "schema" attribute of this
           :class:`.Enum`, replacing whatever value was passed for the
           ``schema`` attribute.   This also takes effect when using the
           :meth:`.Table.tometadata` operation.

           .. versionadded:: 0.8

        """
        self.enums = enums
        self.native_enum = kw.pop('native_enum', True)
        convert_unicode = kw.pop('convert_unicode', None)
        if convert_unicode is None:
            # auto-enable unicode conversion if any label is a text type
            for e in enums:
                if isinstance(e, util.text_type):
                    convert_unicode = True
                    break
            else:
                convert_unicode = False
        if self.enums:
            # VARCHAR length accommodates the longest label
            length = max(len(x) for x in self.enums)
        else:
            length = 0
        String.__init__(self,
                        length=length,
                        convert_unicode=convert_unicode,
                        )
        SchemaType.__init__(self, **kw)

    def __repr__(self):
        return util.generic_repr(self, [
            ("native_enum", True),
            ("name", None)
        ])

    def _should_create_constraint(self, compiler):
        # a CHECK constraint is emitted only when a native ENUM type is
        # not in use
        return not self.native_enum or \
            not compiler.dialect.supports_native_enum

    @util.dependencies("sqlalchemy.sql.schema")
    def _set_table(self, schema, column, table):
        if self.native_enum:
            SchemaType._set_table(self, column, table)

        # the CHECK constraint is always attached; its _create_rule
        # suppresses rendering when the native ENUM path is taken
        e = schema.CheckConstraint(
            type_coerce(column, self).in_(self.enums),
            name=self.name,
            _create_rule=util.portable_instancemethod(
                self._should_create_constraint)
        )
        table.append_constraint(e)

    def adapt(self, impltype, **kw):
        """Produce an adapted form, carrying labels and flags over to
        Enum subclasses, else deferring to :meth:`.SchemaType.adapt`."""
        schema = kw.pop('schema', self.schema)
        metadata = kw.pop('metadata', self.metadata)
        if issubclass(impltype, Enum):
            return impltype(name=self.name,
                            schema=schema,
                            metadata=metadata,
                            convert_unicode=self.convert_unicode,
                            native_enum=self.native_enum,
                            inherit_schema=self.inherit_schema,
                            *self.enums,
                            **kw
                            )
        else:
            return super(Enum, self).adapt(impltype, **kw)
class PickleType(TypeDecorator):
    """Holds Python objects, which are serialized using pickle.

    PickleType builds upon the Binary type to apply Python's
    ``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
    the way out, allowing any pickleable Python object to be stored as
    a serialized binary field.

    To allow ORM change events to propagate for elements associated
    with :class:`.PickleType`, see :ref:`mutable_toplevel`.

    """

    impl = LargeBinary

    def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
                 pickler=None, comparator=None):
        """
        Construct a PickleType.

        :param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.

        :param pickler: defaults to cPickle.pickle or pickle.pickle if
          cPickle is not available.  May be any object with
          pickle-compatible ``dumps`` and ``loads`` methods.

        :param comparator: a 2-arg callable predicate used
          to compare values of this type.  If left as ``None``,
          the Python "equals" operator is used to compare values.

        """
        self.protocol = protocol
        self.pickler = pickler or pickle
        self.comparator = comparator
        super(PickleType, self).__init__()

    def __reduce__(self):
        # NOTE: a custom ``pickler`` object is not carried through when
        # the type itself is pickled; the reconstructed instance falls
        # back to the default pickle module.
        return PickleType, (self.protocol,
                            None,
                            self.comparator)

    def bind_processor(self, dialect):
        """Return a bind converter: pickle the value, then apply the
        LargeBinary impl's own processing, if any."""
        impl_processor = self.impl.bind_processor(dialect)
        dumps = self.pickler.dumps
        protocol = self.protocol
        if impl_processor:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return impl_processor(value)
        else:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return value
        return process

    def result_processor(self, dialect, coltype):
        """Return a result converter: apply the impl's processing, if
        any, then unpickle."""
        impl_processor = self.impl.result_processor(dialect, coltype)
        loads = self.pickler.loads
        if impl_processor:
            def process(value):
                value = impl_processor(value)
                if value is None:
                    return None
                return loads(value)
        else:
            def process(value):
                if value is None:
                    return None
                return loads(value)
        return process

    def compare_values(self, x, y):
        """Compare via the configured ``comparator``, or plain ``==``."""
        if self.comparator:
            return self.comparator(x, y)
        else:
            return x == y
class Boolean(TypeEngine, SchemaType):
    """A ``bool`` datatype.

    Rendered as BOOLEAN or SMALLINT on the DDL side depending on the
    backend; deals in Python ``True``/``False`` values.
    """

    __visit_name__ = 'boolean'

    def __init__(self, create_constraint=True, name=None):
        """Construct a Boolean.

        :param create_constraint: when True (the default) and the
          boolean is stored as an int/smallint, also emit a CHECK
          constraint restricting the column to 0 or 1.

        :param name: optional name for the generated CHECK constraint.
        """
        self.create_constraint = create_constraint
        self.name = name

    def _should_create_constraint(self, compiler):
        # constrain only backends that emulate booleans with integers
        return not compiler.dialect.supports_native_boolean

    @util.dependencies("sqlalchemy.sql.schema")
    def _set_table(self, schema, column, table):
        if not self.create_constraint:
            return
        constraint = schema.CheckConstraint(
            type_coerce(column, self).in_([0, 1]),
            name=self.name,
            _create_rule=util.portable_instancemethod(
                self._should_create_constraint)
        )
        table.append_constraint(constraint)

    @property
    def python_type(self):
        return bool

    def bind_processor(self, dialect):
        """Coerce to int only on backends without a native boolean."""
        if dialect.supports_native_boolean:
            return None
        return processors.boolean_to_int

    def result_processor(self, dialect, coltype):
        """Coerce ints back to bool on non-native-boolean backends."""
        if dialect.supports_native_boolean:
            return None
        return processors.int_to_boolean
class Interval(_DateAffinity, TypeDecorator):
    """A type for ``datetime.timedelta()`` objects.

    The Interval type deals with ``datetime.timedelta`` objects.  In
    PostgreSQL, the native ``INTERVAL`` type is used; for others, the
    value is stored as a date which is relative to the "epoch"
    (Jan. 1, 1970).

    Note that the ``Interval`` type does not currently provide date arithmetic
    operations on platforms which do not support interval types natively. Such
    operations usually require transformation of both sides of the expression
    (such as, conversion of both sides into integer epoch values first) which
    currently is a manual procedure (such as via
    :attr:`~sqlalchemy.sql.expression.func`).

    """

    impl = DateTime
    # reference point for the timedelta <-> datetime round trip below
    epoch = dt.datetime.utcfromtimestamp(0)

    def __init__(self, native=True,
                 second_precision=None,
                 day_precision=None):
        """Construct an Interval object.

        :param native: when True, use the actual
          INTERVAL type provided by the database, if
          supported (currently Postgresql, Oracle).
          Otherwise, represent the interval data as
          an epoch value regardless.

        :param second_precision: For native interval types
          which support a "fractional seconds precision" parameter,
          i.e. Oracle and Postgresql

        :param day_precision: for native interval types which
          support a "day precision" parameter, i.e. Oracle.

        """
        super(Interval, self).__init__()
        self.native = native
        self.second_precision = second_precision
        self.day_precision = day_precision

    def adapt(self, cls, **kw):
        # a dialect-specific interval type may know how to construct
        # itself from this generic one; otherwise produce another
        # generic Interval with the same settings
        if self.native and hasattr(cls, '_adapt_from_generic_interval'):
            return cls._adapt_from_generic_interval(self, **kw)
        else:
            return self.__class__(
                native=self.native,
                second_precision=self.second_precision,
                day_precision=self.day_precision,
                **kw)

    @property
    def python_type(self):
        return dt.timedelta

    def bind_processor(self, dialect):
        # outgoing: timedelta -> absolute datetime (epoch + delta), then
        # the DateTime impl's own bind processing, if any
        impl_processor = self.impl.bind_processor(dialect)
        epoch = self.epoch
        if impl_processor:
            def process(value):
                if value is not None:
                    value = epoch + value
                return impl_processor(value)
        else:
            def process(value):
                if value is not None:
                    value = epoch + value
                return value
        return process

    def result_processor(self, dialect, coltype):
        # incoming: datetime -> timedelta (value - epoch), after the
        # DateTime impl's own result processing, if any
        impl_processor = self.impl.result_processor(dialect, coltype)
        epoch = self.epoch
        if impl_processor:
            def process(value):
                value = impl_processor(value)
                if value is None:
                    return None
                return value - epoch
        else:
            def process(value):
                if value is None:
                    return None
                return value - epoch
        return process

    @util.memoized_property
    def _expression_adaptations(self):
        # operator -> {affinity of the other operand -> result type}
        return {
            operators.add: {
                Date: DateTime,
                Interval: self.__class__,
                DateTime: DateTime,
                Time: Time,
            },
            operators.sub: {
                Interval: self.__class__
            },
            operators.mul: {
                Numeric: self.__class__
            },
            operators.truediv: {
                Numeric: self.__class__
            },
            operators.div: {
                Numeric: self.__class__
            }
        }

    @property
    def _type_affinity(self):
        return Interval

    def coerce_compared_value(self, op, value):
        """See :meth:`.TypeEngine.coerce_compared_value` for a description."""

        return self.impl.coerce_compared_value(op, value)
# ----------------------------------------------------------------------
# SQL-standard "uppercase" types.  Each carries an uppercase
# ``__visit_name__`` matching its SQL keyword; runtime behavior is
# inherited unchanged from the generic camelcase type it subclasses.
# ----------------------------------------------------------------------


class REAL(Float):
    """The SQL REAL type."""

    __visit_name__ = 'REAL'


class FLOAT(Float):
    """The SQL FLOAT type."""

    __visit_name__ = 'FLOAT'


class NUMERIC(Numeric):
    """The SQL NUMERIC type."""

    __visit_name__ = 'NUMERIC'


class DECIMAL(Numeric):
    """The SQL DECIMAL type."""

    __visit_name__ = 'DECIMAL'


class INTEGER(Integer):
    """The SQL INT or INTEGER type."""

    __visit_name__ = 'INTEGER'

# alias: allows ``INT`` to be used interchangeably with ``INTEGER``
INT = INTEGER


class SMALLINT(SmallInteger):
    """The SQL SMALLINT type."""

    __visit_name__ = 'SMALLINT'


class BIGINT(BigInteger):
    """The SQL BIGINT type."""

    __visit_name__ = 'BIGINT'


class TIMESTAMP(DateTime):
    """The SQL TIMESTAMP type."""

    __visit_name__ = 'TIMESTAMP'

    def get_dbapi_type(self, dbapi):
        """Return the DBAPI's TIMESTAMP symbol (not DATETIME)."""
        return dbapi.TIMESTAMP


class DATETIME(DateTime):
    """The SQL DATETIME type."""

    __visit_name__ = 'DATETIME'


class DATE(Date):
    """The SQL DATE type."""

    __visit_name__ = 'DATE'


class TIME(Time):
    """The SQL TIME type."""

    __visit_name__ = 'TIME'


class TEXT(Text):
    """The SQL TEXT type."""

    __visit_name__ = 'TEXT'


class CLOB(Text):
    """The CLOB type.

    This type is found in Oracle and Informix.
    """

    __visit_name__ = 'CLOB'


class VARCHAR(String):
    """The SQL VARCHAR type."""

    __visit_name__ = 'VARCHAR'


class NVARCHAR(Unicode):
    """The SQL NVARCHAR type."""

    __visit_name__ = 'NVARCHAR'


class CHAR(String):
    """The SQL CHAR type."""

    __visit_name__ = 'CHAR'


class NCHAR(Unicode):
    """The SQL NCHAR type."""

    __visit_name__ = 'NCHAR'


class BLOB(LargeBinary):
    """The SQL BLOB type."""

    __visit_name__ = 'BLOB'


class BINARY(_Binary):
    """The SQL BINARY type."""

    __visit_name__ = 'BINARY'


class VARBINARY(_Binary):
    """The SQL VARBINARY type."""

    __visit_name__ = 'VARBINARY'


class BOOLEAN(Boolean):
    """The SQL BOOLEAN type."""

    __visit_name__ = 'BOOLEAN'
class NullType(TypeEngine):
    """An unknown type.

    :class:`.NullType` is used as a default type for those cases where
    a type cannot be determined, including:

    * During table reflection, when the type of a column is not recognized
      by the :class:`.Dialect`
    * When constructing SQL expressions using plain Python objects of
      unknown types (e.g. ``somecolumn == my_special_object``)
    * When a new :class:`.Column` is created, and the given type is passed
      as ``None`` or is not passed at all.

    The :class:`.NullType` can be used within SQL expression invocation
    without issue, it just has no behavior either at the expression construction
    level or at the bind-parameter/result processing level.  :class:`.NullType`
    will result in a :exc:`.CompileError` if the compiler is asked to render
    the type itself, such as if it is used in a :func:`.cast` operation
    or within a schema creation operation such as that invoked by
    :meth:`.MetaData.create_all` or the :class:`.CreateTable` construct.

    """

    __visit_name__ = 'null'

    # flag marking "no type information present"
    _isnull = True

    def literal_processor(self, dialect):
        """Render any value of unknown type as the literal ``NULL``."""
        def process(value):
            return "NULL"
        return process

    class Comparator(TypeEngine.Comparator):

        def _adapt_expression(self, op, other_comparator):
            # an expression involving NULLTYPE keeps NULLTYPE, unless
            # the operator is commutative and the other side can supply
            # a better adaptation
            if isinstance(other_comparator, NullType.Comparator) or \
                    not operators.is_commutative(op):
                return op, self.expr.type
            else:
                return other_comparator._adapt_expression(op, self)
    comparator_factory = Comparator
# module-level instances of the most common types; re-exported onto
# the ``type_api`` module below
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
INTEGERTYPE = Integer()

# default SQL type selected when coercing a plain Python value into a
# SQL expression
_type_map = {
    int: Integer(),
    float: Numeric(),
    bool: BOOLEANTYPE,
    decimal.Decimal: Numeric(),
    dt.date: Date(),
    dt.datetime: DateTime(),
    dt.time: Time(),
    dt.timedelta: Interval(),
    util.NoneType: NULLTYPE
}

if util.py3k:
    # Python 3: ``str`` is text, ``bytes`` is binary
    _type_map[bytes] = LargeBinary()
    _type_map[str] = Unicode()
else:
    # Python 2: ``unicode`` and ``str`` are distinct types
    _type_map[unicode] = Unicode()
    _type_map[str] = String()

# back-assign to type_api
from . import type_api
type_api.BOOLEANTYPE = BOOLEANTYPE
type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api._type_map = _type_map

# this one, there's all kinds of ways to play it, but at the EOD
# there's just a giant dependency cycle between the typing system and
# the expression element system, as you might expect.   We can use
# importlaters or whatnot, but the typing system just necessarily has
# to have some kind of connection like this.  right now we're injecting the
# _DefaultColumnComparator implementation into the TypeEngine.Comparator
# interface.  Alternatively TypeEngine.Comparator could have an "impl"
# injected, though just injecting the base is simpler, error free, and more
# performant.


class Comparator(_DefaultColumnComparator):
    BOOLEANTYPE = BOOLEANTYPE

TypeEngine.Comparator.__bases__ = (Comparator, ) + TypeEngine.Comparator.__bases__
| 33.115692 | 93 | 0.607474 |
import datetime as dt
import codecs
from .type_api import TypeEngine, TypeDecorator, to_instance
from .elements import quoted_name, type_coerce
from .default_comparator import _DefaultColumnComparator
from .. import exc, util, processors
from .base import _bind_or_error, SchemaEventTarget
from . import operators
from .. import event
from ..util import pickle
import decimal
if util.jython:
import array
class _DateAffinity(object):
@property
def _expression_adaptations(self):
raise NotImplementedError()
class Comparator(TypeEngine.Comparator):
_blank_dict = util.immutabledict()
def _adapt_expression(self, op, other_comparator):
othertype = other_comparator.type._type_affinity
return op, \
to_instance(self.type._expression_adaptations.get(op, self._blank_dict).\
get(othertype, NULLTYPE))
comparator_factory = Comparator
class Concatenable(object):
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if op is operators.add and isinstance(other_comparator,
(Concatenable.Comparator, NullType.Comparator)):
return operators.concat_op, self.expr.type
else:
return op, self.expr.type
comparator_factory = Comparator
class String(Concatenable, TypeEngine):
__visit_name__ = 'string'
def __init__(self, length=None, collation=None,
convert_unicode=False,
unicode_error=None,
_warn_on_bytestring=False
):
if unicode_error is not None and convert_unicode != 'force':
raise exc.ArgumentError("convert_unicode must be 'force' "
"when unicode_error is set.")
self.length = length
self.collation = collation
self.convert_unicode = convert_unicode
self.unicode_error = unicode_error
self._warn_on_bytestring = _warn_on_bytestring
def literal_processor(self, dialect):
def process(value):
value = value.replace("'", "''")
return "'%s'" % value
return process
def bind_processor(self, dialect):
if self.convert_unicode or dialect.convert_unicode:
if dialect.supports_unicode_binds and \
self.convert_unicode != 'force':
if self._warn_on_bytestring:
def process(value):
if isinstance(value, util.binary_type):
util.warn("Unicode type received non-unicode bind "
"param value.")
return value
return process
else:
return None
else:
encoder = codecs.getencoder(dialect.encoding)
warn_on_bytestring = self._warn_on_bytestring
def process(value):
if isinstance(value, util.text_type):
return encoder(value, self.unicode_error)[0]
elif warn_on_bytestring and value is not None:
util.warn("Unicode type received non-unicode bind "
"param value")
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
wants_unicode = self.convert_unicode or dialect.convert_unicode
needs_convert = wants_unicode and \
(dialect.returns_unicode_strings is not True or
self.convert_unicode == 'force')
if needs_convert:
to_unicode = processors.to_unicode_processor_factory(
dialect.encoding, self.unicode_error)
if dialect.returns_unicode_strings:
# we wouldn't be here unless convert_unicode='force'
def process(value):
if isinstance(value, util.text_type):
return value
else:
return to_unicode(value)
return process
else:
return to_unicode
else:
return None
@property
def python_type(self):
if self.convert_unicode:
return util.text_type
else:
return str
def get_dbapi_type(self, dbapi):
return dbapi.STRING
class Text(String):
__visit_name__ = 'text'
class Unicode(String):
__visit_name__ = 'unicode'
def __init__(self, length=None, **kwargs):
kwargs.setdefault('convert_unicode', True)
kwargs.setdefault('_warn_on_bytestring', True)
super(Unicode, self).__init__(length=length, **kwargs)
class UnicodeText(Text):
__visit_name__ = 'unicode_text'
def __init__(self, length=None, **kwargs):
kwargs.setdefault('convert_unicode', True)
kwargs.setdefault('_warn_on_bytestring', True)
super(UnicodeText, self).__init__(length=length, **kwargs)
class Integer(_DateAffinity, TypeEngine):
__visit_name__ = 'integer'
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
@property
def python_type(self):
return int
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Date: Date,
Integer: self.__class__,
Numeric: Numeric,
},
operators.mul: {
Interval: Interval,
Integer: self.__class__,
Numeric: Numeric,
},
operators.div: {
Integer: self.__class__,
Numeric: Numeric,
},
operators.truediv: {
Integer: self.__class__,
Numeric: Numeric,
},
operators.sub: {
Integer: self.__class__,
Numeric: Numeric,
},
}
class SmallInteger(Integer):
__visit_name__ = 'small_integer'
class BigInteger(Integer):
__visit_name__ = 'big_integer'
class Numeric(_DateAffinity, TypeEngine):
__visit_name__ = 'numeric'
_default_decimal_return_scale = 10
def __init__(self, precision=None, scale=None,
decimal_return_scale=None, asdecimal=True):
self.precision = precision
self.scale = scale
self.decimal_return_scale = decimal_return_scale
self.asdecimal = asdecimal
@property
def _effective_decimal_return_scale(self):
if self.decimal_return_scale is not None:
return self.decimal_return_scale
elif getattr(self, "scale", None) is not None:
return self.scale
else:
return self._default_decimal_return_scale
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@property
def python_type(self):
if self.asdecimal:
return decimal.Decimal
else:
return float
def bind_processor(self, dialect):
if dialect.supports_native_decimal:
return None
else:
return processors.to_float
def result_processor(self, dialect, coltype):
if self.asdecimal:
if dialect.supports_native_decimal:
return None
else:
util.warn('Dialect %s+%s does *not* support Decimal '
'objects natively, and SQLAlchemy must '
'convert from floating point - rounding '
'errors and other issues may occur. Please '
'consider storing Decimal numbers as strings '
'or integers on this platform for lossless '
'storage.' % (dialect.name, dialect.driver))
# we're a "numeric", DBAPI returns floats, convert.
return processors.to_decimal_processor_factory(
decimal.Decimal,
self.scale if self.scale is not None
else self._default_decimal_return_scale)
else:
if dialect.supports_native_decimal:
return processors.to_float
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul: {
Interval: Interval,
Numeric: self.__class__,
Integer: self.__class__,
},
operators.div: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.truediv: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.add: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.sub: {
Numeric: self.__class__,
Integer: self.__class__,
}
}
class Float(Numeric):
    """Type representing inexact floating point values (FLOAT, REAL)."""

    __visit_name__ = 'float'

    # Floats carry no fixed scale; only precision is rendered in DDL.
    scale = None

    def __init__(self, precision=None, asdecimal=False,
                 decimal_return_scale=None, **kwargs):
        """Construct a Float.

        :param precision: numeric precision for DDL rendering.
        :param asdecimal: coerce results to Decimal (default False).
        :param decimal_return_scale: scale used when ``asdecimal`` is True.
        """
        self.precision = precision
        self.asdecimal = asdecimal
        self.decimal_return_scale = decimal_return_scale
        if kwargs:
            # Extra kwargs are accepted but ignored, for legacy callers.
            util.warn_deprecated("Additional keyword arguments "
                                 "passed to Float ignored.")

    def result_processor(self, dialect, coltype):
        # DBAPIs return floats natively; convert only when Decimal results
        # were explicitly requested.
        if self.asdecimal:
            return processors.to_decimal_processor_factory(
                decimal.Decimal,
                self._effective_decimal_return_scale)
        else:
            return None

    @util.memoized_property
    def _expression_adaptations(self):
        # Result type of arithmetic against other type affinities.
        return {
            operators.mul: {
                Interval: Interval,
                Numeric: self.__class__,
            },
            operators.div: {
                Numeric: self.__class__,
            },
            operators.truediv: {
                Numeric: self.__class__,
            },
            operators.add: {
                Numeric: self.__class__,
            },
            operators.sub: {
                Numeric: self.__class__,
            }
        }
class DateTime(_DateAffinity, TypeEngine):
    """A type for ``datetime.datetime`` values."""

    __visit_name__ = 'datetime'

    def __init__(self, timezone=False):
        """:param timezone: request timezone support from the dialect."""
        self.timezone = timezone

    def get_dbapi_type(self, dbapi):
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.datetime

    @util.memoized_property
    def _expression_adaptations(self):
        # datetime +/- interval stays datetime; datetime - datetime -> interval.
        return {
            operators.add: {
                Interval: self.__class__,
            },
            operators.sub: {
                Interval: self.__class__,
                DateTime: Interval,
            },
        }
class Date(_DateAffinity, TypeEngine):
    """A type for ``datetime.date`` values."""

    __visit_name__ = 'date'

    def get_dbapi_type(self, dbapi):
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.date

    @util.memoized_property
    def _expression_adaptations(self):
        # date arithmetic promotes to datetime when time/interval is
        # involved; date - date yields an integer day count affinity.
        return {
            operators.add: {
                Integer: self.__class__,
                Interval: DateTime,
                Time: DateTime,
            },
            operators.sub: {
                Integer: self.__class__,
                Date: Integer,
                Interval: DateTime,
                DateTime: Interval,
            },
        }
class Time(_DateAffinity, TypeEngine):
    """A type for ``datetime.time`` values."""

    __visit_name__ = 'time'

    def __init__(self, timezone=False):
        """:param timezone: request timezone support from the dialect."""
        self.timezone = timezone

    def get_dbapi_type(self, dbapi):
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.time

    @util.memoized_property
    def _expression_adaptations(self):
        # time + date promotes to datetime; time - time yields interval.
        return {
            operators.add: {
                Date: DateTime,
                Interval: self.__class__
            },
            operators.sub: {
                Time: Interval,
                Interval: self.__class__,
            },
        }
class _Binary(TypeEngine):
    """Define base behavior for binary types (BINARY, VARBINARY, BLOB)."""

    def __init__(self, length=None):
        """:param length: optional length rendered in DDL."""
        self.length = length

    def literal_processor(self, dialect):
        """Render a bytes value as a quoted SQL string literal."""
        def process(value):
            # Bug fix: the dialect is the argument to this method;
            # TypeEngine instances have no ``self.dialect`` attribute, so
            # the previous ``self.dialect.encoding`` raised AttributeError
            # whenever a binary literal was rendered inline.
            value = value.decode(dialect.encoding).replace("'", "''")
            return "'%s'" % value
        return process

    @property
    def python_type(self):
        return util.binary_type

    # Python 3 - sqlite3 doesn't need the `Binary` conversion
    def bind_processor(self, dialect):
        # Wrap outgoing values with the DBAPI's Binary() constructor.
        DBAPIBinary = dialect.dbapi.Binary

        def process(value):
            if value is not None:
                return DBAPIBinary(value)
            else:
                return None
        return process

    if util.py2k:
        def result_processor(self, dialect, coltype):
            if util.jython:
                # Jython may deliver java arrays; normalize to str.
                def process(value):
                    if value is not None:
                        if isinstance(value, array.array):
                            return value.tostring()
                        return str(value)
                    else:
                        return None
            else:
                process = processors.to_str
            return process
    else:
        def result_processor(self, dialect, coltype):
            # Normalize memoryview/bytearray results to bytes.
            def process(value):
                if value is not None:
                    value = bytes(value)
                return value
            return process

    def coerce_compared_value(self, op, value):
        """Compare strings against binary columns using this binary type."""
        if isinstance(value, util.string_types):
            return self
        else:
            return super(_Binary, self).coerce_compared_value(op, value)

    def get_dbapi_type(self, dbapi):
        return dbapi.BINARY
class LargeBinary(_Binary):
    """A type for large binary byte data (BLOB, BYTEA, ...)."""

    __visit_name__ = 'large_binary'

    def __init__(self, length=None):
        """:param length: optional length for DDL rendering."""
        _Binary.__init__(self, length=length)
class Binary(LargeBinary):
    """Deprecated alias of :class:`LargeBinary`; warns on construction."""

    def __init__(self, *arg, **kw):
        util.warn_deprecated('The Binary type has been renamed to '
                             'LargeBinary.')
        LargeBinary.__init__(self, *arg, **kw)
class SchemaType(SchemaEventTarget):
    """Mixin for types that may require schema-level DDL of their own
    (e.g. native enums).

    Hooks table/metadata create and drop events so that the
    dialect-specific implementation can emit CREATE/DROP statements.
    """

    def __init__(self, **kw):
        name = kw.pop('name', None)
        if name is not None:
            self.name = quoted_name(name, kw.pop('quote', None))
        else:
            self.name = None
        self.schema = kw.pop('schema', None)
        self.metadata = kw.pop('metadata', None)
        self.inherit_schema = kw.pop('inherit_schema', False)
        if self.metadata:
            # Bound to a MetaData up front: attach DDL hooks immediately.
            event.listen(
                self.metadata,
                "before_create",
                util.portable_instancemethod(self._on_metadata_create)
            )
            event.listen(
                self.metadata,
                "after_drop",
                util.portable_instancemethod(self._on_metadata_drop)
            )

    def _set_parent(self, column):
        # Defer table hookup until the owning column is attached to one.
        column._on_table_attach(util.portable_instancemethod(self._set_table))

    def _set_table(self, column, table):
        if self.inherit_schema:
            self.schema = table.schema

        event.listen(
            table,
            "before_create",
            util.portable_instancemethod(
                self._on_table_create)
        )
        event.listen(
            table,
            "after_drop",
            util.portable_instancemethod(self._on_table_drop)
        )
        if self.metadata is None:
            # and table.metadata here ?
            event.listen(
                table.metadata,
                "before_create",
                util.portable_instancemethod(self._on_metadata_create)
            )
            event.listen(
                table.metadata,
                "after_drop",
                util.portable_instancemethod(self._on_metadata_drop)
            )

    def copy(self, **kw):
        return self.adapt(self.__class__)

    def adapt(self, impltype, **kw):
        """Produce a copy of this type as the given implementation class."""
        schema = kw.pop('schema', self.schema)
        metadata = kw.pop('metadata', self.metadata)
        return impltype(name=self.name,
                        schema=schema,
                        metadata=metadata,
                        inherit_schema=self.inherit_schema,
                        **kw
                        )

    @property
    def bind(self):
        return self.metadata and self.metadata.bind or None

    def create(self, bind=None, checkfirst=False):
        """Issue CREATE ddl for this type, if applicable."""
        if bind is None:
            bind = _bind_or_error(self)
        t = self.dialect_impl(bind.dialect)
        # Delegate only when the dialect substituted its own SchemaType
        # implementation; the identity check guards against recursion.
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t.create(bind=bind, checkfirst=checkfirst)

    def drop(self, bind=None, checkfirst=False):
        """Issue DROP ddl for this type, if applicable."""
        if bind is None:
            bind = _bind_or_error(self)
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t.drop(bind=bind, checkfirst=checkfirst)

    def _on_table_create(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_table_create(target, bind, **kw)

    def _on_table_drop(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_table_drop(target, bind, **kw)

    def _on_metadata_create(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_metadata_create(target, bind, **kw)

    def _on_metadata_drop(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_metadata_drop(target, bind, **kw)
class Enum(String, SchemaType):
    """Generic Enum type: native database enum where supported, otherwise
    VARCHAR plus a CHECK constraint restricting values."""

    __visit_name__ = 'enum'

    def __init__(self, *enums, **kw):
        """Construct an Enum from the given string values.

        Recognized keyword arguments include ``native_enum`` and
        ``convert_unicode``; remaining kwargs pass to SchemaType.
        """
        self.enums = enums
        self.native_enum = kw.pop('native_enum', True)
        convert_unicode = kw.pop('convert_unicode', None)
        if convert_unicode is None:
            # Infer unicode conversion from the element types.
            for e in enums:
                if isinstance(e, util.text_type):
                    convert_unicode = True
                    break
            else:
                convert_unicode = False

        # VARCHAR fallback must be wide enough for the longest value.
        if self.enums:
            length = max(len(x) for x in self.enums)
        else:
            length = 0
        String.__init__(self,
                        length=length,
                        convert_unicode=convert_unicode,
                        )
        SchemaType.__init__(self, **kw)

    def __repr__(self):
        return util.generic_repr(self, [
            ("native_enum", True),
            ("name", None)
        ])

    def _should_create_constraint(self, compiler):
        # The CHECK constraint is only needed without a native enum.
        return not self.native_enum or \
            not compiler.dialect.supports_native_enum

    @util.dependencies("sqlalchemy.sql.schema")
    def _set_table(self, schema, column, table):
        if self.native_enum:
            SchemaType._set_table(self, column, table)

        e = schema.CheckConstraint(
            type_coerce(column, self).in_(self.enums),
            name=self.name,
            _create_rule=util.portable_instancemethod(
                self._should_create_constraint)
        )
        table.append_constraint(e)

    def adapt(self, impltype, **kw):
        schema = kw.pop('schema', self.schema)
        metadata = kw.pop('metadata', self.metadata)
        if issubclass(impltype, Enum):
            # Enum subclasses need the value list and enum-specific flags.
            return impltype(name=self.name,
                            schema=schema,
                            metadata=metadata,
                            convert_unicode=self.convert_unicode,
                            native_enum=self.native_enum,
                            inherit_schema=self.inherit_schema,
                            *self.enums,
                            **kw
                            )
        else:
            return super(Enum, self).adapt(impltype, **kw)
class PickleType(TypeDecorator):
    """Holds Python objects, serialized via pickle into a LargeBinary."""

    impl = LargeBinary

    def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
                 pickler=None, comparator=None):
        """
        :param protocol: pickle protocol number.
        :param pickler: pickle-compatible module (``dumps``/``loads``);
          defaults to the stdlib ``pickle``.
        :param comparator: optional two-argument callable used by
          :meth:`compare_values` instead of ``==``.
        """
        self.protocol = protocol
        self.pickler = pickler or pickle
        self.comparator = comparator
        super(PickleType, self).__init__()

    def __reduce__(self):
        # NOTE: a custom pickler is deliberately not serialized (None).
        return PickleType, (self.protocol,
                            None,
                            self.comparator)

    def bind_processor(self, dialect):
        """Pickle outgoing values, then apply the impl's own processor."""
        impl_processor = self.impl.bind_processor(dialect)
        dumps = self.pickler.dumps
        protocol = self.protocol
        if impl_processor:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return impl_processor(value)
        else:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return value
        return process

    def result_processor(self, dialect, coltype):
        """Unpickle incoming values after the impl's own processor runs."""
        impl_processor = self.impl.result_processor(dialect, coltype)
        loads = self.pickler.loads
        if impl_processor:
            def process(value):
                value = impl_processor(value)
                if value is None:
                    return None
                return loads(value)
        else:
            def process(value):
                if value is None:
                    return None
                return loads(value)
        return process

    def compare_values(self, x, y):
        if self.comparator:
            return self.comparator(x, y)
        else:
            return x == y
class Boolean(TypeEngine, SchemaType):
    """A bool-valued type: native BOOLEAN where supported, otherwise a
    small integer plus an optional CHECK constraint restricting 0/1."""

    __visit_name__ = 'boolean'

    def __init__(self, create_constraint=True, name=None):
        """
        :param create_constraint: emit a CHECK constraint on non-native
          boolean backends.
        :param name: optional name for that constraint.
        """
        self.create_constraint = create_constraint
        self.name = name

    def _should_create_constraint(self, compiler):
        # The CHECK is only required when no native boolean exists.
        return not compiler.dialect.supports_native_boolean

    @util.dependencies("sqlalchemy.sql.schema")
    def _set_table(self, schema, column, table):
        if not self.create_constraint:
            return

        constraint = schema.CheckConstraint(
            type_coerce(column, self).in_([0, 1]),
            name=self.name,
            _create_rule=util.portable_instancemethod(
                self._should_create_constraint)
        )
        table.append_constraint(constraint)

    @property
    def python_type(self):
        return bool

    def bind_processor(self, dialect):
        # Native boolean dialects accept Python bools directly.
        return None if dialect.supports_native_boolean \
            else processors.boolean_to_int

    def result_processor(self, dialect, coltype):
        return None if dialect.supports_native_boolean \
            else processors.int_to_boolean
class Interval(_DateAffinity, TypeDecorator):
    """A type for ``datetime.timedelta`` values.

    On backends without a native INTERVAL type the value is stored as a
    DATETIME relative to the Unix epoch.
    """

    impl = DateTime
    # Reference point used to encode a timedelta as a datetime.
    epoch = dt.datetime.utcfromtimestamp(0)

    def __init__(self, native=True,
                 second_precision=None,
                 day_precision=None):
        """
        :param native: use the backend's native INTERVAL when available.
        :param second_precision: fractional-seconds precision, if supported.
        :param day_precision: day precision, if supported (e.g. Oracle).
        """
        super(Interval, self).__init__()
        self.native = native
        self.second_precision = second_precision
        self.day_precision = day_precision

    def adapt(self, cls, **kw):
        if self.native and hasattr(cls, '_adapt_from_generic_interval'):
            # Dialect-specific interval types provide their own adaptation.
            return cls._adapt_from_generic_interval(self, **kw)
        else:
            return self.__class__(
                native=self.native,
                second_precision=self.second_precision,
                day_precision=self.day_precision,
                **kw)

    @property
    def python_type(self):
        return dt.timedelta

    def bind_processor(self, dialect):
        """Encode timedelta -> datetime (epoch + delta) before binding."""
        impl_processor = self.impl.bind_processor(dialect)
        epoch = self.epoch
        if impl_processor:
            def process(value):
                if value is not None:
                    value = epoch + value
                return impl_processor(value)
        else:
            def process(value):
                if value is not None:
                    value = epoch + value
                return value
        return process

    def result_processor(self, dialect, coltype):
        """Decode datetime -> timedelta (value - epoch) on the way out."""
        impl_processor = self.impl.result_processor(dialect, coltype)
        epoch = self.epoch
        if impl_processor:
            def process(value):
                value = impl_processor(value)
                if value is None:
                    return None
                return value - epoch
        else:
            def process(value):
                if value is None:
                    return None
                return value - epoch
        return process

    @util.memoized_property
    def _expression_adaptations(self):
        # interval arithmetic against dates/times/numerics.
        return {
            operators.add: {
                Date: DateTime,
                Interval: self.__class__,
                DateTime: DateTime,
                Time: Time,
            },
            operators.sub: {
                Interval: self.__class__
            },
            operators.mul: {
                Numeric: self.__class__
            },
            operators.truediv: {
                Numeric: self.__class__
            },
            operators.div: {
                Numeric: self.__class__
            }
        }

    @property
    def _type_affinity(self):
        return Interval

    def coerce_compared_value(self, op, value):
        # Delegate literal coercion to the underlying DateTime impl.
        return self.impl.coerce_compared_value(op, value)
# ---------------------------------------------------------------------------
# SQL-standard "uppercase" types: thin aliases that render exactly their
# own name in DDL while inheriting all behavior from the generic types.
# ---------------------------------------------------------------------------

class REAL(Float):
    """The SQL REAL type."""
    __visit_name__ = 'REAL'


class FLOAT(Float):
    """The SQL FLOAT type."""
    __visit_name__ = 'FLOAT'


class NUMERIC(Numeric):
    """The SQL NUMERIC type."""
    __visit_name__ = 'NUMERIC'


class DECIMAL(Numeric):
    """The SQL DECIMAL type."""
    __visit_name__ = 'DECIMAL'


class INTEGER(Integer):
    """The SQL INT or INTEGER type."""
    __visit_name__ = 'INTEGER'
INT = INTEGER  # convenience alias


class SMALLINT(SmallInteger):
    """The SQL SMALLINT type."""
    __visit_name__ = 'SMALLINT'


class BIGINT(BigInteger):
    """The SQL BIGINT type."""
    __visit_name__ = 'BIGINT'


class TIMESTAMP(DateTime):
    """The SQL TIMESTAMP type."""
    __visit_name__ = 'TIMESTAMP'

    def get_dbapi_type(self, dbapi):
        # TIMESTAMP maps to a distinct DBAPI constant, unlike DATETIME.
        return dbapi.TIMESTAMP


class DATETIME(DateTime):
    """The SQL DATETIME type."""
    __visit_name__ = 'DATETIME'


class DATE(Date):
    """The SQL DATE type."""
    __visit_name__ = 'DATE'


class TIME(Time):
    """The SQL TIME type."""
    __visit_name__ = 'TIME'


class TEXT(Text):
    """The SQL TEXT type."""
    __visit_name__ = 'TEXT'


class CLOB(Text):
    """The CLOB type (Oracle and others)."""
    __visit_name__ = 'CLOB'


class VARCHAR(String):
    """The SQL VARCHAR type."""
    __visit_name__ = 'VARCHAR'


class NVARCHAR(Unicode):
    """The SQL NVARCHAR type."""
    __visit_name__ = 'NVARCHAR'


class CHAR(String):
    """The SQL CHAR type."""
    __visit_name__ = 'CHAR'


class NCHAR(Unicode):
    """The SQL NCHAR type."""
    __visit_name__ = 'NCHAR'


class BLOB(LargeBinary):
    """The SQL BLOB type."""
    __visit_name__ = 'BLOB'


class BINARY(_Binary):
    """The SQL BINARY type."""
    __visit_name__ = 'BINARY'


class VARBINARY(_Binary):
    """The SQL VARBINARY type."""
    __visit_name__ = 'VARBINARY'


class BOOLEAN(Boolean):
    """The SQL BOOLEAN type."""
    __visit_name__ = 'BOOLEAN'
class NullType(TypeEngine):
    """An unknown type; the default when no type information is available."""

    __visit_name__ = 'null'

    # Flag consulted elsewhere to detect "no real type was determined".
    _isnull = True

    def literal_processor(self, dialect):
        def process(value):
            return "NULL"
        return process

    class Comparator(TypeEngine.Comparator):
        def _adapt_expression(self, op, other_comparator):
            # Keep NullType for Null-vs-Null or non-commutative ops;
            # otherwise let the other operand determine the result type.
            if isinstance(other_comparator, NullType.Comparator) or \
                    not operators.is_commutative(op):
                return op, self.expr.type
            else:
                return other_comparator._adapt_expression(op, self)
    comparator_factory = Comparator
# Singleton type instances shared across the expression system.
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
INTEGERTYPE = Integer()

# Default mapping of Python types to generic SQL types, used when
# coercing literal values.
_type_map = {
    int: Integer(),
    float: Numeric(),
    bool: BOOLEANTYPE,
    decimal.Decimal: Numeric(),
    dt.date: Date(),
    dt.datetime: DateTime(),
    dt.time: Time(),
    dt.timedelta: Interval(),
    util.NoneType: NULLTYPE
}

if util.py3k:
    _type_map[bytes] = LargeBinary()
    _type_map[str] = Unicode()
else:
    _type_map[unicode] = Unicode()
    _type_map[str] = String()


# back-assign to type_api
# The typing system necessarily has a circular connection with the
# expression element system; at import time we push the concrete
# singletons (and the default comparator, below) into type_api.
from . import type_api
type_api.BOOLEANTYPE = BOOLEANTYPE
type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api._type_map = _type_map


class Comparator(_DefaultColumnComparator):
    BOOLEANTYPE = BOOLEANTYPE

# Inject the default comparator implementation into the base Comparator's
# MRO so all type comparators pick up the default operator behavior.
TypeEngine.Comparator.__bases__ = (Comparator, ) + TypeEngine.Comparator.__bases__
| true | true |
1c475b01d3f2a15d38e7166284a6e4891d718fa6 | 4,466 | py | Python | tinkt/cmap_utils.py | claydodo/tinkt | dfd07fe7cad34c0d5a1ec0e03a6437a502410918 | [
"Unlicense"
] | null | null | null | tinkt/cmap_utils.py | claydodo/tinkt | dfd07fe7cad34c0d5a1ec0e03a6437a502410918 | [
"Unlicense"
] | null | null | null | tinkt/cmap_utils.py | claydodo/tinkt | dfd07fe7cad34c0d5a1ec0e03a6437a502410918 | [
"Unlicense"
] | null | null | null | # -*- coding:utf-8 -*-
# cmap utils
import six
import numpy as np
from matplotlib import cm as mpl_cm
from matplotlib import colors as mpl_colors
from . import cm as tinkt_cm
# Colormap registries searched, in order, when get_cmap() is given a name.
CM_FAMILIES = {
    'mpl': mpl_cm,
    'tinkt': tinkt_cm
}
def set_under_over_bad_colors(cmap, under=None, over=None, bad=None):
    """Apply any of the optional under/over/bad colors to *cmap*, in place,
    and return it. Colors left as None are not touched."""
    for setter_name, color in (("set_under", under),
                               ("set_over", over),
                               ("set_bad", bad)):
        if color is not None:
            getattr(cmap, setter_name)(color)
    return cmap
def get_cmap(base_cmap,
             clip_min=None, clip_max=None,
             N=None,
             sample_points=None,
             bad=None, over=None, under=None,
             *args, **kwargs):
    """
    Get cmap object by name, and optionally tweak it into a new one.
    Currently only supports tweaking of continuous cmaps.
    :param base_cmap: either a name or a cmap object.
    :param clip_min: lower clip point, valid range: 0.0~1.0, default: None.
    :param clip_max: upper clip point, valid range: 0.0~1.0, default: None.
    :param N: new cmap's color number, default: None (inherits from base_cmap).
    :param sample_points: a series of sampling points (0.0~1.0) on the base_cmap. When using this arg, clip_min, clip_max and N are ignored.
    :param bad: bad color, default None (inherits from base_cmap)
    :param over: over color, default None (inherits from base_cmap)
    :param under: under color, default None (inherits from base_cmap)
    :return: a cmap object (matplotlib.colors.Colormap)
    """
    if isinstance(base_cmap, tuple):
        # The tuple-form is for compatibility of old codes using
        # metlib.color.cmap_utils.get_cmap, which read opts from json file.
        return _parse_tuple_form_args_for_get_cmap(base_cmap)

    if isinstance(base_cmap, six.string_types):
        # Resolve the name against each registered colormap family.
        for cm_family in CM_FAMILIES.values():
            try:
                base_cmap = getattr(cm_family, base_cmap)
                break
            except AttributeError:
                pass
    if not isinstance(base_cmap, mpl_colors.Colormap):
        raise RuntimeError(u'Cannot find base_cmap: {}'.format(base_cmap))

    if sample_points is not None:
        # Explicit sample points override clipping and N.
        new_name = u'Resampled from {}'.format(base_cmap.name)
        new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))
    elif clip_min is not None or clip_max is not None:
        clip_min = 0.0 if clip_min is None else float(clip_min)
        # Bug fix: an omitted clip_max must default to the TOP of the
        # range (1.0); the previous default of 0.0 collapsed or reversed
        # the colormap whenever only clip_min was given.
        clip_max = 1.0 if clip_max is None else float(clip_max)
        N = base_cmap.N if N is None else int(N)
        sample_points = np.linspace(clip_min, clip_max, N)
        new_name = u'Clipped from {}'.format(base_cmap.name)
        new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))
    else:
        N = int(N) if N is not None else base_cmap.N
        # NOTE(review): _resample is a private matplotlib API (renamed to
        # resampled in newer releases) — confirm against the pinned version.
        new_cmap = base_cmap._resample(N)

    # Inherit under/over/bad colors from the base unless overridden.
    if bad is not None:
        new_cmap.set_bad(bad)
    elif base_cmap._rgba_bad:
        new_cmap.set_bad(base_cmap._rgba_bad)
    if over is not None:
        new_cmap.set_over(over)
    elif base_cmap._rgba_over:
        new_cmap.set_over(base_cmap._rgba_over)
    if under is not None:
        new_cmap.set_under(under)
    elif base_cmap._rgba_under:
        new_cmap.set_under(base_cmap._rgba_under)

    return new_cmap
def _parse_tuple_form_args_for_get_cmap(opts):
    """Translate the legacy tuple form into a get_cmap() call.

    Supported forms: ``(name,)``, ``(name, ())``, ``(name, (min, max))``,
    ``(name, (min, max, N))``, ``(name, (sample_points,))`` and
    ``(name, [p0, p1, ...])`` with more than three sample points.
    """
    # The tuple-form is for compatibility of old codes using
    # metlib.color.cmap_utils.get_cmap, which read opts from json file.
    if len(opts) == 1:
        return get_cmap(opts[0])
    elif len(opts) == 2:
        if not isinstance(opts[1], (tuple, list, np.ndarray)):
            # Bug fix: a non-sequence second element previously fell
            # through to an UnboundLocalError on clip_min/clip_max.
            raise ValueError(
                "opts[1] must be a tuple/list/ndarray, got {!r}".format(opts[1]))
        if len(opts[1]) == 0:
            return get_cmap(opts[0])
        elif len(opts[1]) == 1:
            if isinstance(opts[1][0], (tuple, list, np.ndarray)):
                return get_cmap(opts[0], sample_points=opts[1][0])
            raise ValueError(
                "single-element opts[1] must wrap a sequence of sample points")
        elif len(opts[1]) == 2:
            clip_min, clip_max = opts[1]
            N = None
        elif len(opts[1]) == 3:
            clip_min, clip_max, N = opts[1]
        else:
            return get_cmap(opts[0], sample_points=opts[1])
        return get_cmap(opts[0], clip_min=clip_min, clip_max=clip_max, N=N)
    else:
        raise ValueError(
            "opts must have 1 or 2 elements, got {}".format(len(opts)))
| 36.606557 | 140 | 0.638155 |
import six
import numpy as np
from matplotlib import cm as mpl_cm
from matplotlib import colors as mpl_colors
from . import cm as tinkt_cm
CM_FAMILIES = {
'mpl': mpl_cm,
'tinkt': tinkt_cm
}
def set_under_over_bad_colors(cmap, under=None, over=None, bad=None):
if under is not None:
cmap.set_under(under)
if over is not None:
cmap.set_over(over)
if bad is not None:
cmap.set_bad(bad)
return cmap
def get_cmap(base_cmap,
clip_min=None, clip_max=None,
N=None,
sample_points=None,
bad=None, over=None, under=None,
*args, **kwargs):
if isinstance(base_cmap, tuple):
return _parse_tuple_form_args_for_get_cmap(base_cmap)
if isinstance(base_cmap, six.string_types):
for cm_family in CM_FAMILIES.values():
try:
base_cmap = getattr(cm_family, base_cmap)
break
except AttributeError:
pass
if not isinstance(base_cmap, mpl_colors.Colormap):
raise RuntimeError(u'Cannot find base_cmap: {}'.format(base_cmap))
if sample_points is not None:
new_name = u'Resampled from {}'.format(base_cmap.name)
new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))
elif clip_min is not None or clip_max is not None:
clip_min = 0.0 if clip_min is None else float(clip_min)
clip_max = 0.0 if clip_max is None else float(clip_max)
N = base_cmap.N if N is None else int(N)
sample_points = np.linspace(clip_min, clip_max, N)
new_name = u'Clipped from {}'.format(base_cmap.name)
new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))
else:
N = int(N) if N is not None else base_cmap.N
new_cmap = base_cmap._resample(N)
if bad is not None:
new_cmap.set_bad(bad)
elif base_cmap._rgba_bad:
new_cmap.set_bad(base_cmap._rgba_bad)
if over is not None:
new_cmap.set_over(over)
elif base_cmap._rgba_over:
new_cmap.set_over(base_cmap._rgba_over)
if under is not None:
new_cmap.set_under(under)
elif base_cmap._rgba_under:
new_cmap.set_under(base_cmap._rgba_under)
return new_cmap
def _parse_tuple_form_args_for_get_cmap(opts):
if len(opts) == 1:
return get_cmap(opts[0])
elif len(opts) == 2:
if isinstance(opts[1], (tuple, list, np.ndarray)):
if len(opts[1]) == 0:
return get_cmap(opts[0])
elif len(opts[1]) == 1:
if isinstance(opts[1][0], (tuple, list, np.ndarray)):
return get_cmap(opts[0], sample_points=opts[1][0])
else:
raise ValueError("")
elif len(opts[1]) == 2:
clip_min, clip_max = opts[1]
N = None
elif len(opts[1]) == 3:
clip_min, clip_max, N = opts[1]
else:
return get_cmap(opts[0], sample_points=opts[1])
return get_cmap(opts[0], clip_min=clip_min, clip_max=clip_max, N=N)
else:
raise ValueError("")
else:
raise ValueError("")
| true | true |
1c475e064511372aa11c413ea6aad9da5ab26d2e | 10,185 | py | Python | test_nfc.py | tnoumar/ST-M24SR64-NFC | 6f5b2ec574fb51d3ffc458b562eb0f6df657a6a4 | [
"MIT"
] | null | null | null | test_nfc.py | tnoumar/ST-M24SR64-NFC | 6f5b2ec574fb51d3ffc458b562eb0f6df657a6a4 | [
"MIT"
] | null | null | null | test_nfc.py | tnoumar/ST-M24SR64-NFC | 6f5b2ec574fb51d3ffc458b562eb0f6df657a6a4 | [
"MIT"
] | null | null | null | # Author: Taha NOUMAR tnoumar@enseirb-matmeca.fr
# DATA SHEETS
# https://www.st.com/resource/en/datasheet/m24sr64-y.pdf
# CONFIGURATION
# tag type: M24SR64Y
# eeprom size: 64KBit
# I2C address: 0x56
import machine
import binascii
import utime
def byte0(b):
    """Least-significant byte of *b*."""
    return b % 256
def byte1(b):
    """Second-least-significant byte of *b*."""
    return (b >> 8) & 0xFF
class NFCTag():
    """Driver for the ST M24SR64-Y dynamic NFC/I2C tag (NFC Forum Type 4).

    All transactions follow the datasheet's APDU framing: a PCB byte,
    an ISO 7816 command, and a trailing CRC-A.
    """

    I2C_ADDRESS_7BIT = 0x56
    # File identifiers inside the tag's file system.
    SYSTEM = 0xE101
    CC = 0xE103
    NDEF = 0x0001
    # NDEF short Text record header (UTF-8, language "en"); byte 2 is the
    # payload-length placeholder patched in updateBinary().
    NDEF_HEADER = [0xd1, 0x01, 0x00, 0x54, 0x02, 0x65, 0x6e]
    verbose = True  # not to supercharge the user's console

    def __init__(self, i2c):
        """:param i2c: a machine.I2C bus the tag is wired to."""
        self.i2c = i2c
        self.addr = self.I2C_ADDRESS_7BIT

    def wait(self, msg):
        '''Wait a certain amount of time between operations'''
        utime.sleep_ms(500)
        if self.verbose:
            print("\n" + str(msg))

    def write(self, data, crc=False):
        """Write a list of data bytes, with optional CRC-A appended.

        NOTE: when crc=True the caller's list is mutated (two CRC bytes
        are appended); all internal callers pass fresh literals.
        """
        if crc:
            crc0, crc1 = CRC.compute(data)
            data.append(crc0)
            data.append(crc1)
        data_hex = ""
        for i in range(len(data)):
            data_hex += hex(data[i]) + " "
        print("i2c write: [AC] " + data_hex)
        result = self.i2c.writeto(self.addr, bytes(data))
        print("write:" + str(result))
        if result == 0:
            raise RuntimeError("write result:" + str(result))

    def read(self, len, checkCrc=False):
        """Read a string of data bytes, with optional CRC checking.

        NOTE(review): the parameter shadows the ``len`` builtin; rename
        to ``length`` in a coordinated change with any external callers.
        """
        data = bytearray(len)
        # Use the configured address rather than a hard-coded 0x56.
        self.i2c.readfrom_into(self.addr, data)
        if checkCrc:
            raise RuntimeError("CRC checking not yet written")
        return data

    def killRFSelectI2C(self):
        """Kill off any RF session and open an I2C session"""
        # tx: [0xAC] 0x52
        self.wait("Selecting I2C, deselecting RF ...")
        self.write([0x52])

    def selectNFCT4Application(self, pcb=0x02):
        """Select the NFC Type 4 application (ISO 7816 SELECT by AID)."""
        # tx: [0xAC] pcb 0x00 0xA4 0x04 0x00 0x07 0xD2 0x76 0x00 0x00 0x85 0x01 0x01 0x00 [CRC]
        # rx: [0xAD] pcb 0x90 0x00 [CRC]
        self.write([pcb, 0x00, 0xA4, 0x04, 0x00, 0x07, 0xD2, 0x76,
                    0x00, 0x00, 0x85, 0x01, 0x01, 0x00], crc=True)
        self.wait('Selecting NFC APP ...')
        result = self.read(5)
        return result

    def selectFile(self, fileId, pcb=0x02):
        """Select a nominated file by its 16-bit identifier."""
        # tx: [0xAC] pcb 0x00 0xA4 0x00 0x0C 0x02 (fileId hi/lo) [CRC]
        self.write([pcb, 0x00, 0xA4, 0x00, 0x0C, 0x02,
                    byte1(fileId), byte0(fileId)], crc=True)
        self.wait('Selecting file ...')
        result = self.read(5)
        return result

    def readBinary(self, offset, length, pcb=0x02):
        """Read ``length`` bytes at ``offset`` of the selected file."""
        # tx: [0xAD] pcb 0x00 0xB0 (offset hi/lo) (length) [CRC]
        self.write([pcb, 0x00, 0xB0, byte1(offset),
                    byte0(offset), byte0(length)], crc=True)
        self.wait('Reading binary ...')
        # Response = status/framing bytes around the payload (5 extra).
        result = self.read(length + 5)
        print("readBinary:" + str(result))
        return result

    def updateBinaryLength(self, data, pcb=0x03):
        """Write the 2-byte NDEF length field of the selected file."""
        # tx: [0xAC] pcb 0x00 0xD6 0x00 0x00 0x02 (len hi/lo) [CRC]
        self.write([pcb, 0x00, 0xD6, 0x00, 0x00, 0x02,
                    byte1(data), byte0(data)], crc=True)
        utime.sleep(1)
        result = self.read(5)
        print("updateBinaryLength:" + str(result))
        return result

    def updateBinary(self, offset, length, data, pcb=0x02):
        """Write ``data`` (prefixed by NDEF_HEADER) at ``offset``."""
        payload = self.NDEF_HEADER + data
        # Patch the Text-record payload length byte.
        # NOTE(review): the ``length - 4`` adjustment and the Lc byte below
        # reproduce the original protocol math — confirm against the
        # M24SR64-Y datasheet before changing.
        payload[2] = length - 4
        self.write([pcb, 0x00, 0xD6, byte1(offset), byte0(
            offset), byte0(length)] + payload, crc=True)
        self.wait('Updating Binary ...')
        result = self.read(5)
        print("updateBinary: " + str(result))
        return result

    def deselect(self):
        """Deselect the I2C (allow RF to come in again)"""
        # tx: [0xAC] 0xC2 [CRC: 0xE0 0xB4]
        self.write([0xC2], crc=True)
        self.wait('Deselecting I2C, selecting RF ')
        result = self.read(3)
        return result

    def readNDEFFile(self):
        '''
        select I2C
        select NFC application
        select CC
        read CC file and length
        select NDEF file
        read NDEF length
        read NDEF file
        '''
        self.killRFSelectI2C()
        self.selectNFCT4Application()
        self.selectFile(self.CC, pcb=0x03)
        data = self.readBinary(0x0000, 0x02, pcb=0x02)
        data = self.readBinary(0x0000, 0x0F, pcb=0x03)
        self.selectFile(self.NDEF, pcb=0x02)
        data = self.readBinary(0x0000, 0x02, pcb=0x03)
        ndef_len = (data[1] * 256) + data[2]
        print("NDEF len:" + str(ndef_len))
        data = self.readBinary(0x0002, ndef_len, pcb=0x02)
        # Strip framing (8 leading, 4 trailing bytes) to get the text.
        ndef = data[8:-4]
        s = ""
        for i in range(len(ndef)):
            s += chr(ndef[i])
        print("ndef message:" + s)
        return s

    def eraseNDEFFile(self):
        '''
        select I2C
        select NFC application
        select CC
        read CC file and length
        select NDEF file
        set NDEF length to 0
        '''
        self.killRFSelectI2C()
        self.selectNFCT4Application()
        self.selectFile(self.CC, pcb=0x03)
        data = self.readBinary(0x0000, 0x02, pcb=0x02)
        data = self.readBinary(0x0000, 0x0F, pcb=0x03)
        self.selectFile(self.NDEF, pcb=0x02)
        try:
            data = self.updateBinaryLength(0)
            print("File erased successfully")
        except Exception:
            print("error while erasing file")

    def writeNDEFFile(self, text):
        '''
        erase NDEF length
        update NDEF message
        set new NDEF length
        deselect I2C
        '''
        self.eraseNDEFFile()
        print("Storing " + text + " in NDEF message")
        # Bug fix: encode straight to byte values.  The previous
        # implementation round-tripped through hexlify() and then called
        # int("0x..") without base=16, which raises ValueError on every
        # call; list(bytes) yields the same byte values directly.
        byte_values = list(text.encode('utf8'))
        data = self.updateBinary(0x0002, len(text), byte_values)
        utime.sleep(1)
        try:
            data = self.updateBinaryLength(len(text))
            print("File written successfully")
        except Exception:
            print("error while writing file")
        print("deselecting I2C")
        self.deselect()
        utime.sleep(2)
# Takes 0x02 or 0x03
# CLA is class byte (always 0x00 for these apps)
# INS is the instruction to send
# P1 P2 are parameter 1 and 2,
# Lc is length of command
# Data is the payload of the command
# Le is the length of expected response
# CRC2 is the cyclic redundancy check bytes
#Structure of NDEF message (NFC Data Exchange Format) ########################################################
# Byte 0 Byte 1 Byte 2 Byte 3
# 0x0000 NDEF message length User data User data
# 0x0004 User data User data User data User data
# ... ... ... ... ...
##############################################################################################################
# COMMANDS
# SEL PCB CLA INS P1 P2 Lc Data Le CRC2
# kill RF session, open I2C 0xAC 0x52
# select system file 0xAC 0x02 0x00 0xA4 0x00 0x0c 0x02 0xE101 0xCCCC
# read length 0xAD 0x03 0x00 0xB0 0x00 0x00 0x02 0xCCCC
# read memsize 0xAD 0x03 0x00 0xB0 0x00 0x0F 0x02 0xCCCC
# deselect (Kill I2C, open RF) 0xAC 0xC2 0xE0 0xB4
# erase NDEF len 0xAC 0x03 0x00 0xD6 0x00 0x00 0x02 0x00 0x00 0x6B 0x37
# write HELLO WORLD in tag 0xAC 0x02 0x00 0xD6 0x00 0x02 0x0B 0x68 0x65 0x6C 0x6C 0x6F 0x20 0x77 0x6F 0x72 0x6C 0x64 0x2F 0xFC
#####################################################################################################################################################
class CRC():
    """ISO/IEC 14443-A CRC (CRC-A), initial value 0x6363.

    Verified against the deselect frame documented above:
    compute([0xC2]) -> (0xE0, 0xB4).
    """

    def __init__(self, initial=0x6363):
        # initial seed for the 16-bit running CRC
        self.initial = initial

    def start(self):
        """Reset the running CRC to the initial value."""
        self.crc = self.initial

    def update(self, data):
        """Fold one byte into the running CRC; returns the new CRC."""
        data = data ^ (self.crc & 0x00FF)
        data = data ^ ((data << 4) & 0x00FF)
        self.crc = (self.crc >> 8) \
            ^ (data << 8) \
            ^ (data << 3) \
            ^ (data >> 4)
        self.crc = self.crc & 0xFFFF
        return self.crc

    def getCRC(self):
        """Return the CRC as (low byte, high byte) — transmit order."""
        return (self.crc & 0xFF), ((self.crc & 0xFF00) >> 8)

    @staticmethod
    def compute(block):
        """One-shot CRC over an iterable of byte values -> (crc0, crc1).

        Declared @staticmethod so both CRC.compute(data) and
        instance.compute(data) behave identically (previously an
        instance call would misbind the instance as ``block``).
        """
        c = CRC()
        c.start()
        for b in block:
            c.update(b)
        return c.getCRC()
# Demo / smoke test: wire the tag on I2C bus 1, dump the current NDEF
# text, erase it, and confirm the erase took effect.
tag = NFCTag(machine.I2C(1))
print('(before) text in the tag is '+tag.readNDEFFile())
tag.eraseNDEFFile()
print('text in the tag is '+tag.readNDEFFile())
# Keep the board alive so the tag's RF interface stays reachable.
# NOTE(review): busy-wait burns CPU; consider utime.sleep_ms() per loop.
while True:
    pass
| 34.880137 | 149 | 0.525282 |
import machine
import binascii
import utime
def byte0(b):
return b & 0x00FF
def byte1(b):
return (b & 0xFF00) >> 8
class NFCTag():
I2C_ADDRESS_7BIT = 0x56
SYSTEM = 0xE101
CC = 0xE103
NDEF = 0x0001
NDEF_HEADER=[0xd1, 0x01, 0x00, 0x54, 0x02, 0x65, 0x6e]
verbose = True
def __init__(self, i2c):
self.i2c = i2c
self.addr = self.I2C_ADDRESS_7BIT
def wait(self, msg):
utime.sleep_ms(500)
if self.verbose:
print("\n" + str(msg))
def write(self, data, crc=False):
if crc:
crc0, crc1 = CRC.compute(data)
data.append(crc0)
data.append(crc1)
data_hex = ""
for i in range(len(data)):
data_hex += hex(data[i]) + " "
print("i2c write: [AC] " + data_hex)
result = self.i2c.writeto(self.addr, bytes(data))
print("write:" + str(result))
if result == 0:
raise RuntimeError("write result:" + str(result))
def read(self, len, checkCrc=False):
data = bytearray(len)
result = self.i2c.readfrom_into(0x56, data)
if checkCrc:
raise RuntimeError("CRC checking not yet written")
#print("read:" + str(data))
# print('type of data is'+type(data))
# if len(data) == 0:
# raise RuntimeError("read result:" + len(str(data)))
return data
def killRFSelectI2C(self):
# tx: [0xAC] 0x52
# rx: TODO
self.wait("Selecting I2C, deselecting RF ...")
self.write([0x52])
def selectNFCT4Application(self, pcb=0x02):
# tx: [0xAC] 0x02 0x00 0xA4 0x04 0x00 0x07 0xD2 0x76 0x00 0x00 0x85 0x01 0x01 0x00 [0x35 0xC0]
# rx: [0xAD] 0x02 0x90 0x00 [0xF1 0x09]
self.write([pcb, 0x00, 0xA4, 0x04, 0x00, 0x07, 0xD2, 0x76,
0x00, 0x00, 0x85, 0x01, 0x01, 0x00], crc=True)
self.wait('Selecting NFC APP ...')
result = self.read(5)
return result
def selectFile(self, fileId, pcb=0x02):
# tx: [0xAC] 0x03 0x00 0xA4 0x00 0x0c 0x02 (0xE101) 0xCCCC
# rx: TODO
self.write([pcb, 0x00, 0xA4, 0x00, 0x0C, 0x02,
byte1(fileId), byte0(fileId)], crc=True)
self.wait('Selecting file ...')
result = self.read(5)
return result
def readBinary(self, offset, length, pcb=0x02):
# read length
# tx: [0xAD] 0x03 0x00 0xB0 (0x00 0x00) (0x02) 0xCCCC
# rx: TODO
self.write([pcb, 0x00, 0xB0, byte1(offset),
byte0(offset), byte0(length)], crc=True)
self.wait('Reading binary ...')
result = self.read(length+5)
print("readBinary:" + str(result))
return result
def updateBinaryLength(self, data, pcb=0x03):
# tx: ERASE BINARY [AC] 03 00 D6 00 00 02 00 00 6B 37
# rx:
self.write([pcb, 0x00, 0xD6, 0x00, 0x00, 0x02,
byte1(data), byte0(data)], crc=True)
utime.sleep(1)
result = self.read(5)
print("updateBinaryLength:"+str(result))
return result
def updateBinary(self, offset, length, data, pcb=0x02):
# UPDATE BINARY with HELLO WORLD e.g.
# tx: 0xAC 0x02 0x00 0xD6 0x00 0x02 0x0B 0x68 0x65 0x6C 0x6C 0x6F 0x20 0x77 0x6F 0x72 0x6C 0x64 0x2F 0xFC
# rx:
payload = self.NDEF_HEADER + data
payload[2] = length - 4
self.write([pcb, 0x00, 0xD6, byte1(offset), byte0(
offset), byte0(length)]+payload, crc=True)
self.wait('Updating Binary ...')
result = self.read(5)
print("updateBinary: "+str(result))
return result
    def deselect(self):
        # Hand the tag back to the RF interface (S(DESELECT) block).
        # deselect
        # tx: [0xAC] 0xC2 0xE0 B4
        # rx: 0xC2 0xE0 0xB4
        self.write([0xC2], crc=True)
        self.wait('Deselecting I2C, selecting RF ')
        result = self.read(3)
        return result
def readNDEFFile(self):
self.killRFSelectI2C()
self.selectNFCT4Application()
self.selectFile(self.CC, pcb=0x03)
data = self.readBinary(0x0000, 0x02, pcb=0x02)
data = self.readBinary(0x0000, 0x0F, pcb=0x03)
self.selectFile(self.NDEF, pcb=0x02)
data = self.readBinary(0x0000, 0x02, pcb=0x03)
ndef_len = (data[1]*256) + data[2]
print("NDEF len:" + str(ndef_len))
data = self.readBinary(0x0002, ndef_len, pcb=0x02)
ndef = data[8:-4]
s = ""
for i in range(len(ndef)):
s += chr(ndef[i])
print("ndef message:" + s)
return s
def eraseNDEFFile(self):
self.killRFSelectI2C()
self.selectNFCT4Application()
self.selectFile(self.CC, pcb=0x03)
data = self.readBinary(0x0000, 0x02, pcb=0x02)
data = self.readBinary(0x0000, 0x0F, pcb=0x03)
self.selectFile(self.NDEF, pcb=0x02)
try:
data = self.updateBinaryLength(0)
print("File erased successfully")
except:
print("error while erasing file")
def writeNDEFFile(self, text):
self.eraseNDEFFile()
# Write hello world in the tag
print("Storing " + text + " in NDEF message")
hex_text = binascii.hexlify(text.encode('utf8'))
hex_list = [0x00 for i in range(0, int((len(hex_text)/2)))]
for i in range(0, int((len(hex_text)/2))):
hex_list[i] = int("0x"+str(hex_text[2*i:2*(i+1)]
).replace("b'", "").replace("'", ""))
data = self.updateBinary(0x0002, len(text), hex_list)
utime.sleep(1)
try:
data = self.updateBinaryLength(len(text))
print("File written successfully")
except:
print("error while writing file")
print("deselecting I2C")
self.deselect()
utime.sleep(2)
# PCB means "protocol control byte",
# Takes 0x02 or 0x03
# CLA is class byte (always 0x00 for these apps)
# INS is the instruction to send
# P1 P2 are parameter 1 and 2,
# Lc is length of command
# Data is the payload of the command
# Le is the length of expected response
# CRC2 is the cyclic redundancy check bytes
#Structure of NDEF message (NFC Data Exchange Format) ########################################################
# Byte 0 Byte 1 Byte 2 Byte 3
# 0x0000 NDEF message length User data User data
# 0x0004 User data User data User data User data
# ... ... ... ... ...
##############################################################################################################
# COMMANDS
# SEL PCB CLA INS P1 P2 Lc Data Le CRC2
# kill RF session, open I2C 0xAC 0x52
# select system file 0xAC 0x02 0x00 0xA4 0x00 0x0c 0x02 0xE101 0xCCCC
# read length 0xAD 0x03 0x00 0xB0 0x00 0x00 0x02 0xCCCC
# read memsize 0xAD 0x03 0x00 0xB0 0x00 0x0F 0x02 0xCCCC
# deselect (Kill I2C, open RF) 0xAC 0xC2 0xE0 0xB4
# erase NDEF len 0xAC 0x03 0x00 0xD6 0x00 0x00 0x02 0x00 0x00 0x6B 0x37
# write HELLO WORLD in tag 0xAC 0x02 0x00 0xD6 0x00 0x02 0x0B 0x68 0x65 0x6C 0x6C 0x6F 0x20 0x77 0x6F 0x72 0x6C 0x64 0x2F 0xFC
#####################################################################################################################################################
class CRC():
    """ISO/IEC 14443-A (CRC_A) 16-bit checksum, as used to frame the
    I2C/NFC commands above.

    LSB-first bytewise update with the ISO 14443-3 preset 0x6363;
    getCRC() returns the two checksum bytes in wire order (low, high).
    """

    def __init__(self, initial=0x6363):
        # 0x6363 is the CRC_A preset defined by ISO 14443-3.
        self.initial = initial

    def start(self):
        """Reset the running CRC to the preset value."""
        self.crc = self.initial

    def update(self, data):
        """Fold one byte value into the running CRC; return the new CRC."""
        # (removed an unused local `datain` kept by the original)
        data = data ^ ((self.crc) & 0x00FF)
        data = data ^ ((data << 4) & 0x00FF)
        self.crc = (self.crc >> 8) \
            ^ (data << 8) \
            ^ (data << 3) \
            ^ (data >> 4)
        self.crc = self.crc & 0xFFFF
        return self.crc

    def getCRC(self):
        """Return the checksum as (low byte, high byte) -- wire order."""
        return (self.crc & 0xFF), ((self.crc & 0xFF00) >> 8)

    def compute(block):
        """One-shot helper: CRC_A over an iterable of byte values.

        Intentionally takes the data as its single argument; call it as
        CRC.compute(payload).  Returns (low byte, high byte).
        """
        c = CRC()
        c.start()
        for b in block:  # was an index loop; iterate values directly
            c.update(b)
        return c.getCRC()
# Demo / smoke test.  Requires a MicroPython board with the NFC tag wired
# to I2C bus 1.
tag = NFCTag(machine.I2C(1))
print('(before) text in the tag is '+tag.readNDEFFile())
tag.eraseNDEFFile()
print('text in the tag is '+tag.readNDEFFile())
while True:
    # Idle forever so the board stays up after the demo completes.
    pass
| true | true |
1c475e204df91f662e807804eaf4a475b120362c | 18,766 | py | Python | OgreVertexBuffer.py | lamogui/ogre_blender_importer | 4742e27909f57598889bdfa8a956001c6776d056 | [
"MIT"
] | 13 | 2016-01-23T08:00:34.000Z | 2022-02-16T10:27:08.000Z | OgreVertexBuffer.py | lamogui/ogre_blender_importer | 4742e27909f57598889bdfa8a956001c6776d056 | [
"MIT"
] | 3 | 2016-09-20T15:22:28.000Z | 2021-05-31T01:25:05.000Z | OgreVertexBuffer.py | lamogui/ogre_blender_importer | 4742e27909f57598889bdfa8a956001c6776d056 | [
"MIT"
] | 9 | 2016-07-13T23:23:55.000Z | 2022-03-24T21:22:53.000Z | from enum import IntEnum;
from struct import unpack_from;
try:
from OgreHardwareBuffer import OgreFakeHardwareBuffer
except ImportError as e:
directory = os.path.dirname(os.path.realpath(__file__));
print("Import error: " + str(e) + " manual compilation" );
srcfile="OgreHardwareBuffer.py"; exec(compile(open(os.path.join(directory,srcfile)).read(), srcfile, 'exec'))
class OgreVertexBuffer(OgreFakeHardwareBuffer):
    """Fake vertex buffer: simulates a graphics-card memory buffer by
    tracking the per-vertex stride and the vertex count on top of the raw
    storage provided by OgreFakeHardwareBuffer."""

    def __init__(self, vertexSize, numVertices):
        OgreFakeHardwareBuffer.__init__(self)
        self._vertexSize = vertexSize
        self._numVertices = numVertices

    @property
    def vertexSize(self):
        """Stride of a single vertex, in bytes."""
        return self._vertexSize

    @property
    def numVertices(self):
        """Number of vertices held by the buffer."""
        return self._numVertices

    @property
    def sizeInBytes(self):
        """Total payload size: vertex count times per-vertex stride."""
        return self._numVertices * self._vertexSize
class OgreVertexElementSemantic(IntEnum):
    """Vertex element semantics, used to identify the meaning of vertex
    buffer contents."""

    VES_UNKNOWN = 0
    VES_POSITION = 1             # position, 3 reals per vertex
    VES_BLEND_WEIGHTS = 2        # blending weights
    VES_BLEND_INDICES = 3        # blending indices
    VES_NORMAL = 4               # normal, 3 reals per vertex
    VES_DIFFUSE = 5              # diffuse colours
    VES_SPECULAR = 6             # specular colours
    VES_TEXTURE_COORDINATES = 7  # texture coordinates
    VES_BINORMAL = 8             # binormal (Y axis if normal is Z)
    VES_TANGENT = 9              # tangent (X axis if normal is Z)
    # Number of semantics (the first value, VES_POSITION, is 1).  It shares
    # the value 9 with VES_TANGENT, so IntEnum makes it an alias:
    # toStr(VES_COUNT) yields "VES_TANGENT", exactly what the original
    # if/elif chain returned (it matched VES_TANGENT first).
    VES_COUNT = 9

    def toStr(ves):
        """Return the symbolic name for `ves` (a member or its integer
        value), or None for an unrecognised value.

        Replaces the original 20-branch if/elif chain with the enum's own
        name lookup; behaviour (including the None fall-through) is
        unchanged.
        """
        try:
            return OgreVertexElementSemantic(ves).name
        except ValueError:
            return None
class OgreVertexElementType(IntEnum):
    """Vertex element type, used to identify the base types of the vertex
    contents."""

    VET_FLOAT1 = 0
    VET_FLOAT2 = 1
    VET_FLOAT3 = 2
    VET_FLOAT4 = 3
    # alias to a more specific colour type - uses the current rendersystem's packing
    VET_COLOUR = 4
    VET_SHORT1 = 5
    VET_SHORT2 = 6
    VET_SHORT3 = 7
    VET_SHORT4 = 8
    VET_UBYTE4 = 9
    # D3D style compact colour
    VET_COLOUR_ARGB = 10
    # GL style compact colour
    VET_COLOUR_ABGR = 11
    VET_DOUBLE1 = 12
    VET_DOUBLE2 = 13
    VET_DOUBLE3 = 14
    VET_DOUBLE4 = 15
    VET_USHORT1 = 16
    VET_USHORT2 = 17
    VET_USHORT3 = 18
    VET_USHORT4 = 19
    VET_INT1 = 20
    VET_INT2 = 21
    VET_INT3 = 22
    VET_INT4 = 23
    VET_UINT1 = 24
    VET_UINT2 = 25
    VET_UINT3 = 26
    VET_UINT4 = 27

    def toStr(vet):
        """Return the symbolic name for `vet` (a member or its integer
        value), or None for an unrecognised value.

        Bug fix: the original if/elif chain mislabelled every DOUBLE,
        INT and UINT member as "VET_COLOUR_<name>" (copy-paste error);
        using the enum's own name lookup returns the correct names.
        """
        try:
            return OgreVertexElementType(vet).name
        except ValueError:
            return None
class OgreVertexElement:
    """Declares the usage of a single vertex buffer as a component of a
    complete VertexDeclaration.

    Several vertex buffers can supply the input geometry for a rendering
    operation; the buffer itself does not define the semantics (position,
    normal, etc.) - this class does.
    """

    # Byte size of one scalar of each base type family.  The family is the
    # member name with the "VET_" prefix and trailing count stripped,
    # e.g. VET_USHORT3 -> "USHORT".
    _SCALAR_BYTES = {
        "FLOAT": 4, "DOUBLE": 8,
        "SHORT": 2, "USHORT": 2,
        "INT": 4, "UINT": 4,
        "UBYTE": 1,
    }
    # struct format character per base type family (families absent here,
    # e.g. UBYTE, were rejected by the original unpack/count helpers too).
    _SCALAR_FORMAT = {
        "FLOAT": "f", "DOUBLE": "d",
        "SHORT": "h", "USHORT": "H",
        "INT": "i", "UINT": "I",
    }

    def __init__(self, source, offset, theType, semantic, index):
        # bug fix: the original asserted type(source) twice and never
        # validated `offset`
        assert(type(source) is int and type(offset) is int and type(index) is int)
        self._source = source      # index of the bound vertex buffer
        self._offset = offset      # byte offset of this element within a vertex
        self._type = theType       # OgreVertexElementType member
        self._semantic = semantic  # OgreVertexElementSemantic member
        self._index = index        # index for repeated semantics (e.g. texcoord sets)

    def getType(self):
        return self._type

    @property
    def semantic(self):
        return self._semantic

    @property
    def index(self):
        return self._index

    @property
    def offset(self):
        return self._offset

    @property
    def source(self):
        return self._source

    def _splitType(t):
        """Split a VET_* member into (family, scalar count), e.g.
        VET_FLOAT3 -> ("FLOAT", 3).  COLOUR variants report a count of 1.
        Returns None for anything unrecognised."""
        name = getattr(t, "name", "")
        if not name.startswith("VET_"):
            return None
        body = name[4:]
        if body.startswith("COLOUR"):
            return ("COLOUR", 1)
        family = body.rstrip("0123456789")
        digits = body[len(family):]
        if not digits:
            return None
        return (family, int(digits))

    def getTypeSize(t):
        """Size in bytes of one element of type `t`; 0 for unknown types
        (matching the original elif chain's fall-through)."""
        parts = OgreVertexElement._splitType(t)
        if parts is None:
            return 0
        family, count = parts
        if family == "COLOUR":
            return 4  # packed 32-bit colour
        per = OgreVertexElement._SCALAR_BYTES.get(family)
        return per * count if per is not None else 0

    def getTypeCount(t):
        """Number of scalar components of type `t`.  Raises ValueError for
        unsupported types - including VET_UBYTE4, exactly as the original
        chain did."""
        parts = OgreVertexElement._splitType(t)
        if parts is not None:
            family, count = parts
            if family == "COLOUR":
                return 1
            if family in OgreVertexElement._SCALAR_FORMAT and 1 <= count <= 4:
                return count
        raise ValueError("OgreVertexElement.getTypeCount(type): Invalid type")

    def getTypePythonUnpackStr(t):
        """struct.unpack format string for one element of type `t`."""
        parts = OgreVertexElement._splitType(t)
        if parts is not None:
            family, count = parts
            if family == "COLOUR":
                raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Color unsupported yet")
            fmt = OgreVertexElement._SCALAR_FORMAT.get(family)
            if fmt is not None and 1 <= count <= 4:
                return fmt * count
        raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Invalid type")

    def getBestCoulourVertexElementType():
        # Blender uses OpenGL, hence the GL-style ABGR packing.
        return OgreVertexElementType.VET_COLOUR_ABGR

    def __eq__(self, other):
        # bug fix: the original compared `self._offet` (typo), which would
        # have raised AttributeError on any equality test
        return (self._source == other._source and
                self._index == other._index and
                self._offset == other._offset and
                self._semantic == other._semantic and
                self._type == other._type)

    def getSize(self):
        """Size in bytes of this element."""
        return OgreVertexElement.getTypeSize(self._type)

    def extractFromBuffer(self, vertexBufferBinding, dest, endianess):
        """Unpack this element for every vertex of its bound buffer and
        append each unpacked tuple to `dest`.

        NOTE(review): endianness handling is still unresolved (the
        original left it FIXME'd); native byte order "=" is used and the
        `endianess` argument is ignored.
        """
        buf = vertexBufferBinding.getBuffer(self.source)
        cmd = "=" + OgreVertexElement.getTypePythonUnpackStr(self.getType())
        print(cmd)
        data = buf.data[self.offset:]
        for i in range(buf.numVertices):
            dest.append(unpack_from(cmd, data, i * buf.vertexSize))
class OgreVertexDeclaration:
    """Declares the format of a set of vertex inputs (an ordered list of
    OgreVertexElement), which can be issued to the rendering API through a
    RenderOperation.  Mirrors Ogre's C++ class of the same name; for
    maximum compatibility elements should be ordered position, blend
    weights, normals, diffuse, specular, texture coordinates."""

    def __init__(self):
        self._elementList = []

    def getElements(self):
        """Return the live element list (not a copy)."""
        return self._elementList

    def addElement(self, source, offset, theType, semantic, index):
        """Append a new element and return it.  VET_COLOUR is resolved to
        the platform's preferred packed-colour type."""
        if theType == OgreVertexElementType.VET_COLOUR:
            theType = OgreVertexElement.getBestCoulourVertexElementType()
        element = OgreVertexElement(source, offset, theType, semantic, index)
        self._elementList.append(element)
        return element

    def insertElement(self, atPosition, source, offset, theType, semantic, index):
        """Insert a new element at `atPosition` and return it.

        Bug fixes: the original referenced `_elementList` without `self`
        (NameError at runtime) and returned the *last* element of the
        list instead of the one just inserted.
        """
        if atPosition >= len(self._elementList):
            return self.addElement(source, offset, theType, semantic, index)
        element = OgreVertexElement(source, offset, theType, semantic, index)
        self._elementList.insert(atPosition, element)
        return element

    def getElement(self, index):
        return self._elementList[index]

    def removeElement(self, index):
        del self._elementList[index]

    def removeElementWithSemantic(self, semantic, index):
        """Remove the first element matching (semantic, index).

        Bug fix: the original called range() on the list itself instead
        of range(len(...)), raising TypeError.
        """
        for i in range(len(self._elementList)):
            e = self._elementList[i]
            if e.semantic == semantic and e.index == index:
                del self._elementList[i]
                break

    def removeAllElements(self):
        self._elementList = []

    def findElementBySemantic(self, sem, index):
        """First element matching (sem, index), or None."""
        for e in self._elementList:
            if e.semantic == sem and e.index == index:
                return e
        return None

    def findElementsBySemantic(self, sem):
        """All elements with the given semantic."""
        return [e for e in self._elementList if e.semantic == sem]

    def findElementBySource(self, source):
        """All elements bound to the given buffer source."""
        return [e for e in self._elementList if e.source == source]

    def getVertexSize(self, source):
        """Total vertex stride (bytes) contributed by buffer `source`."""
        return sum(e.getSize() for e in self._elementList if e.source == source)

    def vertexElementLess(e1, e2):
        """Strict ordering by (source, semantic, index).  Kept for
        backward compatibility with the original comparator API."""
        return (e1.source, e1.semantic, e1.index) < (e2.source, e2.semantic, e2.index)

    def sort(self):
        """Sort elements by (source, semantic, index).

        Bug fix: the original passed the Python 2-only `cmp=` keyword to
        list.sort(), which raises TypeError on Python 3.
        """
        self._elementList.sort(key=lambda e: (e.source, e.semantic, e.index))

    def closeGapInSource(self):
        if not self._elementList:
            return
        self.sort()
        raise NotImplementedError
class OgreVertexBufferBinding:
    """Associates buffer-source indices with their vertex buffers.

    A legacy of Ogre's design: the file format stores vertex declarations
    separately from vertex buffers, so this map lets a declaration's
    `source` index resolve to the actual buffer object."""

    def __init__(self):
        self._bindingMap = {}

    def setBinding(self, index, vbuffer):
        """Bind `vbuffer` to the given source index, replacing any
        previous binding for that index."""
        key = str(index)
        self._bindingMap[key] = vbuffer

    def getBuffer(self, source):
        """Return the buffer bound to `source`.  Raises KeyError when the
        source was never bound."""
        return self._bindingMap[str(source)]

    def unsetAllBindings(self):
        """Drop every binding."""
        self._bindingMap = {}
| 37.013807 | 113 | 0.632154 | from enum import IntEnum;
from struct import unpack_from;
try:
from OgreHardwareBuffer import OgreFakeHardwareBuffer
except ImportError as e:
directory = os.path.dirname(os.path.realpath(__file__));
print("Import error: " + str(e) + " manual compilation" );
srcfile="OgreHardwareBuffer.py"; exec(compile(open(os.path.join(directory,srcfile)).read(), srcfile, 'exec'))
class OgreVertexBuffer(OgreFakeHardwareBuffer):
def __init__(self, vertexSize, numVertices):
OgreFakeHardwareBuffer.__init__(self);
self._vertexSize = vertexSize;
self._numVertices = numVertices;
@property
def vertexSize(self):
return self._vertexSize;
@property
def numVertices(self):
return self._numVertices;
@property
def sizeInBytes(self):
return self.vertexSize * self.numVertices;
class OgreVertexElementSemantic(IntEnum):
VES_UNKNOWN = 0;
VES_POSITION = 1;
VES_BLEND_WEIGHTS = 2;
VES_BLEND_INDICES = 3;
VES_NORMAL = 4;
VES_DIFFUSE = 5;
VES_SPECULAR = 6;
VES_TEXTURE_COORDINATES = 7;
VES_BINORMAL = 8;
VES_TANGENT = 9;
VES_COUNT = 9;
def toStr(ves):
if (ves==OgreVertexElementSemantic.VES_UNKNOWN):
return "VES_UNKNOWN";
elif (ves==OgreVertexElementSemantic.VES_POSITION):
return "VES_POSITION";
elif (ves==OgreVertexElementSemantic.VES_BLEND_WEIGHTS):
return "VES_BLEND_WEIGHTS";
elif (ves==OgreVertexElementSemantic.VES_BLEND_INDICES):
return "VES_BLEND_INDICES";
elif (ves==OgreVertexElementSemantic.VES_NORMAL):
return "VES_NORMAL";
elif (ves==OgreVertexElementSemantic.VES_DIFFUSE):
return "VES_DIFFUSE";
elif (ves==OgreVertexElementSemantic.VES_SPECULAR):
return "VES_SPECULAR";
elif (ves==OgreVertexElementSemantic.VES_TEXTURE_COORDINATES):
return "VES_TEXTURE_COORDINATES";
elif (ves==OgreVertexElementSemantic.VES_BINORMAL):
return "VES_BINORMAL";
elif (ves==OgreVertexElementSemantic.VES_TANGENT):
return "VES_TANGENT";
elif (ves==OgreVertexElementSemantic.VES_COUNT):
return "VES_COUNT";
class OgreVertexElementType(IntEnum):
VET_FLOAT1 = 0;
VET_FLOAT2 = 1;
VET_FLOAT3 = 2;
VET_FLOAT4 = 3;
VET_COLOUR = 4;
VET_SHORT1 = 5;
VET_SHORT2 = 6;
VET_SHORT3 = 7;
VET_SHORT4 = 8;
VET_UBYTE4 = 9;
# D3D style compact colour
VET_COLOUR_ARGB = 10;
# GL style compact colour
VET_COLOUR_ABGR = 11;
VET_DOUBLE1 = 12;
VET_DOUBLE2 = 13;
VET_DOUBLE3 = 14;
VET_DOUBLE4 = 15;
VET_USHORT1 = 16;
VET_USHORT2 = 17;
VET_USHORT3 = 18;
VET_USHORT4 = 19;
VET_INT1 = 20;
VET_INT2 = 21;
VET_INT3 = 22;
VET_INT4 = 23;
VET_UINT1 = 24;
VET_UINT2 = 25;
VET_UINT3 = 26;
VET_UINT4 = 27;
def toStr(vet):
if (vet==OgreVertexElementType.VET_FLOAT1):
return "VET_FLOAT1";
elif (vet==OgreVertexElementType.VET_FLOAT2):
return "VET_FLOAT2";
elif (vet==OgreVertexElementType.VET_FLOAT3):
return "VET_FLOAT3";
elif (vet==OgreVertexElementType.VET_FLOAT4):
return "VET_FLOAT4";
elif (vet==OgreVertexElementType.VET_COLOUR):
return "VET_COLOUR";
elif (vet==OgreVertexElementType.VET_SHORT1):
return "VET_SHORT1";
elif (vet==OgreVertexElementType.VET_SHORT2):
return "VET_SHORT2";
elif (vet==OgreVertexElementType.VET_SHORT3):
return "VET_SHORT3";
elif (vet==OgreVertexElementType.VET_SHORT4):
return "VET_SHORT4";
elif (vet==OgreVertexElementType.VET_USHORT1):
return "VET_USHORT1";
elif (vet==OgreVertexElementType.VET_USHORT2):
return "VET_USHORT2";
elif (vet==OgreVertexElementType.VET_USHORT3):
return "VET_USHORT3";
elif (vet==OgreVertexElementType.VET_USHORT4):
return "VET_USHORT4";
elif (vet==OgreVertexElementType.VET_UBYTE4):
return "VET_UBYTE4";
elif (vet==OgreVertexElementType.VET_COLOUR_ABGR):
return "VET_COLOUR_ABGR";
elif (vet==OgreVertexElementType.VET_COLOUR_ARGB):
return "VET_COLOUR_ARGB";
elif (vet==OgreVertexElementType.VET_DOUBLE1):
return "VET_COLOUR_DOUBLE1";
elif (vet==OgreVertexElementType.VET_DOUBLE2):
return "VET_COLOUR_DOUBLE2";
elif (vet==OgreVertexElementType.VET_DOUBLE3):
return "VET_COLOUR_DOUBLE3";
elif (vet==OgreVertexElementType.VET_DOUBLE4):
return "VET_COLOUR_DOUBLE4";
elif (vet==OgreVertexElementType.VET_INT1):
return "VET_COLOUR_INT1";
elif (vet==OgreVertexElementType.VET_INT2):
return "VET_COLOUR_INT2";
elif (vet==OgreVertexElementType.VET_INT3):
return "VET_COLOUR_INT3";
elif (vet==OgreVertexElementType.VET_INT4):
return "VET_COLOUR_INT4";
elif (vet==OgreVertexElementType.VET_UINT1):
return "VET_COLOUR_UINT1";
elif (vet==OgreVertexElementType.VET_UINT2):
return "VET_COLOUR_UINT2";
elif (vet==OgreVertexElementType.VET_UINT3):
return "VET_COLOUR_UINT3";
elif (vet==OgreVertexElementType.VET_UINT4):
return "VET_COLOUR_UINT4";
class OgreVertexElement:
def __init__(self, source, offset, theType, semantic, index):
assert(type(source) is int and type(source) is int and type(index) is int);
self._source = source;
self._offset = offset;
self._type = theType;
self._semantic = semantic;
self._index = index;
def getType(self):
return self._type;
@property
def semantic(self):
return self._semantic;
@property
def index(self):
return self._index;
@property
def offset(self):
return self._offset;
@property
def source(self):
return self._source;
def getTypeSize(t):
if (t==OgreVertexElementType.VET_COLOUR or \
t==OgreVertexElementType.VET_COLOUR_ABGR or \
t==OgreVertexElementType.VET_COLOUR_ARGB):
return 4;
elif (t==OgreVertexElementType.VET_FLOAT1):
return 4*1;
elif (t==OgreVertexElementType.VET_FLOAT2):
return 4*2;
elif (t==OgreVertexElementType.VET_FLOAT3):
return 4*3;
elif (t==OgreVertexElementType.VET_FLOAT4):
return 4*4;
elif (t==OgreVertexElementType.VET_DOUBLE1):
return 8*1;
elif (t==OgreVertexElementType.VET_DOUBLE2):
return 8*2;
elif (t==OgreVertexElementType.VET_DOUBLE3):
return 8*3;
elif (t==OgreVertexElementType.VET_DOUBLE4):
return 8*4;
elif (t==OgreVertexElementType.VET_SHORT1):
return 2*1;
elif (t==OgreVertexElementType.VET_SHORT2):
return 2*2;
elif (t==OgreVertexElementType.VET_SHORT3):
return 2*3;
elif (t==OgreVertexElementType.VET_SHORT4):
return 2*4;
elif (t==OgreVertexElementType.VET_USHORT1):
return 2*1;
elif (t==OgreVertexElementType.VET_USHORT2):
return 2*2;
elif (t==OgreVertexElementType.VET_USHORT3):
return 2*3;
elif (t==OgreVertexElementType.VET_USHORT4):
return 2*4;
elif (t==OgreVertexElementType.VET_INT1):
return 4*1;
elif (t==OgreVertexElementType.VET_INT2):
return 4*2;
elif (t==OgreVertexElementType.VET_INT3):
return 4*3;
elif (t==OgreVertexElementType.VET_INT4):
return 4*4;
elif (t==OgreVertexElementType.VET_UINT1):
return 4*1;
elif (t==OgreVertexElementType.VET_UINT2):
return 4*2;
elif (t==OgreVertexElementType.VET_UINT3):
return 4*3;
elif (t==OgreVertexElementType.VET_UINT4):
return 4*4;
elif (t==OgreVertexElementType.VET_UBYTE4):
return 4;
return 0;
def getTypeCount(t):
if (t==OgreVertexElementType.VET_COLOUR or \
t==OgreVertexElementType.VET_COLOUR_ABGR or \
t==OgreVertexElementType.VET_COLOUR_ARGB or \
t==OgreVertexElementType.VET_FLOAT1 or \
t==OgreVertexElementType.VET_DOUBLE1 or \
t==OgreVertexElementType.VET_SHORT1 or \
t==OgreVertexElementType.VET_USHORT1 or \
t==OgreVertexElementType.VET_INT1 or \
t==OgreVertexElementType.VET_UINT1):
return 1;
elif (t==OgreVertexElementType.VET_FLOAT2 or \
t==OgreVertexElementType.VET_DOUBLE2 or \
t==OgreVertexElementType.VET_SHORT2 or \
t==OgreVertexElementType.VET_USHORT2 or \
t==OgreVertexElementType.VET_INT2 or \
t==OgreVertexElementType.VET_UINT2):
return 2;
elif (t==OgreVertexElementType.VET_FLOAT3 or \
t==OgreVertexElementType.VET_DOUBLE3 or \
t==OgreVertexElementType.VET_SHORT3 or \
t==OgreVertexElementType.VET_USHORT3 or \
t==OgreVertexElementType.VET_INT3 or \
t==OgreVertexElementType.VET_UINT3):
return 3;
elif (t==OgreVertexElementType.VET_FLOAT4 or \
t==OgreVertexElementType.VET_DOUBLE4 or \
t==OgreVertexElementType.VET_SHORT4 or \
t==OgreVertexElementType.VET_USHORT4 or \
t==OgreVertexElementType.VET_INT4 or \
t==OgreVertexElementType.VET_UINT4):
return 4;
raise ValueError("OgreVertexElement.getTypeCount(type): Invalid type");
def getTypePythonUnpackStr(t):
if (t==OgreVertexElementType.VET_COLOUR or \
t==OgreVertexElementType.VET_COLOUR_ABGR or \
t==OgreVertexElementType.VET_COLOUR_ARGB):
raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Color unsupported yet");
elif (t==OgreVertexElementType.VET_FLOAT1 or \
t==OgreVertexElementType.VET_FLOAT2 or \
t==OgreVertexElementType.VET_FLOAT3 or \
t==OgreVertexElementType.VET_FLOAT4):
return 'f' * OgreVertexElement.getTypeCount(t);
elif (t==OgreVertexElementType.VET_DOUBLE1 or \
t==OgreVertexElementType.VET_DOUBLE2 or \
t==OgreVertexElementType.VET_DOUBLE3 or \
t==OgreVertexElementType.VET_DOUBLE4):
return 'd' * OgreVertexElement.getTypeCount(t);
elif (t==OgreVertexElementType.VET_SHORT1 or \
t==OgreVertexElementType.VET_SHORT2 or \
t==OgreVertexElementType.VET_SHORT3 or \
t==OgreVertexElementType.VET_SHORT4):
return 'h' * OgreVertexElement.getTypeCount(t);
elif (t==OgreVertexElementType.VET_USHORT1 or \
t==OgreVertexElementType.VET_USHORT2 or \
t==OgreVertexElementType.VET_USHORT3 or \
t==OgreVertexElementType.VET_USHORT4):
return 'H' * OgreVertexElement.getTypeCount(t);
elif (t==OgreVertexElementType.VET_INT1 or \
t==OgreVertexElementType.VET_INT2 or \
t==OgreVertexElementType.VET_INT3 or \
t==OgreVertexElementType.VET_INT4):
return 'i' * OgreVertexElement.getTypeCount(t);
elif (t==OgreVertexElementType.VET_UINT1 or \
t==OgreVertexElementType.VET_UINT2 or \
t==OgreVertexElementType.VET_UINT3 or \
t==OgreVertexElementType.VET_UINT4):
return 'I' * OgreVertexElement.getTypeCount(t);
raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Invalid type");
def getBestCoulourVertexElementType():
#Blender use opengl
return OgreVertexElementType.VET_COLOUR_ABGR;
def __eq__(self, other):
if (self._source == other._source and \
self._index == other._index and \
self._offet == other._offset and \
self._semantic == other._semantic and \
self._type == other._type):
return True;
else:
return False;
def getSize(self):
return OgreVertexElement.getTypeSize(self._type);
def extractFromBuffer(self, vertexBufferBinding, dest, endianess):
buf = vertexBufferBinding.getBuffer(self.source);
cmd = "";
#FIXME: endianess not working...
#if (endianess.value == 'big'):
# cmd = '<';
#elif (endianess.value == 'little'):
# cmd = '>';
#else :
# cmd = endianess;
#assert(cmd == '<' or cmd == '>');
cmd = "="
cmd = cmd + OgreVertexElement.getTypePythonUnpackStr(self.getType());
print(cmd);
data = buf.data[self.offset:]
for i in range(buf.numVertices):
v = unpack_from(cmd, data, i * buf.vertexSize);
dest.append(v);
class OgreVertexDeclaration:
def __init__(self):
self._elementList = [];
def getElements(self):
return self._elementList;
def addElement(self, source, offset, theType, semantic, index):
if (theType == OgreVertexElementType.VET_COLOUR):
theType = OgreVertexElement.getBestCoulourVertexElementType();
self._elementList.append(OgreVertexElement(source,offset,theType,semantic,index));
return self._elementList[-1];
def insertElement(self, atPosition, source, offset, theType, semantic, index):
if (atPosition >= len(_elementList)):
return self.addElement(source,offset,theType,semantic,index);
_elementList.insert(atPosition,OgreVertexElement(source,offset,theType,semantic,index));
return _elementList[-1];
def getElement(self, index):
return self._elementList[index];
def removeElement(self, index):
del self._elementList[index];
def removeElementWithSemantic(self, semantic, index):
for i in range(self._elementList):
if (self._elementList[i].semantic == semantic and self._elementList[i].index == index):
del self._elementList[i];
break;
def removeAllElements(self):
self._elementList = [];
def findElementBySemantic(self, sem, index):
for e in self._elementList:
if (e.semantic == sem and e.index == index):
return e;
return None;
def findElementsBySemantic(self,sem):
elements = []
for e in self._elementList:
if (e.semantic == sem):
elements.append(e);
return elements;
def findElementBySource(self,source):
return [e for e in self._elementList if e.source == source];
def getVertexSize(self, source):
sz = 0;
for e in self._elementList:
if (e.source == source):
sz += e.getSize();
return sz;
def vertexElementLess(e1, e2):
if (e1.source < e2.source):
return True;
elif (e1.source == e2.source):
if (e1.semantic < e2.semantic):
return True;
elif (e1.semantic == e2.semantic):
if (e1.index < e2.index):
return True;
return False;
def sort(self):
self._elementList.sort(cmp=OgreVertexDeclaration.vertexElementLess);
def closeGapInSource(self):
if (not self._elementList):
return;
self.sort();
raise NotImplementedError;
class OgreVertexBufferBinding:
    """Maps numeric source indices to vertex buffers.

    Keys are stored as strings (mirroring the original implementation), so
    ``setBinding(3, buf)`` and ``getBuffer(3)`` agree on the key ``"3"``.
    """

    def __init__(self):
        self._bindingMap = {}

    def setBinding(self, index, vbuffer):
        """Bind *vbuffer* to the given source *index* (overwrites any previous)."""
        key = str(index)
        self._bindingMap[key] = vbuffer

    def getBuffer(self, source):
        """Return the buffer bound to *source*; raises KeyError when unbound."""
        key = str(source)
        return self._bindingMap[key]

    def unsetAllBindings(self):
        """Drop every binding."""
        self._bindingMap = {}
| true | true |
1c475e3625b49e36e394562fd00fe1877c86b2a5 | 4,692 | py | Python | env/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | env/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | env/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # sqlite/pysqlcipher.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite+pysqlcipher
:name: pysqlcipher
:dbapi: pysqlcipher
:connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=<iter>]
:url: https://pypi.python.org/pypi/pysqlcipher
``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make
use of the `SQLCipher <https://www.zetetic.net/sqlcipher>`_ backend.
``pysqlcipher3`` is a fork of ``pysqlcipher`` for Python 3. This dialect
will attempt to import it if ``pysqlcipher`` is non-present.
.. versionadded:: 1.1.4 - added fallback import for pysqlcipher3
.. versionadded:: 0.9.9 - added pysqlcipher dialect
Driver
------
The driver here is the
`pysqlcipher <https://pypi.python.org/pypi/pysqlcipher>`_
driver, which makes use of the SQLCipher engine. This system essentially
introduces new PRAGMA commands to SQLite which allows the setting of a
passphrase and other encryption parameters, allowing the database
file to be encrypted.
`pysqlcipher3` is a fork of `pysqlcipher` with support for Python 3,
the driver is the same.
Connect Strings
---------------
The format of the connect string is in every way the same as that
of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
"password" field is now accepted, which should contain a passphrase::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')
For an absolute file path, two leading slashes should be used for the
database name::
e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')
A selection of additional encryption-related pragmas supported by SQLCipher
as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
in the query string, and will result in that PRAGMA being called for each
new connection. Currently, ``cipher``, ``kdf_iter``
``cipher_page_size`` and ``cipher_use_hmac`` are supported::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')
Pooling Behavior
----------------
The driver makes a change to the default pool behavior of pysqlite
as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver
has been observed to be significantly slower on connection than the
pysqlite driver, most likely due to the encryption overhead, so the
dialect here defaults to using the :class:`.SingletonThreadPool`
implementation,
instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool
implementation is entirely configurable using the
:paramref:`_sa.create_engine.poolclass` parameter; the :class:`.StaticPool`
may
be more feasible for single-threaded use, or :class:`.NullPool` may be used
to prevent unencrypted connections from being held open for long periods of
time, at the expense of slower startup time for new connections.
""" # noqa
from __future__ import absolute_import
from .pysqlite import SQLiteDialect_pysqlite
from ... import pool
from ...engine import url as _url
class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
    """SQLite dialect targeting the SQLCipher encrypted backend.

    Extends the plain pysqlite dialect with passphrase handling and with
    encryption PRAGMAs that are issued on every newly opened connection.
    """
    driver = "pysqlcipher"
    # Encryption-related PRAGMAs accepted in the URL query string; each is
    # forwarded to SQLCipher in connect() right after the key is set.
    pragmas = ("kdf_iter", "cipher", "cipher_page_size", "cipher_use_hmac")
    @classmethod
    def dbapi(cls):
        """Import and return the DBAPI module.

        Tries ``pysqlcipher`` first and falls back to the Python 3 fork
        ``pysqlcipher3``; re-raises the original ImportError when neither
        is installed.
        """
        try:
            from pysqlcipher import dbapi2 as sqlcipher
        except ImportError as e:
            try:
                from pysqlcipher3 import dbapi2 as sqlcipher
            except ImportError:
                raise e
        return sqlcipher
    @classmethod
    def get_pool_class(cls, url):
        # SQLCipher connections are slow to establish (encryption overhead),
        # so keep them around per thread instead of pysqlite's NullPool.
        return pool.SingletonThreadPool
    def connect(self, *cargs, **cparams):
        """Open a connection, set the encryption key, then apply PRAGMAs.

        ``passphrase`` and any recognized encryption PRAGMAs are popped from
        the connect parameters so the underlying driver never sees them.
        """
        passphrase = cparams.pop("passphrase", "")
        pragmas = dict((key, cparams.pop(key, None)) for key in self.pragmas)
        conn = super(SQLiteDialect_pysqlcipher, self).connect(
            *cargs, **cparams
        )
        # The key must be set before any other statement touches the file.
        conn.execute('pragma key="%s"' % passphrase)
        for prag, value in pragmas.items():
            if value is not None:
                conn.execute('pragma %s="%s"' % (prag, value))
        return conn
    def create_connect_args(self, url):
        """Split the URL: the password becomes the SQLCipher passphrase.

        A copy of the URL without the password is handed to the pysqlite
        dialect; the passphrase travels separately via connect params.
        """
        super_url = _url.URL(
            url.drivername,
            username=url.username,
            host=url.host,
            database=url.database,
            query=url.query,
        )
        c_args, opts = super(
            SQLiteDialect_pysqlcipher, self
        ).create_connect_args(super_url)
        opts["passphrase"] = url.password
        return c_args, opts
dialect = SQLiteDialect_pysqlcipher
| 33.755396 | 96 | 0.702472 |
from __future__ import absolute_import
from .pysqlite import SQLiteDialect_pysqlite
from ... import pool
from ...engine import url as _url
class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
driver = "pysqlcipher"
pragmas = ("kdf_iter", "cipher", "cipher_page_size", "cipher_use_hmac")
@classmethod
def dbapi(cls):
try:
from pysqlcipher import dbapi2 as sqlcipher
except ImportError as e:
try:
from pysqlcipher3 import dbapi2 as sqlcipher
except ImportError:
raise e
return sqlcipher
@classmethod
def get_pool_class(cls, url):
return pool.SingletonThreadPool
def connect(self, *cargs, **cparams):
passphrase = cparams.pop("passphrase", "")
pragmas = dict((key, cparams.pop(key, None)) for key in self.pragmas)
conn = super(SQLiteDialect_pysqlcipher, self).connect(
*cargs, **cparams
)
conn.execute('pragma key="%s"' % passphrase)
for prag, value in pragmas.items():
if value is not None:
conn.execute('pragma %s="%s"' % (prag, value))
return conn
def create_connect_args(self, url):
super_url = _url.URL(
url.drivername,
username=url.username,
host=url.host,
database=url.database,
query=url.query,
)
c_args, opts = super(
SQLiteDialect_pysqlcipher, self
).create_connect_args(super_url)
opts["passphrase"] = url.password
return c_args, opts
dialect = SQLiteDialect_pysqlcipher
| true | true |
1c475e7b96a4c7661d55f944dc305ea0b892c612 | 2,727 | py | Python | facerec_py/facerec/svm.py | idf/FaceReader | d649bf7ca7f9cf66ac99e81a5187cfcc2b54f49d | [
"MIT"
] | 7 | 2015-04-17T02:12:32.000Z | 2018-08-08T01:29:24.000Z | facerec_py/facerec/svm.py | idf/FaceReader | d649bf7ca7f9cf66ac99e81a5187cfcc2b54f49d | [
"MIT"
] | null | null | null | facerec_py/facerec/svm.py | idf/FaceReader | d649bf7ca7f9cf66ac99e81a5187cfcc2b54f49d | [
"MIT"
] | 4 | 2017-08-26T11:44:20.000Z | 2021-06-13T11:50:11.000Z | from facerec_py.facerec.classifier import SVM
from facerec_py.facerec.validation import KFoldCrossValidation
from facerec_py.facerec.model import PredictableModel
from svmutil import *
from itertools import product
import numpy as np
import logging
def range_f(begin, end, step):
    """Return the inclusive arithmetic sequence from *begin* towards *end*.

    Works for positive and negative (including fractional) steps; a step of
    zero yields an empty list, guarding against an infinite loop.
    """
    values = []
    if step == 0:
        return values
    current = begin
    if step > 0:
        while current <= end:
            values.append(current)
            current = current + step
    else:
        while current >= end:
            values.append(current)
            current = current + step
    return values
def grid(grid_parameters):
    """Yield the cartesian product of inclusive ranges.

    Each parameter is a (begin, end, step) triple expanded via range_f; the
    result is an iterator over all combinations.
    """
    axes = [range_f(begin, end, step)
            for (begin, end, step) in grid_parameters]
    return product(*axes)
def grid_search(model, X, y, C_range=(-5, 15, 2), gamma_range=(3, -15, -2), k=5, num_cores=1):
    """Exhaustive (C, gamma) grid search for an SVM-backed PredictableModel.

    C and gamma candidates are 2**exponent for each exponent produced by the
    inclusive (begin, end, step) ranges. Each combination is scored with a
    k-fold cross validation on (X, y).

    Returns a tuple (best_parameter, results) where best_parameter is an
    svm_parameter holding the best C/gamma found and results is a list of
    [C, gamma, accuracy] entries for every combination tried.

    NOTE(review): ``num_cores`` is accepted but never used — the search runs
    sequentially; confirm whether parallel execution was intended.
    """
    if not isinstance(model, PredictableModel):
        raise TypeError("GridSearch expects a PredictableModel. If you want to perform optimization on raw data use facerec.feature.Identity to pass unpreprocessed data!")
    if not isinstance(model.classifier, SVM):
        raise TypeError("GridSearch expects a SVM as classifier. Please use a facerec.classifier.SVM!")
    logger = logging.getLogger("facerec.svm.gridsearch")
    logger.info("Performing a Grid Search.")
    # best parameter combination to return
    best_parameter = svm_parameter("-q")
    best_parameter.kernel_type = model.classifier.param.kernel_type
    best_parameter.nu = model.classifier.param.nu
    best_parameter.coef0 = model.classifier.param.coef0
    # either no gamma given or kernel is linear (only C to optimize)
    if (gamma_range is None) or (model.classifier.param.kernel_type == LINEAR):
        gamma_range = (0, 0, 1)
    # best validation error so far
    best_accuracy = np.finfo('float').min
    # create grid (cartesian product of ranges)
    g = grid([C_range, gamma_range])
    results = []
    for p in g:
        C, gamma = p
        # grid values are exponents; the actual hyperparameters are powers of 2
        C, gamma = 2**C, 2**gamma
        model.classifier.param.C, model.classifier.param.gamma = C, gamma
        # perform a k-fold cross validation
        cv = KFoldCrossValidation(model=model,k=k)
        cv.validate(X,y)
        # append parameter into list with accuracies for all parameter combinations
        results.append([C, gamma, cv.accuracy])
        # store best parameter combination
        if cv.accuracy > best_accuracy:
            logger.info("best_accuracy=%s" % (cv.accuracy))
            best_accuracy = cv.accuracy
            best_parameter.C, best_parameter.gamma = C, gamma
        logger.info("%d-CV Result = %.2f." % (k, cv.accuracy))
    # set best parameter combination to best found
    return best_parameter, results
| 35.881579 | 171 | 0.6641 | from facerec_py.facerec.classifier import SVM
from facerec_py.facerec.validation import KFoldCrossValidation
from facerec_py.facerec.model import PredictableModel
from svmutil import *
from itertools import product
import numpy as np
import logging
def range_f(begin, end, step):
seq = []
while True:
if step == 0: break
if step > 0 and begin > end: break
if step < 0 and begin < end: break
seq.append(begin)
begin = begin + step
return seq
def grid(grid_parameters):
grid = []
for parameter in grid_parameters:
begin, end, step = parameter
grid.append(range_f(begin, end, step))
return product(*grid)
def grid_search(model, X, y, C_range=(-5, 15, 2), gamma_range=(3, -15, -2), k=5, num_cores=1):
if not isinstance(model, PredictableModel):
raise TypeError("GridSearch expects a PredictableModel. If you want to perform optimization on raw data use facerec.feature.Identity to pass unpreprocessed data!")
if not isinstance(model.classifier, SVM):
raise TypeError("GridSearch expects a SVM as classifier. Please use a facerec.classifier.SVM!")
logger = logging.getLogger("facerec.svm.gridsearch")
logger.info("Performing a Grid Search.")
best_parameter = svm_parameter("-q")
best_parameter.kernel_type = model.classifier.param.kernel_type
best_parameter.nu = model.classifier.param.nu
best_parameter.coef0 = model.classifier.param.coef0
if (gamma_range is None) or (model.classifier.param.kernel_type == LINEAR):
gamma_range = (0, 0, 1)
best_accuracy = np.finfo('float').min
g = grid([C_range, gamma_range])
results = []
for p in g:
C, gamma = p
C, gamma = 2**C, 2**gamma
model.classifier.param.C, model.classifier.param.gamma = C, gamma
cv = KFoldCrossValidation(model=model,k=k)
cv.validate(X,y)
results.append([C, gamma, cv.accuracy])
if cv.accuracy > best_accuracy:
logger.info("best_accuracy=%s" % (cv.accuracy))
best_accuracy = cv.accuracy
best_parameter.C, best_parameter.gamma = C, gamma
logger.info("%d-CV Result = %.2f." % (k, cv.accuracy))
return best_parameter, results
| true | true |
1c475ea363209a3a683098d4d7dce556761ceb57 | 7,113 | py | Python | app/main.py | ri10073/tracardi-api | 828bc0939b3915af4c32906c65769c5b5fd992c3 | [
"MIT"
] | null | null | null | app/main.py | ri10073/tracardi-api | 828bc0939b3915af4c32906c65769c5b5fd992c3 | [
"MIT"
] | null | null | null | app/main.py | ri10073/tracardi-api | 828bc0939b3915af4c32906c65769c5b5fd992c3 | [
"MIT"
] | null | null | null | import logging
import os
import asyncio
from time import time
import elasticsearch
from fastapi.middleware.cors import CORSMiddleware
from fastapi import FastAPI, Request, Depends
from starlette.staticfiles import StaticFiles
from app.api import token_endpoint, rule_endpoint, resource_endpoint, event_endpoint, \
profile_endpoint, flow_endpoint, generic_endpoint, project_endpoint, \
credentials_endpoint, segments_endpoint, \
tql_endpoint, health_endpoint, session_endpoint, instance_endpoint, plugins_endpoint, test_endpoint, \
settings_endpoint, \
purchases_endpoint, event_tag_endpoint, consent_type_endpoint
from app.api.auth.authentication import get_current_user
from app.api.graphql.profile import graphql_profiles
from app.api.scheduler import tasks_endpoint
from app.api.track import event_server_endpoint
from app.config import server
from app.setup.on_start import add_plugins, update_api_instance
from tracardi.config import tracardi
from tracardi.service.storage.elastic_client import ElasticClient
from app.setup.indices_setup import create_indices
from tracardi.service.storage.index import resources
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger('app.main')
logger.setLevel(tracardi.logging_level)
_local_dir = os.path.dirname(__file__)
tags_metadata = [
{
"name": "profile",
"description": "Manage profiles. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Profile external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "resource",
"description": "Manage data resources. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Resource external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "rule",
"description": "Manage flow rule triggers. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Rule external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "flow",
"description": "Manage flows. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Flows external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "event",
"description": "Manage events. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Events external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "authorization",
"description": "OAuth authorization.",
},
{
"name": "tracker",
"description": "Read more about TRACARDI event server in documentation. http://localhost:8686/manual/en/site",
"externalDocs": {
"description": "External docs",
"url": "https://github/atompie/docs/en/docs",
},
}
]
application = FastAPI(
title="Tracardi Customer Data Platform Project",
description="TRACARDI open-source customer data platform offers you excellent control over your customer data with its broad set of features",
version="0.6.0",
openapi_tags=tags_metadata if server.expose_gui_api else None,
contact={
"name": "Risto Kowaczewski",
"url": "http://github.com/atompie/tracardi",
"email": "office@tracardi.com",
},
)
application.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
application.mount("/tracker",
StaticFiles(
html=True,
directory=os.path.join(_local_dir, "tracker")),
name="tracker")
application.mount("/manual",
StaticFiles(
html=True,
directory=os.path.join(_local_dir, "../manual")),
name="manual")
application.include_router(event_server_endpoint.router)
application.include_router(tql_endpoint.router)
application.include_router(segments_endpoint.router)
application.include_router(credentials_endpoint.router)
application.include_router(project_endpoint.router)
application.include_router(resource_endpoint.router)
application.include_router(rule_endpoint.router)
application.include_router(flow_endpoint.router)
application.include_router(event_endpoint.router)
application.include_router(profile_endpoint.router)
application.include_router(token_endpoint.router)
application.include_router(generic_endpoint.router)
application.include_router(health_endpoint.router)
application.include_router(session_endpoint.router)
application.include_router(tasks_endpoint.router)
application.include_router(instance_endpoint.router)
application.include_router(plugins_endpoint.router)
application.include_router(test_endpoint.router)
application.include_router(settings_endpoint.router)
application.include_router(purchases_endpoint.router)
application.include_router(event_tag_endpoint.router)
application.include_router(consent_type_endpoint.router)
# GraphQL
application.include_router(graphql_profiles,
prefix="/graphql/profile",
# dependencies=[Depends(get_current_user)],
tags=["graphql"])
@application.on_event("startup")
async def app_starts():
    """Initialize storage on startup, retrying until Elasticsearch is up.

    The loop retries the whole initialization every 5 seconds while the
    Elasticsearch cluster is unreachable; once it succeeds the heartbeat
    task is scheduled.
    """
    while True:
        try:
            if server.reset_plugins is True:
                # Drop the plugin/action index so plugins are re-registered
                # from scratch below.
                es = ElasticClient.instance()
                index = resources.resources['action']
                # NOTE(review): existence is checked on the write index but
                # removal targets the read index — confirm this is intended.
                if await es.exists_index(index.get_write_index()):
                    await es.remove_index(index.get_read_index())
            await create_indices()
            await update_api_instance()
            if server.update_plugins_on_start_up is not False:
                await add_plugins()
            break
        except elasticsearch.exceptions.ConnectionError:
            # Elasticsearch not reachable yet — wait and retry indefinitely.
            await asyncio.sleep(5)
    report_i_am_alive()
    logger.info("START UP exits.")
@application.middleware("http")
async def add_process_time_header(request: Request, call_next):
    """Report per-request handling time via the X-Process-Time header.

    When configured, an artificial delay is inserted before the handler runs
    (useful for testing slow-network behavior); the delay is included in the
    reported time.
    """
    started_at = time()
    if server.make_slower_responses > 0:
        await asyncio.sleep(server.make_slower_responses)
    response = await call_next(request)
    elapsed = time() - started_at
    response.headers["X-Process-Time"] = str(elapsed)
    return response
@application.on_event("shutdown")
async def app_shutdown():
    """Close the shared Elasticsearch client when the server shuts down."""
    await ElasticClient.instance().close()
def report_i_am_alive():
    """Schedule a background task that periodically refreshes this API
    instance's record, acting as a liveness heartbeat."""
    async def _beat_forever():
        while True:
            await asyncio.sleep(server.heartbeat_every)
            await update_api_instance()
    asyncio.create_task(_beat_forever())
if __name__ == "__main__":
import uvicorn
uvicorn.run("app.main:application", host="0.0.0.0", port=8686, log_level="info")
| 34.529126 | 146 | 0.685505 | import logging
import os
import asyncio
from time import time
import elasticsearch
from fastapi.middleware.cors import CORSMiddleware
from fastapi import FastAPI, Request, Depends
from starlette.staticfiles import StaticFiles
from app.api import token_endpoint, rule_endpoint, resource_endpoint, event_endpoint, \
profile_endpoint, flow_endpoint, generic_endpoint, project_endpoint, \
credentials_endpoint, segments_endpoint, \
tql_endpoint, health_endpoint, session_endpoint, instance_endpoint, plugins_endpoint, test_endpoint, \
settings_endpoint, \
purchases_endpoint, event_tag_endpoint, consent_type_endpoint
from app.api.auth.authentication import get_current_user
from app.api.graphql.profile import graphql_profiles
from app.api.scheduler import tasks_endpoint
from app.api.track import event_server_endpoint
from app.config import server
from app.setup.on_start import add_plugins, update_api_instance
from tracardi.config import tracardi
from tracardi.service.storage.elastic_client import ElasticClient
from app.setup.indices_setup import create_indices
from tracardi.service.storage.index import resources
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger('app.main')
logger.setLevel(tracardi.logging_level)
_local_dir = os.path.dirname(__file__)
tags_metadata = [
{
"name": "profile",
"description": "Manage profiles. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Profile external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "resource",
"description": "Manage data resources. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Resource external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "rule",
"description": "Manage flow rule triggers. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Rule external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "flow",
"description": "Manage flows. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Flows external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "event",
"description": "Manage events. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Events external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "authorization",
"description": "OAuth authorization.",
},
{
"name": "tracker",
"description": "Read more about TRACARDI event server in documentation. http://localhost:8686/manual/en/site",
"externalDocs": {
"description": "External docs",
"url": "https://github/atompie/docs/en/docs",
},
}
]
application = FastAPI(
title="Tracardi Customer Data Platform Project",
description="TRACARDI open-source customer data platform offers you excellent control over your customer data with its broad set of features",
version="0.6.0",
openapi_tags=tags_metadata if server.expose_gui_api else None,
contact={
"name": "Risto Kowaczewski",
"url": "http://github.com/atompie/tracardi",
"email": "office@tracardi.com",
},
)
application.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
application.mount("/tracker",
StaticFiles(
html=True,
directory=os.path.join(_local_dir, "tracker")),
name="tracker")
application.mount("/manual",
StaticFiles(
html=True,
directory=os.path.join(_local_dir, "../manual")),
name="manual")
application.include_router(event_server_endpoint.router)
application.include_router(tql_endpoint.router)
application.include_router(segments_endpoint.router)
application.include_router(credentials_endpoint.router)
application.include_router(project_endpoint.router)
application.include_router(resource_endpoint.router)
application.include_router(rule_endpoint.router)
application.include_router(flow_endpoint.router)
application.include_router(event_endpoint.router)
application.include_router(profile_endpoint.router)
application.include_router(token_endpoint.router)
application.include_router(generic_endpoint.router)
application.include_router(health_endpoint.router)
application.include_router(session_endpoint.router)
application.include_router(tasks_endpoint.router)
application.include_router(instance_endpoint.router)
application.include_router(plugins_endpoint.router)
application.include_router(test_endpoint.router)
application.include_router(settings_endpoint.router)
application.include_router(purchases_endpoint.router)
application.include_router(event_tag_endpoint.router)
application.include_router(consent_type_endpoint.router)
application.include_router(graphql_profiles,
prefix="/graphql/profile",
tags=["graphql"])
@application.on_event("startup")
async def app_starts():
while True:
try:
if server.reset_plugins is True:
es = ElasticClient.instance()
index = resources.resources['action']
if await es.exists_index(index.get_write_index()):
await es.remove_index(index.get_read_index())
await create_indices()
await update_api_instance()
if server.update_plugins_on_start_up is not False:
await add_plugins()
break
except elasticsearch.exceptions.ConnectionError:
await asyncio.sleep(5)
report_i_am_alive()
logger.info("START UP exits.")
@application.middleware("http")
async def add_process_time_header(request: Request, call_next):
start_time = time()
if server.make_slower_responses > 0:
await asyncio.sleep(server.make_slower_responses)
response = await call_next(request)
process_time = time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
@application.on_event("shutdown")
async def app_shutdown():
elastic = ElasticClient.instance()
await elastic.close()
def report_i_am_alive():
async def heartbeat():
while True:
await asyncio.sleep(server.heartbeat_every)
await update_api_instance()
asyncio.create_task(heartbeat())
if __name__ == "__main__":
import uvicorn
uvicorn.run("app.main:application", host="0.0.0.0", port=8686, log_level="info")
| true | true |
1c475ed89de55cb2f813d13f5130ed38d968d27a | 3,572 | py | Python | bindings/python/ensmallen/datasets/string/sulfurospirillumhalorespiransdsm13726.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/sulfurospirillumhalorespiransdsm13726.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/sulfurospirillumhalorespiransdsm13726.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Sulfurospirillum halorespirans DSM 13726.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def SulfurospirillumHalorespiransDsm13726(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Retrieve the Sulfurospirillum halorespirans DSM 13726 graph from STRING.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed; undirected by default.
    preprocess: bool = True
        Whether to preprocess the graph for optimal load time and memory.
    load_nodes: bool = True
        Whether to load the node vocabulary, or treat nodes as a numeric range.
    verbose: int = 2
        Whether to show loading bars during retrieval and building.
    cache: bool = True
        Whether to download and preprocess the files only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The graph version to retrieve. Available versions:
            - homology.v11.5
            - physical.links.v11.5
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional keyword arguments forwarded to the graph retriever.

    Returns
    -----------------------
    Instance of the Sulfurospirillum halorespirans DSM 13726 graph.

    References
    ---------------------
    Please cite: Szklarczyk et al., "STRING v11: protein-protein association
    networks with increased coverage...", Nucleic Acids Research, 47(D1),
    D607-D613, 2019, Oxford University Press.
    """
    retriever = AutomaticallyRetrievedGraph(
        graph_name="SulfurospirillumHalorespiransDsm13726",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
| 34.019048 | 223 | 0.68505 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def SulfurospirillumHalorespiransDsm13726(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="SulfurospirillumHalorespiransDsm13726",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
1c475eea3e539ba4d1a9a72d6264384d25b277e3 | 204 | py | Python | book/recursion/base_conversion.py | Web-Dev-Collaborative/algos | d280581d74ded382094283d931a202eb55fd8369 | [
"CC0-1.0"
] | 153 | 2015-12-24T00:32:23.000Z | 2022-02-24T06:00:29.000Z | book/recursion/base_conversion.py | Web-Dev-Collaborative/algos | d280581d74ded382094283d931a202eb55fd8369 | [
"CC0-1.0"
] | 78 | 2015-11-17T11:46:15.000Z | 2021-06-28T18:37:58.000Z | book/recursion/base_conversion.py | rhivent/algo-books-python | c4fa29616ca9a8a15ba40fa12d21fd8f35096d40 | [
"CC0-1.0"
] | 66 | 2015-11-02T03:38:02.000Z | 2022-03-05T17:36:26.000Z |
# Digit characters for bases up to 16 (lowercase hex).
CHAR_FOR_INT = '0123456789abcdef'


def to_string(n, base):
    """Recursively convert a non-negative integer *n* to its string
    representation in *base* (2 through 16, lowercase digits)."""
    if n < base:
        return CHAR_FOR_INT[n]
    quotient, remainder = divmod(n, base)
    return to_string(quotient, base) + CHAR_FOR_INT[remainder]
to_string(1453, 16)  # => '5ad' (1453 == 0x5AD; digits are lowercase)
| 17 | 62 | 0.637255 |
CHAR_FOR_INT = '0123456789abcdef'
def to_string(n, base):
if n < base:
return CHAR_FOR_INT[n]
return to_string(n // base, base) + CHAR_FOR_INT[n % base]
to_string(1453, 16)
| true | true |
1c475efe695ee9d1a051a1330fe3636e05ac3b4c | 579 | py | Python | setup.py | tijko/shadow | 8ba9a8c2de2be51fa4eb387a179dbc0ac4641575 | [
"MIT"
] | null | null | null | setup.py | tijko/shadow | 8ba9a8c2de2be51fa4eb387a179dbc0ac4641575 | [
"MIT"
] | null | null | null | setup.py | tijko/shadow | 8ba9a8c2de2be51fa4eb387a179dbc0ac4641575 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
    from setuptools import setup, Extension, find_packages
except ImportError:
    # distutils provides only setup/Extension; find_packages is unused below,
    # so the fallback keeps the script working without setuptools.
    from distutils.core import setup, Extension

# Read the long description and license up front with context managers so the
# file handles are closed promptly instead of leaking until interpreter exit.
with open('README.md') as readme_file:
    long_description = readme_file.read()
with open('LICENSE') as license_file:
    license_text = license_file.read()

setup(
    name='shadow',
    version='0.0.1',
    author='Tim Konick',
    author_email='konick781@gmail.com',
    url='',
    description='Provides auxillary data on processes',
    long_description=long_description,
    license=license_text,
    packages=['shadow', 'shadow.taskstats'],
    ext_modules=[Extension('libshadow', sources=['shadow/libshadow/libshadow.c'])]
)
| 26.318182 | 82 | 0.670121 |
try:
from setuptools import setup, Extension, find_packages
except ImportError:
from distutils.core import setup, Extension
setup(
name = 'shadow',
version = '0.0.1',
author='Tim Konick',
author_email='konick781@gmail.com',
url='',
description='Provides auxillary data on processes',
long_description=open('README.md').read(),
license=open('LICENSE').read(),
packages=['shadow', 'shadow.taskstats'],
ext_modules=[Extension('libshadow', sources=['shadow/libshadow/libshadow.c'])]
)
| true | true |
1c475f7fce5478d597fc5b92d7692cf01e58b4c5 | 1,559 | py | Python | thenewboston_node/business_logic/models/signed_change_request/base.py | nishp77/thenewboston-node | 158b1f1739b2c6c9c21c80e9da854ca141f1cf8f | [
"MIT"
] | null | null | null | thenewboston_node/business_logic/models/signed_change_request/base.py | nishp77/thenewboston-node | 158b1f1739b2c6c9c21c80e9da854ca141f1cf8f | [
"MIT"
] | null | null | null | thenewboston_node/business_logic/models/signed_change_request/base.py | nishp77/thenewboston-node | 158b1f1739b2c6c9c21c80e9da854ca141f1cf8f | [
"MIT"
] | null | null | null | import copy
import logging
from dataclasses import dataclass
from typing import ClassVar, Type, TypeVar
from thenewboston_node.business_logic.models.base import BaseDataclass
from thenewboston_node.core.logging import validates
from thenewboston_node.core.utils.cryptography import derive_public_key
from thenewboston_node.core.utils.dataclass import cover_docstring, revert_docstring
from thenewboston_node.core.utils.types import hexstr
from ..mixins.signable import SignableMixin
from ..signed_change_request_message import SignedChangeRequestMessage
T = TypeVar('T', bound='SignedChangeRequest')
logger = logging.getLogger(__name__)
@revert_docstring
@dataclass
@cover_docstring
class SignedChangeRequest(SignableMixin, BaseDataclass):
    # NOTE: the revert_docstring/cover_docstring decorators manage this
    # class's docstring themselves, so no literal docstring is added here.
    # Concrete subclasses set this to the block type they produce.
    block_type: ClassVar[str]
    # The signed payload; the `signer` field comes from SignableMixin.
    message: SignedChangeRequestMessage
    @classmethod
    def create_from_signed_change_request_message(
        cls: Type[T], message: SignedChangeRequestMessage, signing_key: hexstr
    ) -> T:
        # Deep-copy the message so later mutations by the caller cannot
        # invalidate the signature computed below.
        request = cls(signer=derive_public_key(signing_key), message=copy.deepcopy(message))
        request.sign(signing_key)
        return request
    @validates('signed request')
    def validate(self, blockchain, block_number: int):
        # Validate the inner payload first, then the outer signature.
        self.validate_message()
        with validates('block signature'):
            self.validate_signature()
    @validates('signed request message')
    def validate_message(self):
        self.message.validate()
    def get_updated_account_states(self, blockchain):
        # Abstract-by-convention: subclasses compute the account-state delta.
        raise NotImplementedError('Must be implemented in subclass')
| 32.479167 | 92 | 0.77678 | import copy
import logging
from dataclasses import dataclass
from typing import ClassVar, Type, TypeVar
from thenewboston_node.business_logic.models.base import BaseDataclass
from thenewboston_node.core.logging import validates
from thenewboston_node.core.utils.cryptography import derive_public_key
from thenewboston_node.core.utils.dataclass import cover_docstring, revert_docstring
from thenewboston_node.core.utils.types import hexstr
from ..mixins.signable import SignableMixin
from ..signed_change_request_message import SignedChangeRequestMessage
T = TypeVar('T', bound='SignedChangeRequest')
logger = logging.getLogger(__name__)
@revert_docstring
@dataclass
@cover_docstring
class SignedChangeRequest(SignableMixin, BaseDataclass):
block_type: ClassVar[str]
message: SignedChangeRequestMessage
@classmethod
def create_from_signed_change_request_message(
cls: Type[T], message: SignedChangeRequestMessage, signing_key: hexstr
) -> T:
request = cls(signer=derive_public_key(signing_key), message=copy.deepcopy(message))
request.sign(signing_key)
return request
@validates('signed request')
def validate(self, blockchain, block_number: int):
self.validate_message()
with validates('block signature'):
self.validate_signature()
@validates('signed request message')
def validate_message(self):
self.message.validate()
def get_updated_account_states(self, blockchain):
raise NotImplementedError('Must be implemented in subclass')
| true | true |
1c475f9553b3a997c5e9fa81cedd6cc86997d3a6 | 4,621 | py | Python | ProjectFiles/UMKCEntrepreneurialLegalServicesClinicDocuments/IntakeForm.py | KCLegalHackers/2016-Coding-For-Lawyers | 0e7aeaf3b446defcfa60c862dfac5627cedd1560 | [
"MIT"
] | 1 | 2021-01-15T00:34:54.000Z | 2021-01-15T00:34:54.000Z | ProjectFiles/UMKCEntrepreneurialLegalServicesClinicDocuments/IntakeForm.py | KCLegalHackers/2016-Coding-For-Lawyers | 0e7aeaf3b446defcfa60c862dfac5627cedd1560 | [
"MIT"
] | null | null | null | ProjectFiles/UMKCEntrepreneurialLegalServicesClinicDocuments/IntakeForm.py | KCLegalHackers/2016-Coding-For-Lawyers | 0e7aeaf3b446defcfa60c862dfac5627cedd1560 | [
"MIT"
] | null | null | null | print('Application for Services: To be considered for acceptance as a client, you must complete this form and return it to the Entrepreneurial Legal Services Clinic. Acceptance as a client of the UMKC Entrepreneurial Legal Services Clinic is not guaranteed, and is ultimately based upon available of resources and time to provide services, absence of conflicts of interest, financial need of the client, and educational value for our students. What is your full name?')
clientName = input()
print('What is the date? (dd/mm/yyyy)')
date = input()
print('What is the name of the entity?')
companyName = input()
print('What is your mailing address')
clientAddress = input()
print('What city do you live in?')
clientCity = input()
print('What state do you live in?')
clientState = input()
print('What zip code do you live in?')
clientZip = input()
print('What is your telephone number?')
clientPhone = input()
print('What is your email address? By providing your email address you are giving the Entrepreneurial Legal Services Clinic express permission to contact you via email with matters regarding your business and to contact you regarding other information that may be of interest to you. If you do not want the Entrepreneurial Legal Services Clinic type N/A')
clientEmail = input()
print('Applicants for services are hereby notified that the University of Missouri-Kansas City and the Entrepreneurial Legal Services Clinic do not discriminate on the basis of race, color, creed, sex, sexual orientation, age, national origin, disability or Vietnam era veterans status in admission or access to, or treatment or employment in, its programs and activities. Financial Information (required for means testing): What is your total expected income for this year?')
expectedAnnualIncome = input()
print('What was your total expected income for last year?')
pastAnnualIncome = input()
print('How much available capital do you have to spend for your entity?')
availableCapital = input()
print('Are you currently employed?')
clientEmployment = input()
# Accept "Yes"/"YES" etc.; previously only the exact string 'yes' matched.
if clientEmployment.strip().lower() == 'yes':
    print('If so, where?')
    employmentLocation = input()
else:
    # Keep the variable defined for unemployed applicants so any later use
    # of employmentLocation cannot raise NameError.
    employmentLocation = 'n/a'
# Bug fix: the demographic prompt used to be printed twice for unemployed
# applicants (once inside the else branch and once unconditionally here).
print('Demographic Information. This information is collected for demographic purposes only; it is anonymous and does not affect your acceptance as a client. What is your race?')
clientRace = input()
print('What is your gender?')
clientGender = input()
print('What is your marital status?')
clientMaritalStatus = input()
print('What is your highest level of education?')
clientEducation = input()
print('Required for conflicts check List any person or company, if any, who may have a claim against you or your business. If none, type n/a')
clientClaimants = input()
print('Are you currently a student at the University of Missouri at Kansas City or any other U- System campus?')
clientUMKCStudent = input()
print('Do you currently have or expect to have any contracts, employment, or other business relationship with the University of Missouri-Kansas City or any other campus, office or operation of the University of Missouri System?')
clientUMKCContracts = input()
print('Briefly state your legal question or problem/type of legal advice sought. If unsure, type n/a')
legalAdivceSought = input()
print('Please list any deadlines under which you are operating (court dates, etc. if any). If none, type n/a')
clientDeadlines = input()
print('I hereby state the above information is true to the best of my knowledge, and give permission to the Entrepreneurial Legal Services Clinic to check for potential conflicts of interests between myself and affiliates, and with current and former clients of the clinic, clients of firms at which students may be working, UMKC ,and the University of Missouri. I further confirm that I understand that work in the Entrepreneurial Legal Services Clinic is performed by law students under the supervision of licensed attorneys and therefore I may experience a delay due to the work being completed by said students. Type your signature in the following box to confirm that you are comfortable with the preceding obligations.')
clientSignature = input()
print('Type the date in the following box to confirm that you are comfortable with the preceding obligations')
clientDate = input()
# [Client Intake Form](http://www1.law.umkc.edu/clinics/els/application.pdf)
| 78.322034 | 726 | 0.781649 | print('Application for Services: To be considered for acceptance as a client, you must complete this form and return it to the Entrepreneurial Legal Services Clinic. Acceptance as a client of the UMKC Entrepreneurial Legal Services Clinic is not guaranteed, and is ultimately based upon available of resources and time to provide services, absence of conflicts of interest, financial need of the client, and educational value for our students. What is your full name?')
clientName = input()
print('What is the date? (dd/mm/yyyy)')
date = input()
print('What is the name of the entity?')
companyName = input()
print('What is your mailing address')
clientAddress = input()
print('What city do you live in?')
clientCity = input()
print('What state do you live in?')
clientState = input()
print('What zip code do you live in?')
clientZip = input()
print('What is your telephone number?')
clientPhone = input()
print('What is your email address? By providing your email address you are giving the Entrepreneurial Legal Services Clinic express permission to contact you via email with matters regarding your business and to contact you regarding other information that may be of interest to you. If you do not want the Entrepreneurial Legal Services Clinic type N/A')
clientEmail = input()
print('Applicants for services are hereby notified that the University of Missouri-Kansas City and the Entrepreneurial Legal Services Clinic do not discriminate on the basis of race, color, creed, sex, sexual orientation, age, national origin, disability or Vietnam era veterans status in admission or access to, or treatment or employment in, its programs and activities. Financial Information (required for means testing): What is your total expected income for this year?')
expectedAnnualIncome = input()
print('What was your total expected income for last year?')
pastAnnualIncome = input()
print('How much available capital do you have to spend for your entity?')
availableCapital = input()
print('Are you currently employed?')
clientEmployment = input()
if str(clientEmployment) == 'yes':
print('If so, where?')
employmentLocation = input()
else:
print('Demographic Information. This information is collected for demographic purposes only; it is anonymous and does not affect your acceptance as a client. What is your race?')
print('Demographic Information. This information is collected for demographic purposes only; it is anonymous and does not affect your acceptance as a client. What is your race?')
clientRace = input()
print('What is your gender?')
clientGender = input()
print('What is your marital status?')
clientMaritalStatus = input()
print('What is your highest level of education?')
clientEducation = input()
print('Required for conflicts check List any person or company, if any, who may have a claim against you or your business. If none, type n/a')
clientClaimants = input()
print('Are you currently a student at the University of Missouri at Kansas City or any other U- System campus?')
clientUMKCStudent = input()
print('Do you currently have or expect to have any contracts, employment, or other business relationship with the University of Missouri-Kansas City or any other campus, office or operation of the University of Missouri System?')
clientUMKCContracts = input()
print('Briefly state your legal question or problem/type of legal advice sought. If unsure, type n/a')
legalAdivceSought = input()
print('Please list any deadlines under which you are operating (court dates, etc. if any). If none, type n/a')
clientDeadlines = input()
print('I hereby state the above information is true to the best of my knowledge, and give permission to the Entrepreneurial Legal Services Clinic to check for potential conflicts of interests between myself and affiliates, and with current and former clients of the clinic, clients of firms at which students may be working, UMKC ,and the University of Missouri. I further confirm that I understand that work in the Entrepreneurial Legal Services Clinic is performed by law students under the supervision of licensed attorneys and therefore I may experience a delay due to the work being completed by said students. Type your signature in the following box to confirm that you are comfortable with the preceding obligations.')
clientSignature = input()
print('Type the date in the following box to confirm that you are comfortable with the preceding obligations')
clientDate = input()
# [Client Intake Form](http://www1.law.umkc.edu/clinics/els/application.pdf)
| true | true |
1c475fd0731889687d14b2130b367eb0ec6cbbcf | 1,749 | py | Python | setup.py | gaussian/django-sql-explorer | 844c8f59f8a3de31ef445e18356e97afded50dfc | [
"MIT"
] | null | null | null | setup.py | gaussian/django-sql-explorer | 844c8f59f8a3de31ef445e18356e97afded50dfc | [
"MIT"
] | null | null | null | setup.py | gaussian/django-sql-explorer | 844c8f59f8a3de31ef445e18356e97afded50dfc | [
"MIT"
] | null | null | null | import os
from setuptools import setup
from explorer import __version__
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's
    directory.

    Uses a context manager so the file handle is closed promptly instead of
    relying on garbage collection (the original leaked the handle).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fobj:
        return fobj.read()
# Package metadata for django-sql-explorer.
setup(
    name="django-sql-explorer",
    version=__version__,
    author="Chris Clark",
    author_email="chris@untrod.com",
    description=("A pluggable app that allows users (admins) to execute SQL,"
                 " view, and export the results."),
    license="MIT",
    keywords="django sql explorer reports reporting csv database query",
    url="https://github.com/groveco/django-sql-explorer",
    packages=['explorer'],
    long_description=read('README.rst'),
    # NOTE(review): the classifiers below advertise Django 1.10-2.1, but
    # install_requires pins Django>=2.2.14 -- likely stale trove entries;
    # confirm the intended support matrix.
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Topic :: Utilities',
        'Framework :: Django :: 1.10',
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 2.0',
        'Framework :: Django :: 2.1',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    install_requires=[
        'Django>=2.2.14',
        'sqlparse>=0.1.18',
        'unicodecsv>=0.14.1',
        'six>=1.10.0',
    ],
    include_package_data=True,
    zip_safe=False,
)
| 33.634615 | 79 | 0.612922 | import os
from setuptools import setup
from explorer import __version__
# README file and 2) it's easier to type in the README file than to put a raw
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="django-sql-explorer",
version=__version__,
author="Chris Clark",
author_email="chris@untrod.com",
description=("A pluggable app that allows users (admins) to execute SQL,"
" view, and export the results."),
license="MIT",
keywords="django sql explorer reports reporting csv database query",
url="https://github.com/groveco/django-sql-explorer",
packages=['explorer'],
long_description=read('README.rst'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Utilities',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
install_requires=[
'Django>=2.2.14',
'sqlparse>=0.1.18',
'unicodecsv>=0.14.1',
'six>=1.10.0',
],
include_package_data=True,
zip_safe=False,
)
| true | true |
1c4760d27cf1f4616f2f9ae082e15fd487249b5e | 3,074 | py | Python | tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/keras_evaluation_test.py | andrewyguo/privacy | a33afde0c105ece6c48b17a80f13899cf3e7c1b3 | [
"Apache-2.0"
] | null | null | null | tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/keras_evaluation_test.py | andrewyguo/privacy | a33afde0c105ece6c48b17a80f13899cf3e7c1b3 | [
"Apache-2.0"
] | null | null | null | tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/keras_evaluation_test.py | andrewyguo/privacy | a33afde0c105ece6c48b17a80f13899cf3e7c1b3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import keras_evaluation
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackResults
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics
class UtilsTest(absltest.TestCase):
  # Smoke tests for keras_evaluation: loss calculation and attack running
  # on a tiny synthetic classification problem.
  def __init__(self, methodname):
    """Initialize the test class."""
    super().__init__(methodname)
    # Small synthetic problem: 50 train / 100 test samples, 5 classes.
    self.ntrain, self.ntest = 50, 100
    self.nclass = 5
    self.ndim = 10
    # Generate random training and test data
    self.train_data = np.random.rand(self.ntrain, self.ndim)
    self.test_data = np.random.rand(self.ntest, self.ndim)
    self.train_labels = np.random.randint(self.nclass, size=self.ntrain)
    self.test_labels = np.random.randint(self.nclass, size=self.ntest)
    # Single dense layer emitting logits; the loss expects integer labels.
    self.model = tf.keras.Sequential([tf.keras.layers.Dense(self.nclass)])
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    self.model.compile(optimizer='Adam', loss=loss, metrics=['accuracy'])
  def test_calculate_losses(self):
    """Test calculating the loss."""
    # Expect per-example logits of shape (n, nclass) and losses of shape (n,).
    pred, loss = keras_evaluation.calculate_losses(self.model, self.train_data,
                                                   self.train_labels)
    self.assertEqual(pred.shape, (self.ntrain, self.nclass))
    self.assertEqual(loss.shape, (self.ntrain,))
    pred, loss = keras_evaluation.calculate_losses(self.model, self.test_data,
                                                   self.test_labels)
    self.assertEqual(pred.shape, (self.ntest, self.nclass))
    self.assertEqual(loss.shape, (self.ntest,))
  def test_run_attack_on_keras_model(self):
    """Test the attack."""
    results = keras_evaluation.run_attack_on_keras_model(
        self.model, (self.train_data, self.train_labels),
        (self.test_data, self.test_labels),
        attack_types=[AttackType.THRESHOLD_ATTACK])
    self.assertIsInstance(results, AttackResults)
    # One threshold attack over the entire dataset yields two flattened
    # entries (one per reported metric).
    att_types, att_slices, att_metrics, att_values = get_flattened_attack_metrics(
        results)
    self.assertLen(att_types, 2)
    self.assertLen(att_slices, 2)
    self.assertLen(att_metrics, 2)
    self.assertLen(att_values, 2)
if __name__ == '__main__':
absltest.main()
| 41.540541 | 125 | 0.737801 |
from absl.testing import absltest
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import keras_evaluation
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackResults
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics
class UtilsTest(absltest.TestCase):
def __init__(self, methodname):
super().__init__(methodname)
self.ntrain, self.ntest = 50, 100
self.nclass = 5
self.ndim = 10
self.train_data = np.random.rand(self.ntrain, self.ndim)
self.test_data = np.random.rand(self.ntest, self.ndim)
self.train_labels = np.random.randint(self.nclass, size=self.ntrain)
self.test_labels = np.random.randint(self.nclass, size=self.ntest)
self.model = tf.keras.Sequential([tf.keras.layers.Dense(self.nclass)])
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
self.model.compile(optimizer='Adam', loss=loss, metrics=['accuracy'])
def test_calculate_losses(self):
pred, loss = keras_evaluation.calculate_losses(self.model, self.train_data,
self.train_labels)
self.assertEqual(pred.shape, (self.ntrain, self.nclass))
self.assertEqual(loss.shape, (self.ntrain,))
pred, loss = keras_evaluation.calculate_losses(self.model, self.test_data,
self.test_labels)
self.assertEqual(pred.shape, (self.ntest, self.nclass))
self.assertEqual(loss.shape, (self.ntest,))
def test_run_attack_on_keras_model(self):
results = keras_evaluation.run_attack_on_keras_model(
self.model, (self.train_data, self.train_labels),
(self.test_data, self.test_labels),
attack_types=[AttackType.THRESHOLD_ATTACK])
self.assertIsInstance(results, AttackResults)
att_types, att_slices, att_metrics, att_values = get_flattened_attack_metrics(
results)
self.assertLen(att_types, 2)
self.assertLen(att_slices, 2)
self.assertLen(att_metrics, 2)
self.assertLen(att_values, 2)
if __name__ == '__main__':
absltest.main()
| true | true |
1c47629a3fff6341d9f92bd348f85e77bc92bff9 | 282 | py | Python | html_downloader.py | etworker/TinySpider | b3e3c67451d361d064d915875582341b84f0d49d | [
"MIT"
] | null | null | null | html_downloader.py | etworker/TinySpider | b3e3c67451d361d064d915875582341b84f0d49d | [
"MIT"
] | null | null | null | html_downloader.py | etworker/TinySpider | b3e3c67451d361d064d915875582341b84f0d49d | [
"MIT"
] | null | null | null | __author__ = 'worker'
import urllib2
class HtmlDownloader(object):
    """Fetches raw HTML documents over HTTP (Python 2 / urllib2)."""

    def download(self, url):
        """Return the body of `url`, or None for a missing url or a
        non-200 response.

        The response object is always closed (the original leaked it).
        """
        if url is None:
            return None
        response = urllib2.urlopen(url)
        try:
            if response.getcode() != 200:
                return None
            return response.read()
        finally:
            response.close()
return response.read() | 20.142857 | 39 | 0.588652 | __author__ = 'worker'
import urllib2
class HtmlDownloader(object):
def download(self, url):
if url is None:
return None
response = urllib2.urlopen(url)
if response.getcode() != 200:
return None
return response.read() | true | true |
1c4762e3f34e2ed7a22ada6411f795fe540463d8 | 18,315 | py | Python | pfp/native/compat_io.py | krx/pfp-construct | 248c43781e15ba6eb0a9a6c0982a40c0e380d9b6 | [
"MIT"
] | null | null | null | pfp/native/compat_io.py | krx/pfp-construct | 248c43781e15ba6eb0a9a6c0982a40c0e380d9b6 | [
"MIT"
] | null | null | null | pfp/native/compat_io.py | krx/pfp-construct | 248c43781e15ba6eb0a9a6c0982a40c0e380d9b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
This module of native functions is implemented for
compatability with 010 editor functions. Some of these functions
are nops, some are fully implemented.
"""
from pytest import skip
import six
import sys
from pfp.native import native
import pfp.interp
import pfp.errors as errors
import pfp.bitwrap as bitwrap
from .. import utils
import construct as C
# http://www.sweetscape.com/010editor/manual/FuncIO.htm
# void BigEndian()
@native(name="BigEndian", ret=None)
def BigEndian(params, ctxt, scope, stream, coord):
    """Switch the interpreter's global endianness to big-endian."""
    nargs = len(params)
    if nargs != 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(nargs)
        )
    pfp.interp.Endian.current = pfp.interp.Endian.BIG
# void BitfieldDisablePadding()
@native(name="BitfieldDisablePadding", ret=None, send_interp=True)
def BitfieldDisablePadding(params, ctxt, scope, stream, coord, interp):
    """Turn off padding of bitfields (010's BitfieldDisablePadding())."""
    nargs = len(params)
    if nargs != 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(nargs)
        )
    interp.set_bitfield_padded(False)
# void BitfieldEnablePadding()
@native(name="BitfieldEnablePadding", ret=None, send_interp=True)
def BitfieldEnablePadding(params, ctxt, scope, stream, coord, interp):
    """Turn on padding of bitfields (010's BitfieldEnablePadding())."""
    nargs = len(params)
    if nargs != 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(nargs)
        )
    interp.set_bitfield_padded(True)
# void BitfieldLeftToRight()
@native(name="BitfieldLeftToRight", ret=None, send_interp=True)
def BitfieldLeftToRight(params, ctxt, scope, stream, coord, interp):
    """Read bitfields left-to-right (010's BitfieldLeftToRight())."""
    nargs = len(params)
    if nargs != 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(nargs)
        )
    interp.set_bitfield_direction(interp.BITFIELD_DIR_LEFT_RIGHT)
# void BitfieldRightToLeft()
@native(name="BitfieldRightToLeft", ret=None, send_interp=True)
def BitfieldRightToLeft(params, ctxt, scope, stream, coord, interp):
    """Read bitfields right-to-left (010's BitfieldRightToLeft())."""
    nargs = len(params)
    if nargs != 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(nargs)
        )
    interp.set_bitfield_direction(interp.BITFIELD_DIR_RIGHT_LEFT)
# double ConvertBytesToDouble( uchar byteArray[] )
@native(name="ConvertBytesToDouble", ret=C.Double)
def ConvertBytesToDouble(params, ctxt, scope, stream, coord):
    # 010 `double ConvertBytesToDouble(uchar byteArray[])` -- not ported yet.
    raise NotImplementedError()
# float ConvertBytesToFloat( uchar byteArray[] )
@native(name="ConvertBytesToFloat", ret=C.Single)
def ConvertBytesToFloat(params, ctxt, scope, stream, coord):
    # 010 `float ConvertBytesToFloat(uchar byteArray[])` -- not ported yet.
    raise NotImplementedError()
# hfloat ConvertBytesToHFloat( uchar byteArray[] )
@native(name="ConvertBytesToHFloat", ret=C.Single)
def ConvertBytesToHFloat(params, ctxt, scope, stream, coord):
    # 010 `hfloat ConvertBytesToHFloat(uchar byteArray[])` -- not ported yet.
    raise NotImplementedError()
# int ConvertDataToBytes( data_type value, uchar byteArray[] )
@native(name="ConvertDataToBytes", ret=C.Int)
def ConvertDataToBytes(params, ctxt, scope, stream, coord):
    # 010 `int ConvertDataToBytes(data_type value, uchar byteArray[])` --
    # not ported yet.
    raise NotImplementedError()
# void DeleteBytes( int64 start, int64 size )
@native(name="DeleteBytes", ret=None)
def DeleteBytes(params, ctxt, scope, stream, coord):
    # 010 `void DeleteBytes(int64 start, int64 size)` -- not ported yet.
    raise NotImplementedError()
# int DirectoryExists( string dir )
@native(name="DirectoryExists", ret=C.Int)
def DirectoryExists(params, ctxt, scope, stream, coord):
    # 010 `int DirectoryExists(string dir)` -- not ported yet.
    raise NotImplementedError()
# int FEof()
@native(name="FEof", ret=bool)
def FEof(params, ctxt, scope, stream, coord):
    """Return True when the template stream has no more data (010 FEof())."""
    if params:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params))
        )
    # construct's stream helper performs the EOF probe for us
    return C.stream_iseof(ctxt._io)
# int64 FileSize()
@native(name="FileSize", ret=int)
def FileSize(params, ctxt, scope, stream, coord):
    # 010 `int64 FileSize()`: total size of the input being parsed.
    if len(params) > 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params))
        )
    # NOTE(review): assumes ctxt._io exposes a .size() method (pfp's
    # BitwrappedStream does); a raw construct stream would not -- confirm.
    return ctxt._io.size()
# TFileList FindFiles( string dir, string filter )
@native(name="FindFiles", ret=None)
def FindFiles(params, ctxt, scope, stream, coord):
    # 010 `TFileList FindFiles(string dir, string filter)` -- not ported yet.
    raise NotImplementedError()
# int FPrintf( int fileNum, char format[], ... )
@native(name="FPrintf", ret=C.Int)
def FPrintf(params, ctxt, scope, stream, coord):
    # 010 `int FPrintf(int fileNum, char format[], ...)` -- not ported yet.
    raise NotImplementedError()
# int FSeek( int64 pos )
@native(name="FSeek", ret=int)
def FSeek(params, ctxt, scope, stream, coord):
    """Seek the template stream to the absolute position ``pos``.

    Returns 0 if successful or -1 if the address is out of range.
    """
    if len(params) != 1:
        raise errors.InvalidArguments(
            coord,
            "{} args".format(len(params)),
            "FSeek accepts only one argument",
        )
    pos = utils.evaluate(params[0], ctxt)
    # Out-of-range addresses (negative as well as past EOF) are reported
    # with -1, mirroring 010 Editor's FSeek contract; previously a negative
    # position slipped through to the raw seek.
    if pos < 0 or pos > ctxt._io.size():
        return -1
    C.stream_seek(ctxt._io, pos, 0, "")
    return 0
# curr_pos = stream.tell()
# fsize = stream.size()
# if pos > fsize:
# stream.seek(fsize)
# return -1
# elif pos < 0:
# stream.seek(0)
# return -1
# diff = pos - curr_pos
# if diff < 0:
# stream.seek(pos)
# return 0
# data = stream.read(diff)
# # let the ctxt automatically append numbers, as needed, unless the previous
# # child was also a skipped field
# skipped_name = "_skipped"
# if len(ctxt._pfp__children) > 0 and ctxt._pfp__children[
# -1
# ]._pfp__name.startswith("_skipped"):
# old_name = ctxt._pfp__children[-1]._pfp__name
# data = ctxt._pfp__children[-1].raw_data + data
# skipped_name = old_name
# ctxt._pfp__children = ctxt._pfp__children[:-1]
# del ctxt._pfp__children_map[old_name]
# tmp_stream = bitwrap.BitwrappedStream(six.BytesIO(data))
# new_field = pfp.fields.Array(len(data), C.Byte, tmp_stream)
# ctxt._pfp__add_child(skipped_name, new_field, stream)
# scope.add_var(skipped_name, new_field)
# return 0
# int FSkip( int64 offset )
@native(name="FSkip", ret=int)
def FSkip(params, ctxt, scope, stream, coord):
    """Skip ``offset`` bytes relative to the current stream position.

    Returns 0 if successful or -1 if the address is out of range.
    """
    if len(params) != 1:
        raise errors.InvalidArguments(
            coord,
            "{} args".format(len(params)),
            "FSkip accepts only one argument",
        )
    skip_amt = params[0]
    # The offset may be a (possibly nested) lazy expression; resolve it
    # against the current parse context first.
    while callable(skip_amt):
        skip_amt = skip_amt(ctxt)
    # Enforce the documented 0 / -1 contract: previously the raw seek
    # position was returned, so a successful skip never yielded 0.
    target = C.stream_tell(ctxt._io, None) + skip_amt
    if target < 0 or target > ctxt._io.size():
        return -1
    C.stream_seek(ctxt._io, target, 0, "")
    return 0
# pos = skip_amt + stream.tell()
# return FSeek([pos], ctxt, scope, stream, coord)
# int64 FTell()
@native(name="FTell", ret=int)
def FTell(params, ctxt, scope, stream, coord):
    """Return the current absolute position in the template stream."""
    if params:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params))
        )
    return C.stream_tell(ctxt._io, None)
# void InsertBytes( int64 start, int64 size, uchar value=0 )
@native(name="InsertBytes", ret=None)
def InsertBytes(params, ctxt, scope, stream, coord):
    # 010 `void InsertBytes(int64 start, int64 size, uchar value=0)` --
    # not ported yet.
    raise NotImplementedError()
# int IsBigEndian()
@native(name="IsBigEndian", ret=bool)
def IsBigEndian(params, ctxt, scope, stream, coord):
    """Return True when the interpreter is currently big-endian."""
    nargs = len(params)
    if nargs != 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(nargs)
        )
    return pfp.interp.Endian.current == pfp.interp.Endian.BIG
# int IsLittleEndian()
@native(name="IsLittleEndian", ret=bool)
def IsLittleEndian(params, ctxt, scope, stream, coord):
    """Return True when the interpreter is currently little-endian."""
    nargs = len(params)
    if nargs != 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(nargs)
        )
    return pfp.interp.Endian.current == pfp.interp.Endian.LITTLE
# void LittleEndian()
@native(name="LittleEndian", ret=None)
def LittleEndian(params, ctxt, scope, stream, coord):
    """Switch the interpreter's global endianness to little-endian."""
    nargs = len(params)
    if nargs != 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(nargs)
        )
    pfp.interp.Endian.current = pfp.interp.Endian.LITTLE
# int MakeDir( string dir )
@native(name="MakeDir", ret=C.Int)
def MakeDir(params, ctxt, scope, stream, coord):
    # 010 `int MakeDir(string dir)` -- not ported yet.
    raise NotImplementedError()
# void OverwriteBytes( int64 start, int64 size, uchar value=0 )
@native(name="OverwriteBytes", ret=None)
def OverwriteBytes(params, ctxt, scope, stream, coord):
    # 010 `void OverwriteBytes(int64 start, int64 size, uchar value=0)` --
    # not ported yet.
    raise NotImplementedError()
def _read_data(params, ctxt, cls, coord):
    """Parse a single value of construct type ``cls`` and restore the stream.

    With no params, the value is read at the current position; with one
    param, at the evaluated absolute position.  The stream position and bit
    state are restored afterwards, so these are pure "peek" reads.
    """
    stream = ctxt._io
    bits = stream._bits
    curr_pos = stream.tell()
    if len(params) == 1:
        pos = utils.evaluate(params[0], ctxt)
        stream.seek(pos, 0)
    elif len(params) > 1:
        raise errors.InvalidArguments(
            coord, "at most 1 arguments", "{} args".format(len(params))
        )
    # Build a fresh FormatField honoring the current endianness instead of
    # mutating the shared construct singleton: the previous in-place
    # ``cls.fmtstr`` rewrite leaked the endianness change to every other
    # user of ``cls`` process-wide.
    fmt = C.FormatField(pfp.interp.Endian.current, cls.fmtstr[1:])
    res = fmt.parse_stream(stream)
    # reset the stream
    stream.seek(curr_pos, 0)
    stream._bits = bits
    return res
# char ReadByte( int64 pos=FTell() )
@native(name="ReadByte", ret=int)
def ReadByte(params, ctxt, scope, stream, coord):
    """Peek a signed 8-bit integer at ``pos`` (default: current position)."""
    return _read_data(params, ctxt, C.Int8sb, coord)
# double ReadDouble( int64 pos=FTell() )
@native(name="ReadDouble", ret=float)
def ReadDouble(params, ctxt, scope, stream, coord):
    """Peek a 64-bit float at ``pos`` (default: current position)."""
    return _read_data(params, ctxt, C.Double, coord)
# float ReadFloat( int64 pos=FTell() )
@native(name="ReadFloat", ret=float)
def ReadFloat(params, ctxt, scope, stream, coord):
    """Peek a 32-bit float at ``pos`` (default: current position)."""
    return _read_data(params, ctxt, C.Single, coord)
# hfloat ReadHFloat( int64 pos=FTell() )
@native(name="ReadHFloat", ret=float)
def ReadHFloat(params, ctxt, scope, stream, coord):
    """Peek a half-precision (16-bit) float at ``pos``.

    010's ``hfloat`` is 2 bytes wide; the previous C.Single read consumed
    4 bytes and decoded the wrong format.
    """
    return _read_data(params, ctxt, C.Float16b, coord)
# int ReadInt( int64 pos=FTell() )
@native(name="ReadInt", ret=int)
def ReadInt(params, ctxt, scope, stream, coord):
    """Peek a signed 32-bit integer at ``pos`` (default: current position)."""
    return _read_data(params, ctxt, C.Int32sb, coord)
# int64 ReadInt64( int64 pos=FTell() )
@native(name="ReadInt64", ret=int)
def ReadInt64(params, ctxt, scope, stream, coord):
    """Peek a signed 64-bit integer at ``pos`` (default: current position)."""
    return _read_data(params, ctxt, C.Int64sb, coord)
# int64 ReadQuad( int64 pos=FTell() )
@native(name="ReadQuad", ret=int)
def ReadQuad(params, ctxt, scope, stream, coord):
    """Peek a signed 64-bit integer (alias of ReadInt64)."""
    return _read_data(params, ctxt, C.Int64sb, coord)
# short ReadShort( int64 pos=FTell() )
@native(name="ReadShort", ret=int)
def ReadShort(params, ctxt, scope, stream, coord):
    """Peek a signed 16-bit integer at ``pos`` (default: current position)."""
    return _read_data(params, ctxt, C.Int16sb, coord)
# uchar ReadUByte( int64 pos=FTell() )
@native(name="ReadUByte", ret=int)
def ReadUByte(params, ctxt, scope, stream, coord):
    """Peek an unsigned 8-bit integer at ``pos`` (default: current position)."""
    return _read_data(params, ctxt, C.Byte, coord)
# uint ReadUInt( int64 pos=FTell() )
@native(name="ReadUInt", ret=int)
def ReadUInt(params, ctxt, scope, stream, coord):
    """Peek an unsigned 32-bit integer at ``pos`` (default: current position)."""
    return _read_data(params, ctxt, C.Int32ub, coord)
# uint64 ReadUInt64( int64 pos=FTell() )
@native(name="ReadUInt64", ret=int)
def ReadUInt64(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Int64ub, coord)
# uint64 ReadUQuad( int64 pos=FTell() )
@native(name="ReadUQuad", ret=int)
def ReadUQuad(params, ctxt, scope, stream, coord):
    """Read an unsigned 64-bit integer (same decoding as ReadUInt64)."""
    result = _read_data(params, ctxt, C.Int64ub, coord)
    return result
# ushort ReadUShort( int64 pos=FTell() )
@native(name="ReadUShort", ret=int)
def ReadUShort(params, ctxt, scope, stream, coord):
    """Read an unsigned 16-bit integer; stream position is restored by _read_data."""
    return _read_data(params, ctxt, C.Int16ub, coord)
# char[] ReadLine( int64 pos, int maxLen=-1, int includeLinefeeds=true )
@native(name="ReadLine", ret=C.CString)
def ReadLine(params, ctxt, scope, stream, coord):
    """010 ``ReadLine`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void ReadBytes( uchar buffer[], int64 pos, int n )
@native(name="ReadBytes", ret=None)
def ReadBytes(params, ctxt, scope, stream, coord):
    """Read ``n`` unsigned bytes starting at absolute offset ``pos`` into
    ``buffer`` (010 Editor's ``ReadBytes``). The read is non-destructive:
    the stream position and bit state are restored before returning.

    ``params`` is ``(buffer, pos, n)`` where ``buffer`` is an array of
    uchar/char fields, ``pos`` is the offset to read from, and ``n`` is the
    number of bytes to read.

    :raises errors.InvalidArguments: on wrong arity or argument types.
    """
    if len(params) != 3:
        raise errors.InvalidArguments(
            coord,
            "3 arguments (buffer, pos, n)",
            "{} args".format(len(params)),
        )
    if not isinstance(params[0], C.Bytes):
        raise errors.InvalidArguments(
            coord, "buffer must be Bytes", params[0].__class__.__name__
        )
    if params[0].field_cls not in [pfp.fields.UChar, C.Byte]:
        raise errors.InvalidArguments(
            coord,
            "buffer must be an array of uchar or char",
            params[0].field_cls.__name__,
        )
    if not isinstance(params[1], C.IntBase):
        raise errors.InvalidArguments(
            coord, "pos must be an integer", params[1].__class__.__name__
        )
    if not isinstance(params[2], C.IntBase):
        raise errors.InvalidArguments(
            coord, "n must be an integer", params[2].__class__.__name__
        )
    # Snapshot stream state so the read has no visible side effects.
    bits = stream._bits
    curr_pos = stream.tell()
    try:
        # BUGFIX: honor ``pos`` -- previously params[1] was validated but
        # never used, so bytes were read from the current stream position
        # instead of the requested offset.
        stream.seek(utils.evaluate(params[1], ctxt), 0)
        vals = [
            params[0].field_cls(stream)
            for _ in six.moves.range(utils.evaluate(params[2], ctxt))
        ]
    finally:
        # Restore position/bit state even if the read fails part-way.
        stream.seek(curr_pos, 0)
        stream._bits = bits
    params[0]._pfp__set_value(vals)
# ---------------------------------------------------------------------------
# 010 Editor string-read and "Text*" interface functions. None of these are
# implemented yet; each raises NotImplementedError when invoked.
# ---------------------------------------------------------------------------
# char[] ReadString( int64 pos, int maxLen=-1 )
@native(name="ReadString", ret=C.CString)
def ReadString(params, ctxt, scope, stream, coord):
    """010 ``ReadString`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int ReadStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadStringLength", ret=C.Int)
def ReadStringLength(params, ctxt, scope, stream, coord):
    """010 ``ReadStringLength`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# wstring ReadWLine( int64 pos, int maxLen=-1 )
@native(name="ReadWLine", ret=C.CString)
def ReadWLine(params, ctxt, scope, stream, coord):
    """010 ``ReadWLine`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# wstring ReadWString( int64 pos, int maxLen=-1 )
@native(name="ReadWString", ret=C.CString)
def ReadWString(params, ctxt, scope, stream, coord):
    """010 ``ReadWString`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int ReadWStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadWStringLength", ret=C.Int)
def ReadWStringLength(params, ctxt, scope, stream, coord):
    """010 ``ReadWStringLength`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int64 TextAddressToLine( int64 address )
@native(name="TextAddressToLine", ret=C.Long)
def TextAddressToLine(params, ctxt, scope, stream, coord):
    """010 ``TextAddressToLine`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int TextAddressToColumn( int64 address )
@native(name="TextAddressToColumn", ret=C.Int)
def TextAddressToColumn(params, ctxt, scope, stream, coord):
    """010 ``TextAddressToColumn`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int64 TextColumnToAddress( int64 line, int column )
@native(name="TextColumnToAddress", ret=C.Long)
def TextColumnToAddress(params, ctxt, scope, stream, coord):
    """010 ``TextColumnToAddress`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int64 TextGetNumLines()
@native(name="TextGetNumLines", ret=C.Long)
def TextGetNumLines(params, ctxt, scope, stream, coord):
    """010 ``TextGetNumLines`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int TextGetLineSize( int64 line, int includeLinefeeds=true )
@native(name="TextGetLineSize", ret=C.Int)
def TextGetLineSize(params, ctxt, scope, stream, coord):
    """010 ``TextGetLineSize`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int64 TextLineToAddress( int64 line )
@native(name="TextLineToAddress", ret=C.Long)
def TextLineToAddress(params, ctxt, scope, stream, coord):
    """010 ``TextLineToAddress`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int TextReadLine( char buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLine", ret=C.Int)
def TextReadLine(params, ctxt, scope, stream, coord):
    """010 ``TextReadLine`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# int TextReadLineW( wchar_t buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLineW", ret=C.Int)
def TextReadLineW(params, ctxt, scope, stream, coord):
    """010 ``TextReadLineW`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void TextWriteLine( const char buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLine", ret=None)
def TextWriteLine(params, ctxt, scope, stream, coord):
    """010 ``TextWriteLine`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void TextWriteLineW( const wchar_t buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLineW", ret=None)
def TextWriteLineW(params, ctxt, scope, stream, coord):
    """010 ``TextWriteLineW`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# ---------------------------------------------------------------------------
# 010 Editor "Write*" interface functions. None of these are implemented
# yet; each raises NotImplementedError when invoked.
# ---------------------------------------------------------------------------
# void WriteByte( int64 pos, char value )
@native(name="WriteByte", ret=None)
def WriteByte(params, ctxt, scope, stream, coord):
    """010 ``WriteByte`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteDouble( int64 pos, double value )
@native(name="WriteDouble", ret=None)
def WriteDouble(params, ctxt, scope, stream, coord):
    """010 ``WriteDouble`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteFloat( int64 pos, float value )
@native(name="WriteFloat", ret=None)
def WriteFloat(params, ctxt, scope, stream, coord):
    """010 ``WriteFloat`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteHFloat( int64 pos, float value )
@native(name="WriteHFloat", ret=None)
def WriteHFloat(params, ctxt, scope, stream, coord):
    """010 ``WriteHFloat`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteInt( int64 pos, int value )
@native(name="WriteInt", ret=None)
def WriteInt(params, ctxt, scope, stream, coord):
    """010 ``WriteInt`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteInt64( int64 pos, int64 value )
@native(name="WriteInt64", ret=None)
def WriteInt64(params, ctxt, scope, stream, coord):
    """010 ``WriteInt64`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteQuad( int64 pos, int64 value )
@native(name="WriteQuad", ret=None)
def WriteQuad(params, ctxt, scope, stream, coord):
    """010 ``WriteQuad`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteShort( int64 pos, short value )
@native(name="WriteShort", ret=None)
def WriteShort(params, ctxt, scope, stream, coord):
    """010 ``WriteShort`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteUByte( int64 pos, uchar value )
@native(name="WriteUByte", ret=None)
def WriteUByte(params, ctxt, scope, stream, coord):
    """010 ``WriteUByte`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteUInt( int64 pos, uint value )
@native(name="WriteUInt", ret=None)
def WriteUInt(params, ctxt, scope, stream, coord):
    """010 ``WriteUInt`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteUInt64( int64 pos, uint64 value )
@native(name="WriteUInt64", ret=None)
def WriteUInt64(params, ctxt, scope, stream, coord):
    """010 ``WriteUInt64`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteUQuad( int64 pos, uint64 value )
@native(name="WriteUQuad", ret=None)
def WriteUQuad(params, ctxt, scope, stream, coord):
    """010 ``WriteUQuad`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteUShort( int64 pos, ushort value )
@native(name="WriteUShort", ret=None)
def WriteUShort(params, ctxt, scope, stream, coord):
    """010 ``WriteUShort`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteBytes( const uchar buffer[], int64 pos, int n )
@native(name="WriteBytes", ret=None)
def WriteBytes(params, ctxt, scope, stream, coord):
    """010 ``WriteBytes`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteString( int64 pos, const char value[] )
@native(name="WriteString", ret=None)
def WriteString(params, ctxt, scope, stream, coord):
    """010 ``WriteString`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
# void WriteWString( int64 pos, const wstring value )
@native(name="WriteWString", ret=None)
def WriteWString(params, ctxt, scope, stream, coord):
    """010 ``WriteWString`` -- not implemented; always raises NotImplementedError."""
    raise NotImplementedError()
| 29.82899 | 93 | 0.690527 |
from pytest import skip
import six
import sys
from pfp.native import native
import pfp.interp
import pfp.errors as errors
import pfp.bitwrap as bitwrap
from .. import utils
import construct as C
@native(name="BigEndian", ret=None)
def BigEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
pfp.interp.Endian.current = pfp.interp.Endian.BIG
@native(name="BitfieldDisablePadding", ret=None, send_interp=True)
def BitfieldDisablePadding(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_padded(False)
@native(name="BitfieldEnablePadding", ret=None, send_interp=True)
def BitfieldEnablePadding(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_padded(True)
@native(name="BitfieldLeftToRight", ret=None, send_interp=True)
def BitfieldLeftToRight(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_direction(interp.BITFIELD_DIR_LEFT_RIGHT)
@native(name="BitfieldRightToLeft", ret=None, send_interp=True)
def BitfieldRightToLeft(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_direction(interp.BITFIELD_DIR_RIGHT_LEFT)
@native(name="ConvertBytesToDouble", ret=C.Double)
def ConvertBytesToDouble(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ConvertBytesToFloat", ret=C.Single)
def ConvertBytesToFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ConvertBytesToHFloat", ret=C.Single)
def ConvertBytesToHFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ConvertDataToBytes", ret=C.Int)
def ConvertDataToBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="DeleteBytes", ret=None)
def DeleteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="DirectoryExists", ret=C.Int)
def DirectoryExists(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="FEof", ret=bool)
def FEof(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return C.stream_iseof(ctxt._io)
@native(name="FileSize", ret=int)
def FileSize(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return ctxt._io.size()
@native(name="FindFiles", ret=None)
def FindFiles(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="FPrintf", ret=C.Int)
def FPrintf(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="FSeek", ret=int)
def FSeek(params, ctxt, scope, stream, coord):
if len(params) != 1:
raise errors.InvalidArguments(
coord,
"{} args".format(len(params)),
"FSeek accepts only one argument",
)
pos = utils.evaluate(params[0], ctxt)
if pos > ctxt._io.size():
return -1
C.stream_seek(ctxt._io, pos, 0, "")
return 0
=int)
def FSkip(params, ctxt, scope, stream, coord):
if len(params) != 1:
raise errors.InvalidArguments(
coord,
"{} args".format(len(params)),
"FSkip accepts only one argument",
)
skip_amt = params[0]
while callable(skip_amt):
skip_amt = skip_amt(ctxt)
return C.stream_seek(ctxt._io, skip_amt, whence=1, path="")
@native(name="FTell", ret=int)
def FTell(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return C.stream_tell(ctxt._io, None)
@native(name="InsertBytes", ret=None)
def InsertBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="IsBigEndian", ret=bool)
def IsBigEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return pfp.interp.Endian.current == pfp.interp.Endian.BIG
@native(name="IsLittleEndian", ret=bool)
def IsLittleEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return pfp.interp.Endian.current == pfp.interp.Endian.LITTLE
@native(name="LittleEndian", ret=None)
def LittleEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
pfp.interp.Endian.current = pfp.interp.Endian.LITTLE
@native(name="MakeDir", ret=C.Int)
def MakeDir(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="OverwriteBytes", ret=None)
def OverwriteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
def _read_data(params, ctxt, cls, coord):
stream = ctxt._io
bits = stream._bits
curr_pos = stream.tell()
if len(params) == 1:
pos = utils.evaluate(params[0], ctxt)
stream.seek(pos, 0)
elif len(params) > 1:
raise errors.InvalidArguments(
coord, "at most 1 arguments", "{} args".format(len(params))
)
cls.fmtstr = pfp.interp.Endian.current + cls.fmtstr[1:]
res = cls.parse_stream(stream)
stream.seek(curr_pos, 0)
stream._bits = bits
return res
@native(name="ReadByte", ret=int)
def ReadByte(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Int8sb, coord)
@native(name="ReadDouble", ret=float)
def ReadDouble(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Double, coord)
@native(name="ReadFloat", ret=float)
def ReadFloat(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Single, coord)
@native(name="ReadHFloat", ret=float)
def ReadHFloat(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Single, coord)
@native(name="ReadInt", ret=int)
def ReadInt(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Int32sb, coord)
@native(name="ReadInt64", ret=int)
def ReadInt64(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Int64sb, coord)
@native(name="ReadQuad", ret=int)
def ReadQuad(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Int64sb, coord)
@native(name="ReadShort", ret=int)
def ReadShort(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Int16sb, coord)
@native(name="ReadUByte", ret=int)
def ReadUByte(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Byte, coord)
@native(name="ReadUInt", ret=int)
def ReadUInt(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Int32ub, coord)
@native(name="ReadUInt64", ret=int)
def ReadUInt64(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Int64ub, coord)
@native(name="ReadUQuad", ret=int)
def ReadUQuad(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt,C.Int64ub, coord)
@native(name="ReadUShort", ret=int)
def ReadUShort(params, ctxt, scope, stream, coord):
return _read_data(params, ctxt, C.Int16ub, coord)
@native(name="ReadLine", ret=C.CString)
def ReadLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ReadBytes", ret=None)
def ReadBytes(params, ctxt, scope, stream, coord):
if len(params) != 3:
raise errors.InvalidArguments(
coord,
"3 arguments (buffer, pos, n)",
"{} args".format(len(params)),
)
if not isinstance(params[0], C.Bytes):
raise errors.InvalidArguments(
coord, "buffer must be Bytes", params[0].__class__.__name__
)
if params[0].field_cls not in [pfp.fields.UChar, C.Byte]:
raise errors.InvalidArguments(
coord,
"buffer must be an array of uchar or char",
params[0].field_cls.__name__,
)
if not isinstance(params[1], C.IntBase):
raise errors.InvalidArguments(
coord, "pos must be an integer", params[1].__class__.__name__
)
if not isinstance(params[2], C.IntBase):
raise errors.InvalidArguments(
coord, "n must be an integer", params[2].__class__.__name__
)
bits = stream._bits
curr_pos = stream.tell()
vals = [
params[0].field_cls(stream) for x in six.moves.range(utils.evaluate(params[2], ctxt))
]
stream.seek(curr_pos, 0)
stream._bits = bits
params[0]._pfp__set_value(vals)
@native(name="ReadString", ret=C.CString)
def ReadString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ReadStringLength", ret=C.Int)
def ReadStringLength(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ReadWLine", ret=C.CString)
def ReadWLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ReadWString", ret=C.CString)
def ReadWString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ReadWStringLength", ret=C.Int)
def ReadWStringLength(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextAddressToLine", ret=C.Long)
def TextAddressToLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextAddressToColumn", ret=C.Int)
def TextAddressToColumn(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextColumnToAddress", ret=C.Long)
def TextColumnToAddress(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextGetNumLines", ret=C.Long)
def TextGetNumLines(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextGetLineSize", ret=C.Int)
def TextGetLineSize(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextLineToAddress", ret=C.Long)
def TextLineToAddress(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextReadLine", ret=C.Int)
def TextReadLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextReadLineW", ret=C.Int)
def TextReadLineW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextWriteLine", ret=None)
def TextWriteLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="TextWriteLineW", ret=None)
def TextWriteLineW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteByte", ret=None)
def WriteByte(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteDouble", ret=None)
def WriteDouble(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteFloat", ret=None)
def WriteFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteHFloat", ret=None)
def WriteHFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteInt", ret=None)
def WriteInt(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteInt64", ret=None)
def WriteInt64(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteQuad", ret=None)
def WriteQuad(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteShort", ret=None)
def WriteShort(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteUByte", ret=None)
def WriteUByte(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteUInt", ret=None)
def WriteUInt(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteUInt64", ret=None)
def WriteUInt64(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteUQuad", ret=None)
def WriteUQuad(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteUShort", ret=None)
def WriteUShort(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteBytes", ret=None)
def WriteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteString", ret=None)
def WriteString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="WriteWString", ret=None)
def WriteWString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
| true | true |
1c4763580d072403c8ca37e045aa564412f3085f | 3,801 | py | Python | train_utils.py | Jack407/TFCNs_source_code | f41466ad18457dd6335287112191e5daacf6d80d | [
"MIT"
] | null | null | null | train_utils.py | Jack407/TFCNs_source_code | f41466ad18457dd6335287112191e5daacf6d80d | [
"MIT"
] | null | null | null | train_utils.py | Jack407/TFCNs_source_code | f41466ad18457dd6335287112191e5daacf6d80d | [
"MIT"
] | null | null | null | import argparse
import logging
import random
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils import one_hot_encoder
from loss import mixed_focal_loss
from loss import dice_loss as dl
from torchvision import transforms
import os
def train_starter(args, model, snapshot_path):
from preprocess import TFCNs_dataset, RandomGenerator
logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size * args.n_gpu
db_train = TFCNs_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train",
transform=transforms.Compose(
[RandomGenerator(output_size=[args.img_size, args.img_size])]))
print("The length of train set is: {}".format(len(db_train)))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True,
worker_init_fn=worker_init_fn)
if args.n_gpu > 1:
model = nn.DataParallel(model)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
writer = SummaryWriter(snapshot_path + '/log')
iter_num = 0
max_epoch = args.max_epochs
max_iterations = args.max_epochs * len(trainloader) # max_epoch = max_iterations // len(trainloader) + 1
logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations))
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
image_batch, label_batch = sampled_batch['image'], sampled_batch['label']
image_batch, label_batch = image_batch.cuda(), label_batch.cuda()
outputs = model(image_batch)
label_batch = one_hot_encoder(label_batch,args.dataset,args.num_classes)
outputs = torch.softmax(outputs,dim=1)
loss = mixed_focal_loss(label_batch,outputs)
loss = torch.mean(loss,axis=0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))
save_interval = 50 # int(max_epoch/6)
if epoch_num > int(max_epoch / 2) and (epoch_num + 1) % save_interval == 0:
save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if epoch_num >= max_epoch - 1:
save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
iterator.close()
break
writer.close()
return "Training Finished!"
| 43.689655 | 109 | 0.660353 | import argparse
import logging
import random
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils import one_hot_encoder
from loss import mixed_focal_loss
from loss import dice_loss as dl
from torchvision import transforms
import os
def train_starter(args, model, snapshot_path):
from preprocess import TFCNs_dataset, RandomGenerator
logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size * args.n_gpu
db_train = TFCNs_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train",
transform=transforms.Compose(
[RandomGenerator(output_size=[args.img_size, args.img_size])]))
print("The length of train set is: {}".format(len(db_train)))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True,
worker_init_fn=worker_init_fn)
if args.n_gpu > 1:
model = nn.DataParallel(model)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
writer = SummaryWriter(snapshot_path + '/log')
iter_num = 0
max_epoch = args.max_epochs
max_iterations = args.max_epochs * len(trainloader)
logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations))
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
image_batch, label_batch = sampled_batch['image'], sampled_batch['label']
image_batch, label_batch = image_batch.cuda(), label_batch.cuda()
outputs = model(image_batch)
label_batch = one_hot_encoder(label_batch,args.dataset,args.num_classes)
outputs = torch.softmax(outputs,dim=1)
loss = mixed_focal_loss(label_batch,outputs)
loss = torch.mean(loss,axis=0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))
save_interval = 50
if epoch_num > int(max_epoch / 2) and (epoch_num + 1) % save_interval == 0:
save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if epoch_num >= max_epoch - 1:
save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
iterator.close()
break
writer.close()
return "Training Finished!"
| true | true |
1c4763d96158d165cbae23a7f534f6cbe67be1a2 | 78,654 | py | Python | source/codegen/metadata/nifgen/functions.py | zhindes/grpc-device | 616aa913963098b12d276693895b7eb946f82df4 | [
"MIT"
] | null | null | null | source/codegen/metadata/nifgen/functions.py | zhindes/grpc-device | 616aa913963098b12d276693895b7eb946f82df4 | [
"MIT"
] | 23 | 2021-04-16T06:22:40.000Z | 2021-06-11T05:51:45.000Z | source/codegen/metadata/nifgen/functions.py | zhindes/grpc-device | 616aa913963098b12d276693895b7eb946f82df4 | [
"MIT"
] | 1 | 2021-10-30T09:23:49.000Z | 2021-10-30T09:23:49.000Z | functions = {
'AbortGeneration':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'AdjustSampleClockRelativeDelay':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'adjustmentTime',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'AllocateNamedWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'AllocateWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CheckAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'CheckAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CheckAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'CheckAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'CheckAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'CheckAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ClearArbMemory':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceHandle',
'direction':'in',
'type':'ViInt32',
'enum':'SequenceHandle'
}
],
'returns':'ViStatus'
},
'ClearArbWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32',
'enum':'WaveformHandle'
}
],
'returns':'ViStatus'
},
'ClearError':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'frequencyListHandle',
'direction':'in',
'type':'ViInt32',
'enum':'FrequencyListOptions'
}
],
'returns':'ViStatus'
},
'ClearInterchangeWarnings':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearUserStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Close':{
'cname' : 'niFgen_close',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'Commit':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ConfigureAmplitude':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'sequenceHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'gain',
'direction':'in',
'type':'ViReal64'
},
{
'name':'offset',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureArbWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'gain',
'direction':'in',
'type':'ViReal64'
},
{
'name':'offset',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureChannels':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channels',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureClockMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'clockMode',
'direction':'in',
'type':'ViInt32',
'enum':'ClockMode'
}
],
'returns':'ViStatus'
},
'ConfigureCustomFIRFilterCoefficients':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfCoefficients',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coefficientsArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'numberOfCoefficients'
}
}
],
'returns':'ViStatus'
},
'ConfigureDigitalEdgeScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'edge',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureDigitalEdgeStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'edge',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureDigitalLevelScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'triggerWhen',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'frequencyListHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
},
{
'name':'dcOffset',
'direction':'in',
'type':'ViReal64'
},
{
'name':'startPhase',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureFrequency':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'frequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureOperationMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'operationMode',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureOutputEnabled':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'enabled',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'ConfigureOutputImpedance':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'impedance',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureOutputMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'outputMode',
'direction':'in',
'type':'ViInt32',
'enum':'OutputMode'
}
],
'returns':'ViStatus'
},
'ConfigureP2PEndpointFullnessStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'p2pEndpointFullnessLevel',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureReferenceClock':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'referenceClockSource',
'direction':'in',
'type':'ViConstString'
},
{
'name':'referenceClockFrequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSampleClockSource':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sampleClockSource',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureSampleRate':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sampleRate',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSoftwareEdgeScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureSoftwareEdgeStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ConfigureStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveform',
'direction':'in',
'type':'ViInt32',
'enum':'Waveform'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
},
{
'name':'dcOffset',
'direction':'in',
'type':'ViReal64'
},
{
'name':'frequency',
'direction':'in',
'type':'ViReal64'
},
{
'name':'startPhase',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSynchronization':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'synchronizationSource',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureTriggerMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'triggerMode',
'direction':'in',
'type':'ViInt32',
'enum':'TriggerMode'
}
],
'returns':'ViStatus'
},
'CreateAdvancedArbSequence':{
'codegen_method': 'CustomCode',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandlesArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'loopCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'sampleCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'markerLocationArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'coercedMarkersArray',
'direction':'out',
'type':'ViInt32[]',
'size':{
'mechanism':'custom-code',
'value':'sequenceLength'
}
},
{
'name':'sequenceHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandlesArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'loopCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'sequenceHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'waveform',
'direction':'in',
'type':'ViInt32',
'enum':'Waveform'
},
{
'name':'frequencyListLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'frequencyArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'frequencyListLength'
}
},
{
'name':'durationArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'frequencyListLength'
}
},
{
'name':'frequencyListHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type': 'struct NIComplexNumber_struct[]',
'grpc_type': 'repeated NIComplexNumber',
'size': {
'mechanism': 'len',
'value': 'numberOfSamples'
}
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'waveformSize'
}
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformFromFileF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'byteOrder',
'direction':'in',
'type':'ViInt32',
'enum':'ByteOrder'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformFromFileHWS':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'useRateFromWaveform',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'useGainAndOffsetFromWaveform',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformI16': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformSize',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'waveformDataArray',
'size': {
'mechanism': 'len',
'value': 'waveformSize'
},
'type': 'ViInt16[]',
'use_array': True
},
{
'direction': 'out',
'name': 'waveformHandle',
'type': 'ViInt32'
}
],
'returns': 'ViStatus'
},
'CreateWaveformFromFileI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'byteOrder',
'direction':'in',
'type':'ViInt32',
'enum':'ByteOrder'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'DefineUserStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'waveformSize'
}
}
],
'returns':'ViStatus'
},
'DeleteNamedWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DeleteScript':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'scriptName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Disable':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'DisableAnalogFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableDigitalFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableDigitalPatterning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'EnableAnalogFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'filterCorrectionFrequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'EnableDigitalFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'EnableDigitalPatterning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ErrorHandler':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'in',
'type':'ViStatus'
},
{
'name':'errorMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'ErrorMessage':{
'cname' : 'niFgen_error_message',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'in',
'type':'ViStatus'
},
{
'name':'errorMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'ErrorQuery': {
'cname' : 'niFgen_error_query',
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'out',
'name': 'errorCode',
'type': 'ViInt32'
},
{
'direction': 'out',
'name': 'errorMessage',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
}
],
'returns': 'ViStatus'
},
'ExportAttributeConfigurationBuffer':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sizeInBytes',
'direction':'in',
'type':'ViInt32'
},
{
'name':'configuration',
'direction':'out',
'type':'ViAddr[]',
'size':{
'mechanism':'ivi-dance',
'value':'sizeInBytes'
}
}
],
'returns':'ViStatus'
},
'ExportAttributeConfigurationFile':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'filePath',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ExportSignal':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'signal',
'direction':'in',
'enum':'Signal',
'type':'ViInt32'
},
{
'name':'signalIdentifier',
'direction':'in',
'type':'ViConstString'
},
{
'name':'outputTerminal',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'GetAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'GetAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'GetAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'GetAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'arraySize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'arraySize'
}
}
],
'returns':'ViStatus'
},
'GetChannelName':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'index',
'direction':'in',
'type':'ViInt32'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'channelString',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetError':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'out',
'type':'ViStatus'
},
{
'name':'errorDescriptionBufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'errorDescription',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'errorDescriptionBufferSize'
}
}
],
'returns':'ViStatus'
},
'GetExtCalLastDateAndTime':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'year',
'direction':'out',
'type':'ViInt32'
},
{
'name':'month',
'direction':'out',
'type':'ViInt32'
},
{
'name':'day',
'direction':'out',
'type':'ViInt32'
},
{
'name':'hour',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minute',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetExtCalLastTemp':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetExtCalRecommendedInterval':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'months',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetFIRFilterCoefficients':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'arraySize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coefficientsArray',
'direction':'out',
'type':'ViReal64[]',
'size':{
'mechanism':'ivi-dance-with-a-twist',
'value':'arraySize',
'value_twist':'numberOfCoefficientsRead',
}
},
{
'name':'numberOfCoefficientsRead',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetHardwareState':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'state',
'direction':'out',
'type':'ViInt32',
'enum':'HardwareState'
}
],
'returns':'ViStatus'
},
'GetNextCoercionRecord':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coercionRecord',
'direction':'out',
'type':'ViChar[]',
'size': {
'mechanism': 'ivi-dance',
'value': 'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetNextInterchangeWarning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'interchangeWarning',
'direction':'out',
'type':'ViChar[]',
'size': {
'mechanism': 'ivi-dance',
'value': 'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetSelfCalLastDateAndTime':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'year',
'direction':'out',
'type':'ViInt32'
},
{
'name':'month',
'direction':'out',
'type':'ViInt32'
},
{
'name':'day',
'direction':'out',
'type':'ViInt32'
},
{
'name':'hour',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minute',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetSelfCalLastTemp':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetSelfCalSupported':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'selfCalSupported',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'GetStreamEndpointHandle':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'streamEndpoint',
'direction':'in',
'type':'ViConstString'
},
{
'name':'readerHandle',
'direction':'out',
'type':'ViUInt32'
}
],
'returns':'ViStatus'
},
'ImportAttributeConfigurationBuffer':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sizeInBytes',
'direction':'in',
'type':'ViInt32'
},
{
'name':'configuration',
'direction':'in',
'type':'ViAddr[]',
'size':{
'mechanism':'len',
'value':'sizeInBytes'
}
}
],
'returns':'ViStatus'
},
'ImportAttributeConfigurationFile':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'filePath',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Init': {
'init_method': True,
        'cname': 'niFgen_init',
'parameters': [
{
'direction': 'in',
'name': 'resourceName',
'type': 'ViRsrc'
},
{
'direction': 'in',
'name': 'idQuery',
'type': 'ViBoolean'
},
{
'direction': 'in',
'name': 'resetDevice',
'type': 'ViBoolean'
},
{
'direction': 'out',
'name': 'vi',
'type': 'ViSession'
}
],
'returns': 'ViStatus',
},
'InitWithOptions':{
'init_method' : True,
'parameters':[
{
'name':'resourceName',
'direction':'in',
'type':'ViRsrc'
},
{
'name':'idQuery',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'resetDevice',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'optionString',
'direction':'in',
'type':'ViConstString'
},
{
'name':'vi',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InitializeWithChannels':{
'init_method' : True,
'parameters':[
{
'name':'resourceName',
'direction':'in',
'type':'ViRsrc'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'resetDevice',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'optionString',
'direction':'in',
'type':'ViConstString'
},
{
'name':'vi',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InitiateGeneration':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InvalidateAllAttributes':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'IsDone':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'done',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'LockSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'callerHasLock',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'ManualEnableP2PStream':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'endpointName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'QueryArbSeqCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfSequences',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumSequenceLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumSequenceLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumLoopCount',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'QueryArbWfmCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfWaveforms',
'direction':'out',
'type':'ViInt32'
},
{
'name':'waveformQuantum',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumWaveformSize',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumWaveformSize',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'QueryFreqListCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfFreqLists',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumFrequencyListLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumFrequencyListLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumFrequencyListDuration',
'direction':'out',
'type':'ViReal64'
},
{
'name':'maximumFrequencyListDuration',
'direction':'out',
'type':'ViReal64'
},
{
'name':'frequencyListDurationQuantum',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ReadCurrentTemperature':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'Reset':{
'cname' : 'niFgen_reset',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetAttribute':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
}
],
'returns':'ViStatus'
},
'ResetDevice':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetInterchangeCheck':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetWithDefaults':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'RevisionQuery': {
'cname' : 'niFgen_revision_query',
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'out',
'name': 'instrumentDriverRevision',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
},
{
'direction': 'out',
'name': 'firmwareRevision',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
}
],
'returns': 'ViStatus'
},
'RouteSignalOut':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'routeSignalFrom',
'direction':'in',
'type':'ViInt32',
'enum':'RouteSignalFrom'
},
{
'name':'routeSignalTo',
'direction':'in',
'type':'ViInt32',
'enum':'RouteSignalTo'
}
],
'returns':'ViStatus'
},
'SelfCal':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'SelfTest':{
'cname' : 'niFgen_self_test',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'selfTestResult',
'direction':'out',
'type':'ViInt16'
},
{
'name':'selfTestMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'SendSoftwareEdgeTrigger': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'enum': 'Trigger',
'name': 'trigger',
'type': 'ViInt32',
},
{
'direction': 'in',
'name': 'triggerId',
'type': 'ViString'
}
],
'returns': 'ViStatus'
},
'SetAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'SetAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'SetAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'SetAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'SetAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'SetAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'SetNamedWaveformNextWritePosition':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'relativeTo',
'direction':'in',
'type':'ViInt32',
'enum':'RelativeTo'
},
{
'name':'offset',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'SetWaveformNextWritePosition':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'relativeTo',
'direction':'in',
'type':'ViInt32',
'enum':'RelativeTo'
},
{
'name':'offset',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'UnlockSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'callerHasLock',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'WaitUntilDone':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maxTime',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'WriteBinary16Waveform': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformHandle',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'size',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'data',
'size': {
'mechanism': 'len',
'value': 'size'
},
'type': 'ViInt16[]',
'use_array': True
}
],
'returns': 'ViStatus'
},
'WriteComplexBinary16Waveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexI16_struct[]',
'grpc_type':'repeated NIComplexInt32',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformI16': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'size',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'data',
'size': {
'mechanism': 'len',
'value': 'size'
},
'type': 'ViInt16[]',
'use_array': True
}
],
'returns': 'ViStatus'
},
'WriteP2PEndpointI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'endpointName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'endpointData',
'direction':'in',
'type':'ViInt16[]',
'size': {
'mechanism': 'len',
'value': 'numberOfSamples'
}
}
],
'returns':'ViStatus'
},
'WriteScript':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'script',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'WriteWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexNumber_struct[]',
'grpc_type':'repeated NIComplexNumber',
'size':{
'mechanism':'len',
'value':'numberOfSamples'
}
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexNumber_struct[]',
'grpc_type':'repeated NIComplexNumber',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformComplexI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexI16_struct[]',
'grpc_type':'repeated NIComplexInt32',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
}
} | 25.729146 | 61 | 0.309902 | functions = {
'AbortGeneration':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'AdjustSampleClockRelativeDelay':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'adjustmentTime',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'AllocateNamedWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'AllocateWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CheckAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'CheckAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CheckAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'CheckAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'CheckAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'CheckAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ClearArbMemory':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceHandle',
'direction':'in',
'type':'ViInt32',
'enum':'SequenceHandle'
}
],
'returns':'ViStatus'
},
'ClearArbWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32',
'enum':'WaveformHandle'
}
],
'returns':'ViStatus'
},
'ClearError':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'frequencyListHandle',
'direction':'in',
'type':'ViInt32',
'enum':'FrequencyListOptions'
}
],
'returns':'ViStatus'
},
'ClearInterchangeWarnings':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearUserStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Close':{
'cname' : 'niFgen_close',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'Commit':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ConfigureAmplitude':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'sequenceHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'gain',
'direction':'in',
'type':'ViReal64'
},
{
'name':'offset',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureArbWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'gain',
'direction':'in',
'type':'ViReal64'
},
{
'name':'offset',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureChannels':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channels',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureClockMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'clockMode',
'direction':'in',
'type':'ViInt32',
'enum':'ClockMode'
}
],
'returns':'ViStatus'
},
'ConfigureCustomFIRFilterCoefficients':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfCoefficients',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coefficientsArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'numberOfCoefficients'
}
}
],
'returns':'ViStatus'
},
'ConfigureDigitalEdgeScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'edge',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureDigitalEdgeStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'edge',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureDigitalLevelScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'triggerWhen',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'frequencyListHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
},
{
'name':'dcOffset',
'direction':'in',
'type':'ViReal64'
},
{
'name':'startPhase',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureFrequency':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'frequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureOperationMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'operationMode',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureOutputEnabled':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'enabled',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'ConfigureOutputImpedance':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'impedance',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureOutputMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'outputMode',
'direction':'in',
'type':'ViInt32',
'enum':'OutputMode'
}
],
'returns':'ViStatus'
},
'ConfigureP2PEndpointFullnessStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'p2pEndpointFullnessLevel',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureReferenceClock':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'referenceClockSource',
'direction':'in',
'type':'ViConstString'
},
{
'name':'referenceClockFrequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSampleClockSource':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sampleClockSource',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureSampleRate':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sampleRate',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSoftwareEdgeScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureSoftwareEdgeStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ConfigureStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveform',
'direction':'in',
'type':'ViInt32',
'enum':'Waveform'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
},
{
'name':'dcOffset',
'direction':'in',
'type':'ViReal64'
},
{
'name':'frequency',
'direction':'in',
'type':'ViReal64'
},
{
'name':'startPhase',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSynchronization':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'synchronizationSource',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureTriggerMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'triggerMode',
'direction':'in',
'type':'ViInt32',
'enum':'TriggerMode'
}
],
'returns':'ViStatus'
},
'CreateAdvancedArbSequence':{
'codegen_method': 'CustomCode',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandlesArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'loopCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'sampleCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'markerLocationArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'coercedMarkersArray',
'direction':'out',
'type':'ViInt32[]',
'size':{
'mechanism':'custom-code',
'value':'sequenceLength'
}
},
{
'name':'sequenceHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandlesArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'loopCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'sequenceHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'waveform',
'direction':'in',
'type':'ViInt32',
'enum':'Waveform'
},
{
'name':'frequencyListLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'frequencyArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'frequencyListLength'
}
},
{
'name':'durationArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'frequencyListLength'
}
},
{
'name':'frequencyListHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type': 'struct NIComplexNumber_struct[]',
'grpc_type': 'repeated NIComplexNumber',
'size': {
'mechanism': 'len',
'value': 'numberOfSamples'
}
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'waveformSize'
}
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformFromFileF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'byteOrder',
'direction':'in',
'type':'ViInt32',
'enum':'ByteOrder'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformFromFileHWS':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'useRateFromWaveform',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'useGainAndOffsetFromWaveform',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformI16': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformSize',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'waveformDataArray',
'size': {
'mechanism': 'len',
'value': 'waveformSize'
},
'type': 'ViInt16[]',
'use_array': True
},
{
'direction': 'out',
'name': 'waveformHandle',
'type': 'ViInt32'
}
],
'returns': 'ViStatus'
},
'CreateWaveformFromFileI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'byteOrder',
'direction':'in',
'type':'ViInt32',
'enum':'ByteOrder'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'DefineUserStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'waveformSize'
}
}
],
'returns':'ViStatus'
},
'DeleteNamedWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DeleteScript':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'scriptName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Disable':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'DisableAnalogFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableDigitalFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableDigitalPatterning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'EnableAnalogFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'filterCorrectionFrequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'EnableDigitalFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'EnableDigitalPatterning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ErrorHandler':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'in',
'type':'ViStatus'
},
{
'name':'errorMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'ErrorMessage':{
'cname' : 'niFgen_error_message',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'in',
'type':'ViStatus'
},
{
'name':'errorMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'ErrorQuery': {
'cname' : 'niFgen_error_query',
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'out',
'name': 'errorCode',
'type': 'ViInt32'
},
{
'direction': 'out',
'name': 'errorMessage',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
}
],
'returns': 'ViStatus'
},
'ExportAttributeConfigurationBuffer':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sizeInBytes',
'direction':'in',
'type':'ViInt32'
},
{
'name':'configuration',
'direction':'out',
'type':'ViAddr[]',
'size':{
'mechanism':'ivi-dance',
'value':'sizeInBytes'
}
}
],
'returns':'ViStatus'
},
'ExportAttributeConfigurationFile':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'filePath',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ExportSignal':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'signal',
'direction':'in',
'enum':'Signal',
'type':'ViInt32'
},
{
'name':'signalIdentifier',
'direction':'in',
'type':'ViConstString'
},
{
'name':'outputTerminal',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'GetAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'GetAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'GetAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'GetAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'arraySize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'arraySize'
}
}
],
'returns':'ViStatus'
},
'GetChannelName':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'index',
'direction':'in',
'type':'ViInt32'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'channelString',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetError':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'out',
'type':'ViStatus'
},
{
'name':'errorDescriptionBufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'errorDescription',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'errorDescriptionBufferSize'
}
}
],
'returns':'ViStatus'
},
'GetExtCalLastDateAndTime':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'year',
'direction':'out',
'type':'ViInt32'
},
{
'name':'month',
'direction':'out',
'type':'ViInt32'
},
{
'name':'day',
'direction':'out',
'type':'ViInt32'
},
{
'name':'hour',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minute',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetExtCalLastTemp':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetExtCalRecommendedInterval':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'months',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetFIRFilterCoefficients':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'arraySize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coefficientsArray',
'direction':'out',
'type':'ViReal64[]',
'size':{
'mechanism':'ivi-dance-with-a-twist',
'value':'arraySize',
'value_twist':'numberOfCoefficientsRead',
}
},
{
'name':'numberOfCoefficientsRead',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetHardwareState':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'state',
'direction':'out',
'type':'ViInt32',
'enum':'HardwareState'
}
],
'returns':'ViStatus'
},
'GetNextCoercionRecord':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coercionRecord',
'direction':'out',
'type':'ViChar[]',
'size': {
'mechanism': 'ivi-dance',
'value': 'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetNextInterchangeWarning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'interchangeWarning',
'direction':'out',
'type':'ViChar[]',
'size': {
'mechanism': 'ivi-dance',
'value': 'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetSelfCalLastDateAndTime':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'year',
'direction':'out',
'type':'ViInt32'
},
{
'name':'month',
'direction':'out',
'type':'ViInt32'
},
{
'name':'day',
'direction':'out',
'type':'ViInt32'
},
{
'name':'hour',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minute',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetSelfCalLastTemp':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetSelfCalSupported':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'selfCalSupported',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'GetStreamEndpointHandle':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'streamEndpoint',
'direction':'in',
'type':'ViConstString'
},
{
'name':'readerHandle',
'direction':'out',
'type':'ViUInt32'
}
],
'returns':'ViStatus'
},
'ImportAttributeConfigurationBuffer':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sizeInBytes',
'direction':'in',
'type':'ViInt32'
},
{
'name':'configuration',
'direction':'in',
'type':'ViAddr[]',
'size':{
'mechanism':'len',
'value':'sizeInBytes'
}
}
],
'returns':'ViStatus'
},
'ImportAttributeConfigurationFile':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'filePath',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Init': {
'init_method': True,
'cname': 'niFgen_init ',
'parameters': [
{
'direction': 'in',
'name': 'resourceName',
'type': 'ViRsrc'
},
{
'direction': 'in',
'name': 'idQuery',
'type': 'ViBoolean'
},
{
'direction': 'in',
'name': 'resetDevice',
'type': 'ViBoolean'
},
{
'direction': 'out',
'name': 'vi',
'type': 'ViSession'
}
],
'returns': 'ViStatus',
},
'InitWithOptions':{
'init_method' : True,
'parameters':[
{
'name':'resourceName',
'direction':'in',
'type':'ViRsrc'
},
{
'name':'idQuery',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'resetDevice',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'optionString',
'direction':'in',
'type':'ViConstString'
},
{
'name':'vi',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InitializeWithChannels':{
'init_method' : True,
'parameters':[
{
'name':'resourceName',
'direction':'in',
'type':'ViRsrc'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'resetDevice',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'optionString',
'direction':'in',
'type':'ViConstString'
},
{
'name':'vi',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InitiateGeneration':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InvalidateAllAttributes':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'IsDone':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'done',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'LockSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'callerHasLock',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'ManualEnableP2PStream':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'endpointName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'QueryArbSeqCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfSequences',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumSequenceLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumSequenceLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumLoopCount',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'QueryArbWfmCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfWaveforms',
'direction':'out',
'type':'ViInt32'
},
{
'name':'waveformQuantum',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumWaveformSize',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumWaveformSize',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'QueryFreqListCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfFreqLists',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumFrequencyListLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumFrequencyListLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumFrequencyListDuration',
'direction':'out',
'type':'ViReal64'
},
{
'name':'maximumFrequencyListDuration',
'direction':'out',
'type':'ViReal64'
},
{
'name':'frequencyListDurationQuantum',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ReadCurrentTemperature':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'Reset':{
'cname' : 'niFgen_reset',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetAttribute':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
}
],
'returns':'ViStatus'
},
'ResetDevice':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetInterchangeCheck':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetWithDefaults':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'RevisionQuery': {
'cname' : 'niFgen_revision_query',
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'out',
'name': 'instrumentDriverRevision',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
},
{
'direction': 'out',
'name': 'firmwareRevision',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
}
],
'returns': 'ViStatus'
},
'RouteSignalOut':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'routeSignalFrom',
'direction':'in',
'type':'ViInt32',
'enum':'RouteSignalFrom'
},
{
'name':'routeSignalTo',
'direction':'in',
'type':'ViInt32',
'enum':'RouteSignalTo'
}
],
'returns':'ViStatus'
},
'SelfCal':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'SelfTest':{
'cname' : 'niFgen_self_test',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'selfTestResult',
'direction':'out',
'type':'ViInt16'
},
{
'name':'selfTestMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'SendSoftwareEdgeTrigger': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'enum': 'Trigger',
'name': 'trigger',
'type': 'ViInt32',
},
{
'direction': 'in',
'name': 'triggerId',
'type': 'ViString'
}
],
'returns': 'ViStatus'
},
'SetAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'SetAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'SetAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'SetAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'SetAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'SetAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'SetNamedWaveformNextWritePosition':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'relativeTo',
'direction':'in',
'type':'ViInt32',
'enum':'RelativeTo'
},
{
'name':'offset',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'SetWaveformNextWritePosition':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'relativeTo',
'direction':'in',
'type':'ViInt32',
'enum':'RelativeTo'
},
{
'name':'offset',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'UnlockSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'callerHasLock',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'WaitUntilDone':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maxTime',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'WriteBinary16Waveform': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformHandle',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'size',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'data',
'size': {
'mechanism': 'len',
'value': 'size'
},
'type': 'ViInt16[]',
'use_array': True
}
],
'returns': 'ViStatus'
},
'WriteComplexBinary16Waveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexI16_struct[]',
'grpc_type':'repeated NIComplexInt32',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformI16': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'size',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'data',
'size': {
'mechanism': 'len',
'value': 'size'
},
'type': 'ViInt16[]',
'use_array': True
}
],
'returns': 'ViStatus'
},
'WriteP2PEndpointI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'endpointName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'endpointData',
'direction':'in',
'type':'ViInt16[]',
'size': {
'mechanism': 'len',
'value': 'numberOfSamples'
}
}
],
'returns':'ViStatus'
},
'WriteScript':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'script',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'WriteWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexNumber_struct[]',
'grpc_type':'repeated NIComplexNumber',
'size':{
'mechanism':'len',
'value':'numberOfSamples'
}
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexNumber_struct[]',
'grpc_type':'repeated NIComplexNumber',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformComplexI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexI16_struct[]',
'grpc_type':'repeated NIComplexInt32',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
}
} | true | true |
1c476528ea9e0ab39dc368d76e84eab32c00fa45 | 724 | py | Python | mldp/tests/transformers/test_seq_len_computer.py | prashantlv/mltoolkit | acc192bafc66b7661d541ef4f604b5e5ab7df5ca | [
"MIT"
] | 1 | 2020-10-03T05:23:31.000Z | 2020-10-03T05:23:31.000Z | mldp/tests/transformers/test_seq_len_computer.py | prashantlv/mltoolkit | acc192bafc66b7661d541ef4f604b5e5ab7df5ca | [
"MIT"
] | null | null | null | mldp/tests/transformers/test_seq_len_computer.py | prashantlv/mltoolkit | acc192bafc66b7661d541ef4f604b5e5ab7df5ca | [
"MIT"
] | null | null | null | import unittest
from mldp.steps.transformers.nlp import SeqLenComputer
from mldp.utils.tools import DataChunk
from copy import deepcopy
import numpy as np
class TestSeqLenComputer(unittest.TestCase):
    """Checks that SeqLenComputer adds a field holding per-sequence lengths."""

    def test_output(self):
        src_field = "dummy"
        len_field = "dummy_len"
        seqs = [[1, 2, 3], [12], ["a", "b", "d", "e"]]
        chunk = DataChunk(**{src_field: np.array(deepcopy(seqs))})
        # Expected chunk: original data plus the computed lengths 3, 1, 4.
        expected = DataChunk(**{src_field: np.array(deepcopy(seqs)),
                                len_field: np.array([3, 1, 4])})
        computer = SeqLenComputer(fname=src_field, new_len_fname=len_field)
        chunk = computer(chunk)
        self.assertTrue(chunk == expected)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| 27.846154 | 64 | 0.618785 | import unittest
from mldp.steps.transformers.nlp import SeqLenComputer
from mldp.utils.tools import DataChunk
from copy import deepcopy
import numpy as np
class TestSeqLenComputer(unittest.TestCase):
def test_output(self):
fn = "dummy"
new_fn = "dummy_len"
data = [[1, 2, 3], [12], ["a", "b", "d", "e"]]
actual_dc = DataChunk(**{fn: np.array(deepcopy(data))})
expected_dc = DataChunk(**{fn: np.array(deepcopy(data)),
new_fn: np.array([3, 1, 4])})
slc = SeqLenComputer(fname=fn, new_len_fname=new_fn)
actual_dc = slc(actual_dc)
self.assertTrue(actual_dc == expected_dc)
if __name__ == '__main__':
unittest.main()
| true | true |
1c4765731326549e159d462a7abaa90cb1582cbf | 181 | py | Python | apps/profile/apps.py | OpenAdaptronik/Rattler | c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4 | [
"MIT"
] | 2 | 2018-05-18T08:38:29.000Z | 2018-05-22T08:26:09.000Z | apps/profile/apps.py | IT-PM-OpenAdaptronik/Webapp | c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4 | [
"MIT"
] | 118 | 2017-10-31T13:45:09.000Z | 2018-02-24T20:51:42.000Z | apps/profile/apps.py | OpenAdaptronik/Rattler | c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class ProfileConfig(AppConfig):
    """Django AppConfig for the ``apps.profile`` application."""

    name = 'apps.profile'
    verbose_name = _('profile')  # translatable display name for the app
| 22.625 | 54 | 0.762431 | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class ProfileConfig(AppConfig):
name = 'apps.profile'
verbose_name = _('profile')
| true | true |
1c47670eaf2832f39b529a294728b4e11a136702 | 629 | py | Python | src/create_experiment.py | G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways | 1f3992a529fed70fd488811d68128a1e255fac5f | [
"MIT"
] | 4 | 2018-11-09T16:18:28.000Z | 2019-04-09T11:19:23.000Z | src/create_experiment.py | G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways | 1f3992a529fed70fd488811d68128a1e255fac5f | [
"MIT"
] | null | null | null | src/create_experiment.py | G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways | 1f3992a529fed70fd488811d68128a1e255fac5f | [
"MIT"
] | 1 | 2020-05-28T18:48:17.000Z | 2020-05-28T18:48:17.000Z | import sys
from utils import write_exp_utils
import pandas as pd
from utils import misc_utils
import psycopg2
from psycopg2.extras import Json, DictCursor
def main(argv):
    """Register an experiment config in the database and echo the newest id.

    argv[1] is the experiment config path/name; argv[2] is the second
    ``ExperimentConfig`` argument (semantics defined by ``write_exp_utils``).
    """
    print(argv[1])
    config = write_exp_utils.ExperimentConfig(argv[1], argv[2])
    print("writing {} to database".format(argv[1]))
    config.write_to_db()  # persist the experiment definition
    # Sanity check: fetch the id of the most recently written experiment.
    query = ('select experiment_id from rws_experiment.experiment_table '
             'order by experiment_id desc limit 1;')
    connection = misc_utils.connect_rds()
    print(pd.read_sql(query, connection))
if __name__== '__main__':
    # CLI entry point; forwards the raw argv so main() can read argv[1]/argv[2].
    main(sys.argv)
| 29.952381 | 105 | 0.732909 | import sys
from utils import write_exp_utils
import pandas as pd
from utils import misc_utils
import psycopg2
from psycopg2.extras import Json, DictCursor
def main(argv):
print(argv[1])
w = write_exp_utils.ExperimentConfig(argv[1], argv[2])
print("writing {} to database".format(argv[1]) )
w.write_to_db()
q = 'select experiment_id from rws_experiment.experiment_table order by experiment_id desc limit 1;'
conn = misc_utils.connect_rds()
print(pd.read_sql(q, conn))
if __name__== '__main__':
main(sys.argv)
| true | true |
1c4767c28a173b87d61645270342bcabb9c6929c | 7,674 | py | Python | setup.py | WildbookOrg/wbia-deprecate-tpl-brambox | 9aa6a69f706d0653a65520c696a7cd66715b6a37 | [
"MIT"
] | 2 | 2019-03-23T03:14:11.000Z | 2019-11-21T07:16:13.000Z | setup.py | WildbookOrg/wbia-deprecate-tpl-brambox | 9aa6a69f706d0653a65520c696a7cd66715b6a37 | [
"MIT"
] | null | null | null | setup.py | WildbookOrg/wbia-deprecate-tpl-brambox | 9aa6a69f706d0653a65520c696a7cd66715b6a37 | [
"MIT"
] | 1 | 2021-12-01T03:04:53.000Z | 2021-12-01T03:04:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
from os.path import exists
from collections import OrderedDict
from setuptools import find_packages
from skbuild import setup
def native_mb_python_tag(plat_impl=None, version_info=None):
    """
    Return the multibuild/wheel python tag (e.g. ``cp38-cp38``) for an interpreter.

    Args:
        plat_impl (str, default=None): implementation name as reported by
            :func:`platform.python_implementation` (``'CPython'`` or ``'PyPy'``).
            Defaults to the running interpreter's implementation.
        version_info (tuple, default=None): ``(major, minor, ...)`` version
            tuple. Defaults to :data:`sys.version_info`.

    Raises:
        NotImplementedError: for implementations other than CPython / PyPy.

    Example:
        >>> print(native_mb_python_tag('CPython', (3, 8)))
        cp38-cp38
        >>> print(native_mb_python_tag('PyPy', (2, 7)))
        pypy-27
    """
    if plat_impl is None:
        import platform
        plat_impl = platform.python_implementation()
    if version_info is None:
        import sys
        version_info = sys.version_info

    major, minor = version_info[0:2]
    ver = '{}{}'.format(major, minor)
    if plat_impl == 'CPython':
        impl = 'cp'
        if ver == '27':
            # TODO: detect whether the 2.7 interpreter was built with wide
            # unicode (cp27mu) or narrow unicode (cp27m); assume wide.
            IS_27_BUILT_WITH_UNICODE = True
            abi = 'mu' if IS_27_BUILT_WITH_UNICODE else 'm'
        elif (major, minor) >= (3, 8):
            # The pymalloc 'm' abiflag was dropped from ABI tags in CPython
            # 3.8 (the original only special-cased ver == '38', which made
            # 3.9+ wrongly produce tags like cp39-cp39m).
            abi = ''
        else:
            abi = 'm'
        mb_tag = '{impl}{ver}-{impl}{ver}{abi}'.format(**locals())
    elif plat_impl == 'PyPy':
        abi = ''
        impl = 'pypy'
        ver = '{}{}'.format(major, minor)
        mb_tag = '{impl}-{ver}'.format(**locals())
    else:
        raise NotImplementedError(plat_impl)
    return mb_tag
def parse_version(fpath='brambox/__init__.py'):
    """
    Statically read ``__version__`` out of a python file without importing it.
    """
    import ast
    if not exists(fpath):
        raise ValueError('fpath={!r} does not exist'.format(fpath))
    with open(fpath, 'r') as file_:
        tree = ast.parse(file_.read())

    class _VersionFinder(ast.NodeVisitor):
        # Remembers the value of the last ``__version__ = "..."`` assignment.
        def visit_Assign(self, node):
            if any(getattr(t, 'id', None) == '__version__' for t in node.targets):
                self.version = node.value.s

    finder = _VersionFinder()
    finder.visit(tree)
    return finder.version
def parse_long_description(fpath='README.rst'):
    """
    Return the README text, or an empty string when the file is absent.
    """
    if not exists(fpath):
        return ''
    with open(fpath, 'r') as handle:
        return handle.read()
def parse_requirements(fname='requirements.txt', with_version=False):
    """
    Parse the package dependencies listed in a requirements file.

    Version pins are stripped unless ``with_version`` is True; ``-r`` include
    lines are followed recursively and ``-e`` editable lines contribute their
    ``#egg=`` name.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if true include version specs

    Returns:
        List[str]: list of requirements items
    """
    import re
    from os.path import exists

    spec_pattern = '(' + '|'.join(['>=', '==', '>']) + ')'

    def _iter_records(fpath):
        # Yield one info dict per requirement line, following -r includes.
        with open(fpath, 'r') as handle:
            for raw in handle.readlines():
                line = raw.strip()
                if not line or line.startswith('#'):
                    continue
                if line.startswith('-r '):
                    # Requirements may be split across several files.
                    for record in _iter_records(line.split(' ')[1]):
                        yield record
                    continue
                record = {'line': line}
                if line.startswith('-e '):
                    record['package'] = line.split('#egg=')[1]
                else:
                    # Split "name<op>version[;marker]" on the first operator.
                    pieces = [p.strip() for p in re.split(spec_pattern, line, maxsplit=1)]
                    record['package'] = pieces[0]
                    if len(pieces) > 1:
                        op, remainder = pieces[1], pieces[2]
                        if ';' in remainder:
                            # Platform-specific dependency marker after ';'.
                            version, marker = [s.strip() for s in remainder.split(';')]
                            record['platform_deps'] = marker
                        else:
                            version = remainder
                        record['version'] = (op, version)
                yield record

    packages = []
    if exists(fname):
        for record in _iter_records(fname):
            item = record['package']
            if with_version and 'version' in record:
                item += ''.join(record['version'])
            if not sys.version.startswith('3.4'):
                # platform_deps handling is skipped on 3.4 (broken there).
                marker = record.get('platform_deps')
                if marker is not None:
                    item += ';' + marker
            packages.append(item)
    return packages
# Package metadata used to populate setup() at the bottom of this file.
NAME = 'wbia-brambox'
MB_PYTHON_TAG = native_mb_python_tag()  # NOQA
AUTHORS = [
    'EAVISE',
    'Jason Parham',
    'WildMe Developers',
]
AUTHOR_EMAIL = 'dev@wildme.org'
URL = 'https://github.com/WildbookOrg/wbia-tpl-brambox'
LICENSE = 'BSD'
DESCRIPTION = 'brambox - Basic Recipes for Annotations and Modeling'
# Keyword arguments forwarded verbatim to skbuild.setup() in __main__.
KWARGS = OrderedDict(
    name=NAME,
    author=', '.join(AUTHORS),
    author_email=AUTHOR_EMAIL,
    description=DESCRIPTION,
    long_description=parse_long_description('README.rst'),
    long_description_content_type='text/x-rst',
    url=URL,
    license=LICENSE,
    # Requirement lists are parsed (and version pins stripped) at build time.
    install_requires=parse_requirements('requirements/runtime.txt'),
    extras_require={
        'all': parse_requirements('requirements.txt'),
        'tests': parse_requirements('requirements/tests.txt'),
        'build': parse_requirements('requirements/build.txt'),
        'runtime': parse_requirements('requirements/runtime.txt'),
    },
    # --- VERSION ---
    # The following settings retrieve the version from git tags.
    # See https://github.com/pypa/setuptools_scm/ for more information
    setup_requires=['setuptools_scm'],
    use_scm_version={
        'write_to': 'brambox/_version.py',
        'write_to_template': '__version__ = "{version}"',
        'tag_regex': '^(?P<prefix>v)?(?P<version>[^\\+]+)(?P<suffix>.*)?$',
        'local_scheme': 'dirty-tag',
    },
    packages=find_packages(),
    include_package_data=False,
    # List of classifiers available at:
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 6 - Mature',
        'License :: OSI Approved :: BSD License',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Unix',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
)
if __name__ == '__main__':
    """
    python -c "import brambox; print(brambox.__file__)"
    """
    # Build/install entry point: forwards all collected metadata to setup().
    setup(**KWARGS)
| 31.975 | 125 | 0.572974 |
from __future__ import absolute_import, division, print_function
import sys
from os.path import exists
from collections import OrderedDict
from setuptools import find_packages
from skbuild import setup
def native_mb_python_tag(plat_impl=None, version_info=None):
if plat_impl is None:
import platform
plat_impl = platform.python_implementation()
if version_info is None:
import sys
version_info = sys.version_info
major, minor = version_info[0:2]
ver = '{}{}'.format(major, minor)
if plat_impl == 'CPython':
impl = 'cp'
if ver == '27':
IS_27_BUILT_WITH_UNICODE = True
if IS_27_BUILT_WITH_UNICODE:
abi = 'mu'
else:
abi = 'm'
else:
if ver == '38':
abi = ''
else:
abi = 'm'
mb_tag = '{impl}{ver}-{impl}{ver}{abi}'.format(**locals())
elif plat_impl == 'PyPy':
abi = ''
impl = 'pypy'
ver = '{}{}'.format(major, minor)
mb_tag = '{impl}-{ver}'.format(**locals())
else:
raise NotImplementedError(plat_impl)
return mb_tag
def parse_version(fpath='brambox/__init__.py'):
import ast
if not exists(fpath):
raise ValueError('fpath={!r} does not exist'.format(fpath))
with open(fpath, 'r') as file_:
sourcecode = file_.read()
pt = ast.parse(sourcecode)
class VersionVisitor(ast.NodeVisitor):
def visit_Assign(self, node):
for target in node.targets:
if getattr(target, 'id', None) == '__version__':
self.version = node.value.s
visitor = VersionVisitor()
visitor.visit(pt)
return visitor.version
def parse_long_description(fpath='README.rst'):
if exists(fpath):
with open(fpath, 'r') as file:
return file.read()
return ''
def parse_requirements(fname='requirements.txt', with_version=False):
from os.path import exists
import re
require_fpath = fname
def parse_line(line):
if line.startswith('-r '):
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
m_deps = map(str.strip, rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
NAME = 'wbia-brambox'
MB_PYTHON_TAG = native_mb_python_tag()
AUTHORS = [
'EAVISE',
'Jason Parham',
'WildMe Developers',
]
AUTHOR_EMAIL = 'dev@wildme.org'
URL = 'https://github.com/WildbookOrg/wbia-tpl-brambox'
LICENSE = 'BSD'
DESCRIPTION = 'brambox - Basic Recipes for Annotations and Modeling'
KWARGS = OrderedDict(
name=NAME,
author=', '.join(AUTHORS),
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=parse_long_description('README.rst'),
long_description_content_type='text/x-rst',
url=URL,
license=LICENSE,
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'runtime': parse_requirements('requirements/runtime.txt'),
},
setup_requires=['setuptools_scm'],
use_scm_version={
'write_to': 'brambox/_version.py',
'write_to_template': '__version__ = "{version}"',
'tag_regex': '^(?P<prefix>v)?(?P<version>[^\\+]+)(?P<suffix>.*)?$',
'local_scheme': 'dirty-tag',
},
packages=find_packages(),
include_package_data=False,
classifiers=[
'Development Status :: 6 - Mature',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
if __name__ == '__main__':
setup(**KWARGS)
| true | true |
1c476801c70edbae6a98a7915c2d93aa454b9a2d | 5,022 | py | Python | Analysis/SampleVisualization_AE.py | melodist/MELTNET | 47548e4a027ea4e23cdcb5ba1f1d9aa1aa7bbf29 | [
"MIT"
] | 9 | 2020-03-16T04:17:05.000Z | 2022-02-08T12:51:45.000Z | Analysis/SampleVisualization_AE.py | melodist/MELTNET | 47548e4a027ea4e23cdcb5ba1f1d9aa1aa7bbf29 | [
"MIT"
] | 1 | 2019-11-26T08:18:16.000Z | 2020-09-10T15:21:40.000Z | Analysis/SampleVisualization_AE.py | melodist/MELTNET | 47548e4a027ea4e23cdcb5ba1f1d9aa1aa7bbf29 | [
"MIT"
] | 3 | 2020-03-16T04:17:30.000Z | 2021-12-02T07:10:22.000Z | """
Sample Visualization
Make 2-D image of sample distribution
1-1. Extract Features using initial network
1-2. Extract Features using trained network
2. Using K-means to classify the patches
3. Dimension reduction using PCA
4. Visualize results
"""
import tensorflow as tf
import numpy as np
from Network import NetworkKeras
import os
import time
from Extraction import PatchExtraction
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from datetime import datetime
def SampleVisualization_AE(path_model, path_image):
    """ Visualize sample distribution using PCA.

    CT/PT patches of each patient are encoded by both an untrained
    (reference) and a trained autoencoder pair, the trained features are
    clustered with K-means, and both feature sets are projected to 2-D with
    a PCA fitted on the trained features.  One comparison plot per patient
    is saved to a new 'Results_%Y%m%d_%H%M%S' directory.

    Input
    ______
    path_model: path prefix of the trained model weights ('CT'/'PT' suffixes)
    path_image: directory containing one sub-directory per patient

    Output
    ______
    """
    tf.enable_eager_execution()
    time_start = time.time()

    # Build untrained (reference) and trained autoencoders for each modality.
    input_shape = (17 * 17)
    initial_model_CT = NetworkKeras.create_autoencoder(input_shape)
    initial_model_PT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_CT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_CT.load_weights(path_model + 'CT')
    trained_model_PT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_PT.load_weights(path_model + 'PT')

    # Feature extractors tap the l2-normalized bottleneck of each model.
    # NOTE(review): these layer names are autogenerated and depend on model
    # creation order ('..._2', '..._4', '..._6') -- confirm they stay valid
    # if create_autoencoder() changes.
    initial_extractor_CT = tf.keras.models.Model(inputs=initial_model_CT.input,
                                                 outputs=initial_model_CT.get_layer('tf_op_layer_l2_normalize').output)
    initial_extractor_PT = tf.keras.models.Model(inputs=initial_model_PT.input,
                                                 outputs=initial_model_PT.get_layer('tf_op_layer_l2_normalize_2').output)
    feature_extractor_CT = tf.keras.models.Model(inputs=trained_model_CT.input,
                                                 outputs=trained_model_CT.get_layer('tf_op_layer_l2_normalize_4').output)
    feature_extractor_PT = tf.keras.models.Model(inputs=trained_model_PT.input,
                                                 outputs=trained_model_PT.get_layer('tf_op_layer_l2_normalize_6').output)

    # Crop indices ([rows, cols]) applied when stacking the image slices.
    ind_CT = [[230, 380], [150, 370]]
    ind_PT = [[230, 380], [150, 370]]

    # Make Results Folder
    now = datetime.now()
    path_result = f"./Results_{now.strftime('%Y%m%d_%H%M%S')}/"
    os.makedirs(path_result)

    # Print Patients Number
    patient_dir = os.listdir(path_image)
    print(f'Patients Number: {len(patient_dir)}')

    for path_patient in patient_dir:
        # BUG FIX: the original ended this assignment with a stray '\' line
        # continuation, fusing it with the next statement (a SyntaxError).
        addr_patient = f'{path_image}/{path_patient}/'
        img_CT, img_PT = PatchExtraction.stackImages(addr_patient, ind_CT, ind_PT)
        patches_CT, patches_PT = PatchExtraction.patch_extraction_thres(img_CT, img_PT, 0)

        # Extract Features using initial network
        print(f"Extract Features using initial network...")
        features_init_CT = initial_extractor_CT.predict(patches_CT, steps=1)
        features_init_PT = initial_extractor_PT.predict(patches_PT, steps=1)
        features_init = np.hstack((features_init_CT, features_init_PT))

        # Extract Features using trained network
        print(f"Extract Features...")
        features_CT = feature_extractor_CT.predict(patches_CT, steps=1)
        features_PT = feature_extractor_PT.predict(patches_PT, steps=1)
        features = np.hstack((features_CT, features_PT))

        # Cluster the trained features.  fit_predict() both fits and labels
        # (the original called fit() and then fit_predict(), fitting twice;
        # with random_state=0 the labels are unchanged).
        print(f"K-means Clustering...")
        num_labels = 5
        model_k_means = KMeans(n_clusters=num_labels, random_state=0)
        label_predict = model_k_means.fit_predict(features)

        # Project both feature sets into the PCA plane of the trained features
        # so the two scatter plots share a coordinate system.
        pca = PCA(n_components=2)
        features_low = pca.fit_transform(features)
        features_init_low = pca.transform(features_init)

        colors = ['salmon', 'orange', 'steelblue', 'violet', 'khaki']
        fig, ax = plt.subplots(2, figsize=(5, 5), constrained_layout=True)
        for i in range(num_labels):
            # Top: initial-network features, bottom: trained-network features,
            # both colored by the K-means labels of the trained features.
            data_init = features_init_low[label_predict == i]
            ax[0].scatter(data_init[:, 0], data_init[:, 1], color=colors[i], label=i, s=1)
            data = features_low[label_predict == i]
            ax[1].scatter(data[:, 0], data[:, 1], color=colors[i], label=i, s=1)
        for axis in ax:
            axis.legend(loc='best')
            axis.set_xticks([])
            axis.set_yticks([])
        fig.suptitle('Distribution of patches')
        plt.savefig(f"{path_result}Plot_{path_patient}.png", format='png', dpi=300)

    time_end = time.time()
    print(f"Evaluation Finished! Elapsed time: {time_end - time_start}")
| 35.871429 | 121 | 0.660892 |
import tensorflow as tf
import numpy as np
from Network import NetworkKeras
import os
import time
from Extraction import PatchExtraction
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from datetime import datetime
def SampleVisualization_AE(path_model, path_image):
tf.enable_eager_execution()
time_start = time.time()
input_shape = (17 * 17)
initial_model_CT = NetworkKeras.create_autoencoder(input_shape)
initial_model_PT = NetworkKeras.create_autoencoder(input_shape)
trained_model_CT = NetworkKeras.create_autoencoder(input_shape)
trained_model_CT.load_weights(path_model + 'CT')
trained_model_PT = NetworkKeras.create_autoencoder(input_shape)
trained_model_PT.load_weights(path_model + 'PT')
initial_extractor_CT = tf.keras.models.Model(inputs=initial_model_CT.input,
outputs=initial_model_CT.get_layer('tf_op_layer_l2_normalize').output)
initial_extractor_PT = tf.keras.models.Model(inputs=initial_model_PT.input,
outputs=initial_model_PT.get_layer('tf_op_layer_l2_normalize_2').output)
feature_extractor_CT = tf.keras.models.Model(inputs=trained_model_CT.input,
outputs=trained_model_CT.get_layer('tf_op_layer_l2_normalize_4').output)
feature_extractor_PT = tf.keras.models.Model(inputs=trained_model_PT.input,
outputs=trained_model_PT.get_layer('tf_op_layer_l2_normalize_6').output)
ind_CT = [[230, 380], [150, 370]]
ind_PT = [[230, 380], [150, 370]]
now = datetime.now()
path_result = f"./Results_{now.strftime('%Y%m%d_%H%M%S')}/"
os.makedirs(path_result)
patient_dir = os.listdir(path_image)
print(f'Patients Number: {len(patient_dir)}')
for path_patient in patient_dir:
addr_patient = f'{path_image}/{path_patient}/'\
img_CT, img_PT = PatchExtraction.stackImages(addr_patient, ind_CT, ind_PT)
patches_CT, patches_PT = PatchExtraction.patch_extraction_thres(img_CT, img_PT, 0)
print(f"Extract Features using initial network...")
features_init_CT = initial_extractor_CT.predict(patches_CT, steps=1)
features_init_PT = initial_extractor_PT.predict(patches_PT, steps=1)
features_init = np.hstack((features_init_CT, features_init_PT))
print(f"Extract Features...")
features_CT = feature_extractor_CT.predict(patches_CT, steps=1)
features_PT = feature_extractor_PT.predict(patches_PT, steps=1)
features = np.hstack((features_CT, features_PT))
print(f"K-means Clustering...")
num_labels = 5
model_k_means = KMeans(n_clusters=num_labels, random_state=0)
model_k_means.fit(features)
num_x = 44
num_y = 30
stride = 5
label_predict = model_k_means.fit_predict(features)
label_predict_batch = label_predict.reshape((-1, num_y * num_x))
pca = PCA(n_components=2)
features_low = pca.fit_transform(features)
features_init_low = pca.transform(features_init)
colors = ['salmon', 'orange', 'steelblue', 'violet', 'khaki']
fig, ax = plt.subplots(2, figsize=(5, 5), constrained_layout=True)
for i in range(5):
data_init = features_init_low[label_predict == i]
X_init = data_init[:, 0]
Y_init = data_init[:, 1]
ax[0].scatter(X_init, Y_init, color=colors[i], label=i, s=1)
data = features_low[label_predict == i]
X = data[:, 0]
Y = data[:, 1]
ax[1].scatter(X, Y, color=colors[i], label=i, s=1)
ax[0].legend(loc='best')
ax[0].set_xticks([])
ax[0].set_yticks([])
ax[1].legend(loc='best')
ax[1].set_xticks([])
ax[1].set_yticks([])
fig.suptitle('Distribution of patches')
plt.savefig(f"{path_result}Plot_{path_patient}.png", format='png', dpi=300)
time_end = time.time()
print(f"Evaluation Finished! Elapsed time: {time_end - time_start}")
| true | true |
1c4768746d5b6ffc5563045f2c062c9a11652afe | 7,689 | py | Python | tests/components/hue/test_init.py | sgrzys/AIS-home-assistant | 7bfc4d6d90de75eea06702c36474d91bf38df3bf | [
"Apache-2.0"
] | 1 | 2019-04-22T06:05:09.000Z | 2019-04-22T06:05:09.000Z | tests/components/hue/test_init.py | sgrzys/AIS-home-assistant | 7bfc4d6d90de75eea06702c36474d91bf38df3bf | [
"Apache-2.0"
] | 2 | 2022-01-13T04:26:00.000Z | 2022-03-12T01:05:37.000Z | tests/components/hue/test_init.py | sgrzys/AIS-home-assistant | 7bfc4d6d90de75eea06702c36474d91bf38df3bf | [
"Apache-2.0"
] | 1 | 2021-09-20T01:52:31.000Z | 2021-09-20T01:52:31.000Z | """Test Hue setup process."""
from unittest.mock import Mock, patch
from homeassistant.setup import async_setup_component
from homeassistant.components import hue
from tests.common import mock_coro, MockConfigEntry
async def test_setup_with_no_config(hass):
    """Test that we do not discover anything or try to set up a bridge."""
    with patch.object(hass, 'config_entries') as mock_config_entries, \
            patch.object(hue, 'configured_hosts', return_value=[]):
        assert await async_setup_component(hass, hue.DOMAIN, {}) is True
    # No flows started: nothing was discovered and nothing was configured
    assert len(mock_config_entries.flow.mock_calls) == 0
    # No configs stored
    assert hass.data[hue.DOMAIN] == {}
async def test_setup_with_discovery_no_known_auth(hass, aioclient_mock):
    """Test discovering a bridge and not having known auth."""
    # The nupnp discovery endpoint reports one unconfigured bridge.
    aioclient_mock.get(hue.API_NUPNP, json=[
        {
            'internalipaddress': '0.0.0.0',
            'id': 'abcd1234'
        }
    ])
    with patch.object(hass, 'config_entries') as mock_config_entries, \
            patch.object(hue, 'configured_hosts', return_value=[]):
        mock_config_entries.flow.async_init.return_value = mock_coro()
        assert await async_setup_component(hass, hue.DOMAIN, {
            hue.DOMAIN: {}
        }) is True
    # Flow started for discovered bridge
    assert len(mock_config_entries.flow.mock_calls) == 1
    # mock_calls[0][2] is the kwargs dict of the first flow.async_init call.
    assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
        'host': '0.0.0.0',
        'path': '.hue_abcd1234.conf',
    }
    # Config stored for domain.
    assert hass.data[hue.DOMAIN] == {
        '0.0.0.0': {
            hue.CONF_HOST: '0.0.0.0',
            hue.CONF_FILENAME: '.hue_abcd1234.conf',
            hue.CONF_ALLOW_HUE_GROUPS: hue.DEFAULT_ALLOW_HUE_GROUPS,
            hue.CONF_ALLOW_UNREACHABLE: hue.DEFAULT_ALLOW_UNREACHABLE,
        }
    }
async def test_setup_with_discovery_known_auth(hass, aioclient_mock):
    """Test we don't do anything if we discover already configured hub."""
    aioclient_mock.get(hue.API_NUPNP, json=[
        {
            'internalipaddress': '0.0.0.0',
            'id': 'abcd1234'
        }
    ])
    # The discovered host is already in configured_hosts.
    with patch.object(hass, 'config_entries') as mock_config_entries, \
            patch.object(hue, 'configured_hosts', return_value=['0.0.0.0']):
        assert await async_setup_component(hass, hue.DOMAIN, {
            hue.DOMAIN: {}
        }) is True
    # No flow started: the discovered bridge is already configured
    assert len(mock_config_entries.flow.mock_calls) == 0
    # No config stored for domain.
    assert hass.data[hue.DOMAIN] == {}
async def test_setup_defined_hosts_known_auth(hass):
    """Test we don't initiate a config entry if config bridge is known."""
    with patch.object(hass, 'config_entries') as mock_config_entries, \
            patch.object(hue, 'configured_hosts', return_value=['0.0.0.0']):
        assert await async_setup_component(hass, hue.DOMAIN, {
            hue.DOMAIN: {
                hue.CONF_BRIDGES: {
                    hue.CONF_HOST: '0.0.0.0',
                    hue.CONF_FILENAME: 'bla.conf',
                    hue.CONF_ALLOW_HUE_GROUPS: False,
                    hue.CONF_ALLOW_UNREACHABLE: True
                }
            }
        }) is True
    # No flow started: the configured bridge is already known
    assert len(mock_config_entries.flow.mock_calls) == 0
    # Config from configuration.yaml still stored for domain.
    assert hass.data[hue.DOMAIN] == {
        '0.0.0.0': {
            hue.CONF_HOST: '0.0.0.0',
            hue.CONF_FILENAME: 'bla.conf',
            hue.CONF_ALLOW_HUE_GROUPS: False,
            hue.CONF_ALLOW_UNREACHABLE: True
        }
    }
async def test_setup_defined_hosts_no_known_auth(hass):
    """Test we initiate config entry if config bridge is not known."""
    with patch.object(hass, 'config_entries') as mock_config_entries, \
            patch.object(hue, 'configured_hosts', return_value=[]):
        mock_config_entries.flow.async_init.return_value = mock_coro()
        assert await async_setup_component(hass, hue.DOMAIN, {
            hue.DOMAIN: {
                hue.CONF_BRIDGES: {
                    hue.CONF_HOST: '0.0.0.0',
                    hue.CONF_FILENAME: 'bla.conf',
                    hue.CONF_ALLOW_HUE_GROUPS: False,
                    hue.CONF_ALLOW_UNREACHABLE: True
                }
            }
        }) is True
    # Flow started for the configured-but-unknown bridge
    assert len(mock_config_entries.flow.mock_calls) == 1
    # mock_calls[0][2] is the kwargs dict of the flow.async_init call.
    assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
        'host': '0.0.0.0',
        'path': 'bla.conf',
    }
    # Config stored for domain.
    assert hass.data[hue.DOMAIN] == {
        '0.0.0.0': {
            hue.CONF_HOST: '0.0.0.0',
            hue.CONF_FILENAME: 'bla.conf',
            hue.CONF_ALLOW_HUE_GROUPS: False,
            hue.CONF_ALLOW_UNREACHABLE: True
        }
    }
async def test_config_passed_to_config_entry(hass):
    """Test that configured options for a host are loaded via config entry."""
    entry = MockConfigEntry(domain=hue.DOMAIN, data={
        'host': '0.0.0.0',
    })
    entry.add_to_hass(hass)
    mock_registry = Mock()
    with patch.object(hue, 'HueBridge') as mock_bridge, \
            patch('homeassistant.helpers.device_registry.async_get_registry',
                  return_value=mock_coro(mock_registry)):
        mock_bridge.return_value.async_setup.return_value = mock_coro(True)
        # Fake bridge API config used to register the device below.
        mock_bridge.return_value.api.config = Mock(
            mac='mock-mac',
            bridgeid='mock-bridgeid',
            raw={
                'modelid': 'mock-modelid',
                'swversion': 'mock-swversion',
            }
        )
        # Can't set name via kwargs: Mock(name=...) names the mock itself.
        mock_bridge.return_value.api.config.name = 'mock-name'
        assert await async_setup_component(hass, hue.DOMAIN, {
            hue.DOMAIN: {
                hue.CONF_BRIDGES: {
                    hue.CONF_HOST: '0.0.0.0',
                    hue.CONF_FILENAME: 'bla.conf',
                    hue.CONF_ALLOW_HUE_GROUPS: False,
                    hue.CONF_ALLOW_UNREACHABLE: True
                }
            }
        }) is True
    assert len(mock_bridge.mock_calls) == 2
    # mock_calls[0][1] holds the positional args of the HueBridge(...) call;
    # they must reflect the configuration.yaml options above.
    p_hass, p_entry, p_allow_unreachable, p_allow_groups = \
        mock_bridge.mock_calls[0][1]
    assert p_hass is hass
    assert p_entry is entry
    assert p_allow_unreachable is True
    assert p_allow_groups is False
    # The bridge must have been registered in the device registry with the
    # identifiers taken from the mocked api.config above.
    assert len(mock_registry.mock_calls) == 1
    assert mock_registry.mock_calls[0][2] == {
        'config_entry': entry.entry_id,
        'connections': {
            ('mac', 'mock-mac')
        },
        'identifiers': {
            ('hue', 'mock-bridgeid')
        },
        'manufacturer': 'Signify',
        'name': 'mock-name',
        'model': 'mock-modelid',
        'sw_version': 'mock-swversion'
    }
async def test_unload_entry(hass):
    """Test being able to unload an entry."""
    entry = MockConfigEntry(domain=hue.DOMAIN, data={
        'host': '0.0.0.0',
    })
    entry.add_to_hass(hass)
    with patch.object(hue, 'HueBridge') as mock_bridge, \
            patch('homeassistant.helpers.device_registry.async_get_registry',
                  return_value=mock_coro(Mock())):
        mock_bridge.return_value.async_setup.return_value = mock_coro(True)
        mock_bridge.return_value.api.config = Mock()
        assert await async_setup_component(hass, hue.DOMAIN, {}) is True
    # Only async_setup was called on the bridge during setup.
    assert len(mock_bridge.return_value.mock_calls) == 1
    mock_bridge.return_value.async_reset.return_value = mock_coro(True)
    assert await hue.async_unload_entry(hass, entry)
    # Unloading must reset the bridge and drop it from hass.data.
    assert len(mock_bridge.return_value.async_reset.mock_calls) == 1
    assert hass.data[hue.DOMAIN] == {}
| 35.109589 | 78 | 0.613864 | from unittest.mock import Mock, patch
from homeassistant.setup import async_setup_component
from homeassistant.components import hue
from tests.common import mock_coro, MockConfigEntry
async def test_setup_with_no_config(hass):
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=[]):
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert len(mock_config_entries.flow.mock_calls) == 0
assert hass.data[hue.DOMAIN] == {}
async def test_setup_with_discovery_no_known_auth(hass, aioclient_mock):
aioclient_mock.get(hue.API_NUPNP, json=[
{
'internalipaddress': '0.0.0.0',
'id': 'abcd1234'
}
])
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=[]):
mock_config_entries.flow.async_init.return_value = mock_coro()
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {}
}) is True
assert len(mock_config_entries.flow.mock_calls) == 1
assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
'host': '0.0.0.0',
'path': '.hue_abcd1234.conf',
}
assert hass.data[hue.DOMAIN] == {
'0.0.0.0': {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: '.hue_abcd1234.conf',
hue.CONF_ALLOW_HUE_GROUPS: hue.DEFAULT_ALLOW_HUE_GROUPS,
hue.CONF_ALLOW_UNREACHABLE: hue.DEFAULT_ALLOW_UNREACHABLE,
}
}
async def test_setup_with_discovery_known_auth(hass, aioclient_mock):
aioclient_mock.get(hue.API_NUPNP, json=[
{
'internalipaddress': '0.0.0.0',
'id': 'abcd1234'
}
])
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=['0.0.0.0']):
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {}
}) is True
assert len(mock_config_entries.flow.mock_calls) == 0
assert hass.data[hue.DOMAIN] == {}
async def test_setup_defined_hosts_known_auth(hass):
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=['0.0.0.0']):
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
}) is True
assert len(mock_config_entries.flow.mock_calls) == 0
assert hass.data[hue.DOMAIN] == {
'0.0.0.0': {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
async def test_setup_defined_hosts_no_known_auth(hass):
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=[]):
mock_config_entries.flow.async_init.return_value = mock_coro()
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
}) is True
assert len(mock_config_entries.flow.mock_calls) == 1
assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
'host': '0.0.0.0',
'path': 'bla.conf',
}
assert hass.data[hue.DOMAIN] == {
'0.0.0.0': {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
async def test_config_passed_to_config_entry(hass):
entry = MockConfigEntry(domain=hue.DOMAIN, data={
'host': '0.0.0.0',
})
entry.add_to_hass(hass)
mock_registry = Mock()
with patch.object(hue, 'HueBridge') as mock_bridge, \
patch('homeassistant.helpers.device_registry.async_get_registry',
return_value=mock_coro(mock_registry)):
mock_bridge.return_value.async_setup.return_value = mock_coro(True)
mock_bridge.return_value.api.config = Mock(
mac='mock-mac',
bridgeid='mock-bridgeid',
raw={
'modelid': 'mock-modelid',
'swversion': 'mock-swversion',
}
)
mock_bridge.return_value.api.config.name = 'mock-name'
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
}) is True
assert len(mock_bridge.mock_calls) == 2
p_hass, p_entry, p_allow_unreachable, p_allow_groups = \
mock_bridge.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert p_allow_unreachable is True
assert p_allow_groups is False
assert len(mock_registry.mock_calls) == 1
assert mock_registry.mock_calls[0][2] == {
'config_entry': entry.entry_id,
'connections': {
('mac', 'mock-mac')
},
'identifiers': {
('hue', 'mock-bridgeid')
},
'manufacturer': 'Signify',
'name': 'mock-name',
'model': 'mock-modelid',
'sw_version': 'mock-swversion'
}
async def test_unload_entry(hass):
entry = MockConfigEntry(domain=hue.DOMAIN, data={
'host': '0.0.0.0',
})
entry.add_to_hass(hass)
with patch.object(hue, 'HueBridge') as mock_bridge, \
patch('homeassistant.helpers.device_registry.async_get_registry',
return_value=mock_coro(Mock())):
mock_bridge.return_value.async_setup.return_value = mock_coro(True)
mock_bridge.return_value.api.config = Mock()
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert len(mock_bridge.return_value.mock_calls) == 1
mock_bridge.return_value.async_reset.return_value = mock_coro(True)
assert await hue.async_unload_entry(hass, entry)
assert len(mock_bridge.return_value.async_reset.mock_calls) == 1
assert hass.data[hue.DOMAIN] == {}
| true | true |
1c476b5d686fb5d71b925dc5ae700b71ab106d76 | 3,587 | py | Python | auto_xml.py | tdwitham/AutohammerPy | 1621400fd148f012bc59176ad51aa05c5c879c4f | [
"BSD-2-Clause"
] | null | null | null | auto_xml.py | tdwitham/AutohammerPy | 1621400fd148f012bc59176ad51aa05c5c879c4f | [
"BSD-2-Clause"
] | null | null | null | auto_xml.py | tdwitham/AutohammerPy | 1621400fd148f012bc59176ad51aa05c5c879c4f | [
"BSD-2-Clause"
] | null | null | null | # (c) 2016,2017 - Timothy D. Witham tim.wookie.witham@gmail.com
# Licensed under BSD 2-Clause
__author__ = 'wookie'
import pprint
from components.FileOps import writeLog, initialLogDir, makeLogDir
from components.infrastructure import getSysInfo
from components.MySQL import MySQLOps
# DBOP holds the active database-operations backend, assigned by connectDB().
# NOTE(review): a ``global`` statement at module level is a no-op — confirm
# whether this was meant only as documentation of the global.
global DBOP
# Accumulators filled while parsing the Autohammer XML job description:
# runConfig collects <config>/<load_code> values, secParms per-section values.
runConfig = dict()
secParms = dict()
# XML section tags that ckheader() accepts as valid Autohammer headers.
_KNOWN_TAGS = frozenset((
    'autohammer',
    'config',
    'connect',
    'import_code',
    'run_code',
    'run_sql',
    'sys_info',
))


def ckheader(cEvent, cTag, cText):
    """Return True if *cTag* is a recognized Autohammer section tag.

    The original if/elif chain of ``return True`` branches is collapsed to a
    single set-membership test over the same seven tags; the local ``setTo``
    variable was dead code (assigned, never read) and has been removed.

    Args:
        cEvent: parser event name ('start'/'end'); currently unused, kept
            for interface compatibility with the XML driver.
        cTag: the XML tag name to check.
        cText: the tag's text content; currently unused.

    Returns:
        bool: True for a known section tag, False otherwise.
    """
    return cTag in _KNOWN_TAGS
def finishSection(thisSection):
    """Dispatch the end-of-section action for *thisSection*.

    Called when the XML parser closes a section tag; each branch runs the
    side effect accumulated for that section (using the module-level
    runConfig/secParms dicts).

    NOTE(review): the 'load_code' branch here has no matching tag in
    ckheader(), which accepts 'import_code' instead — confirm which name the
    XML actually uses.
    NOTE(review): runCode is invoked with two arguments below but is defined
    with none — confirm the intended signature.
    """
    if (thisSection == 'config'):
        initialLogDir(runConfig)
    elif (thisSection == 'connect'):
        # Log an open/close pair around the DB connection setup.
        writeLog(1, '<connect>')
        connectDB(runConfig)
        writeLog(-1,'</connect>')
    elif (thisSection =='sys_info'):
        writeLog(1, '<sys_info>')
        getSysInfo()
        writeLog(-1, '</sys_info>')
    elif (thisSection =='run_sql'):
        runSQL(runConfig, secParms)
    elif (thisSection =='run_code'):
        runCode(runConfig, secParms )
    elif (thisSection =='load_code'):
        loadCode()
    elif (thisSection =='autohammer'):
        # Closing the root tag ends the whole run.
        finishIt()
def doSection(thisSection, cEvent, cTag, cText):
    """Store a parsed tag/text pair in the dict belonging to *thisSection*.

    'config' and 'load_code' entries update the module-level ``runConfig``;
    'run_sql' and 'run_code' entries update ``secParms``.  Any other section
    (including None, i.e. no section currently open) is ignored.

    Args:
        thisSection: name of the currently open XML section, or None.
        cEvent: parser event; currently unused, kept for interface
            compatibility with the XML driver.
        cTag: the child tag name, used as the dict key.
        cText: the tag's text content, stored as the value.
    """
    # PEP 8: identity comparison for the None singleton (was ``== None``).
    if thisSection is None:
        return
    # The two pairs of branches in the original wrote to the same dicts;
    # collapsing them keeps the mapping explicit and identical in behavior.
    if thisSection in ('config', 'load_code'):
        runConfig[cTag] = cText
    elif thisSection in ('run_sql', 'run_code'):
        secParms[cTag] = cText
def validateConfig():
    """Dump the parsed configuration and create the per-test log directory.

    NOTE(review): ``dbConfig`` and ``copyFiles`` are not defined or imported
    anywhere in this module — confirm where they are expected to come from
    (this function raises NameError as written if called standalone).
    """
    global runConfig
    global dbConfig
    # Debug dump of both configuration dicts, one key per line.
    print("runConfig")
    pprint.pprint(runConfig, width=1)
    print("dbConfig")
    pprint.pprint(dbConfig, width=1)
    print('Validate config')
    # The log-directory naming scheme depends on the benchmark type:
    # TPCC is sized by warehouses, TPCH by database scale factor.
    if dbConfig['test'].upper() == 'TPCC':
        runConfig['logDir'] = makeLogDir(dbConfig['rdbms'], dbConfig['test'], dbConfig['warehouses'])
        copyFiles(runConfig['logDir'])
    elif dbConfig['test'].upper() == 'TPCH':
        runConfig['logDir'] = makeLogDir(dbConfig['rdbms'], dbConfig['test'], dbConfig['db_scale'])
        copyFiles(runConfig['logDir'])
def validateTest():
    """Validate the test section (currently a stub that only logs)."""
    print('Validate test')
def setupCode():
    """Prepare a code section (currently a stub that only logs)."""
    print('setting up code section - I think that this is a do not care')
def runCode(runConfig=None, secParms=None):
    """Run a <run_code> section (currently a stub that only logs).

    finishSection() invokes this as ``runCode(runConfig, secParms)`` while
    the original definition took no arguments, so every run_code section
    raised TypeError.  The parameters are now accepted (with defaults, so a
    bare ``runCode()`` keeps working) and are currently unused.

    Args:
        runConfig: run configuration dict (unused placeholder).
        secParms: per-section parameters dict (unused placeholder).
    """
    print('Running a code section ')
def validateSQL():
    """Validate a SQL section's configuration (stub that only logs)."""
    print('Validate SQL config')
def validateCode():
    """Validate a code section's configuration (stub that only logs)."""
    print('Validate Code config')
def loadCode():
    """Load a <load_code>/<import_code> section (stub that only logs)."""
    print("Inside of load code")
def finishIt():
    """Finalize the Autohammer run (stub that only logs)."""
    print("Done with Autohammer")
def runSQL(runConfig, secParms):
    """Execute a <run_sql> section through the active DBOP backend.

    Connects with admin credentials when the section requests the 'system'
    database, otherwise with user credentials; runs the SQL, then
    disconnects with the matching call.

    Args:
        runConfig: run configuration dict passed to the connect/SQL calls.
        secParms: section parameters; the 'use_db' key selects the role.
    """
    global DBOP
    if (secParms['use_db'] == 'system'):
        DBOP.connectAdmin(runConfig)
        DBOP.nowDoSQL(runConfig, secParms)
        DBOP.disconnectAdmin()
    else:
        DBOP.connectUser(runConfig)
        DBOP.nowDoSQL(runConfig, secParms)
        DBOP.disconnectUser()
def connectDB(runConfig):
    """Select and instantiate the DB-operations backend for this run.

    Sets the module-level ``DBOP`` to the backend matching
    ``runConfig['rdbms']`` (case-insensitive).  The original parameter was
    misspelled ``runCOnfig`` and never used — the body read the module
    global instead; since the only visible caller passes that same dict
    positionally, using the parameter is behavior-preserving.

    Args:
        runConfig: run configuration dict; must contain the key 'rdbms'.
    """
    global DBOP
    rdbms = runConfig['rdbms'].lower()  # hoisted: computed once, not per branch
    if rdbms == 'oracle':
        # NOTE(review): adminOracle is not defined/imported in this module — confirm.
        adminOracle(runConfig)
    elif rdbms == 'mysql':
        DBOP = MySQLOps()
    elif rdbms == 'mssql':
        # NOTE(review): MSSQLDB is not imported in this module — confirm.
        DBOP = MSSQLDB()
    elif rdbms == 'pgsql':
        # NOTE(review): calling adminMSSQL for pgsql looks like a copy-paste
        # slip from the mssql branch — confirm the intended handler.
        adminMSSQL(runConfig)
    else:
        # Fixed the stray 'i' in the message, and added the level argument so
        # the call matches writeLog(level, text) as used elsewhere here.
        writeLog(0, "ERROR: Unknown RDBMS {}\n".format(runConfig['rdbms']))
| 26.182482 | 101 | 0.622805 |
__author__ = 'wookie'
import pprint
from components.FileOps import writeLog, initialLogDir, makeLogDir
from components.infrastructure import getSysInfo
from components.MySQL import MySQLOps
global DBOP
runConfig = dict()
secParms = dict()
def ckheader(cEvent, cTag, cText):
if (cEvent == 'start'):
setTo = cTag
else:
setTo = None
if (cTag == 'autohammer'):
return True
elif (cTag == 'config'):
return True
elif (cTag == 'connect'):
return True
elif cTag == 'import_code':
return True
elif cTag == 'run_code':
return True
elif (cTag == 'run_sql'):
return True
elif (cTag == 'sys_info'):
return True
return False
def finishSection(thisSection):
if (thisSection == 'config'):
initialLogDir(runConfig)
elif (thisSection == 'connect'):
writeLog(1, '<connect>')
connectDB(runConfig)
writeLog(-1,'</connect>')
elif (thisSection =='sys_info'):
writeLog(1, '<sys_info>')
getSysInfo()
writeLog(-1, '</sys_info>')
elif (thisSection =='run_sql'):
runSQL(runConfig, secParms)
elif (thisSection =='run_code'):
runCode(runConfig, secParms )
elif (thisSection =='load_code'):
loadCode()
elif (thisSection =='autohammer'):
finishIt()
def doSection(thisSection, cEvent, cTag, cText):
if thisSection == None:
return
elif thisSection == 'config':
runConfig[cTag] = cText
elif thisSection == 'run_sql':
secParms[cTag] = cText
elif thisSection == 'run_code':
secParms[cTag] = cText
elif thisSection == 'load_code':
runConfig[cTag] = cText
def validateConfig():
global runConfig
global dbConfig
print("runConfig")
pprint.pprint(runConfig, width=1)
print("dbConfig")
pprint.pprint(dbConfig, width=1)
print('Validate config')
if dbConfig['test'].upper() == 'TPCC':
runConfig['logDir'] = makeLogDir(dbConfig['rdbms'], dbConfig['test'], dbConfig['warehouses'])
copyFiles(runConfig['logDir'])
elif dbConfig['test'].upper() == 'TPCH':
runConfig['logDir'] = makeLogDir(dbConfig['rdbms'], dbConfig['test'], dbConfig['db_scale'])
copyFiles(runConfig['logDir'])
def validateTest():
print('Validate test')
def setupCode():
print('setting up code section - I think that this is a do not care')
def runCode():
print('Running a code section ')
def validateSQL():
print('Validate SQL config')
def validateCode():
print('Validate Code config')
def loadCode():
print("Inside of load code")
def finishIt():
print("Done with Autohammer")
def runSQL(runConfig, secParms):
global DBOP
if (secParms['use_db'] == 'system'):
DBOP.connectAdmin(runConfig)
DBOP.nowDoSQL(runConfig, secParms)
DBOP.disconnectAdmin()
else:
DBOP.connectUser(runConfig)
DBOP.nowDoSQL(runConfig, secParms)
DBOP.disconnectUser()
def connectDB(runCOnfig):
global DBOP
if runConfig['rdbms'].lower() == 'oracle':
adminOracle(runConfig)
elif runConfig['rdbms'].lower() == 'mysql':
DBOP = MySQLOps()
elif runConfig['rdbms'].lower() == 'mssql':
DBOP = MSSQLDB()
elif runConfig['rdbms'].lower() == 'pgsql':
adminMSSQL(runConfig)
else:
writeLog("ERROR: Unknown RDBMS {}i\n".format(runConfig['rdbms']))
| true | true |
1c476bd27893d69e83ccb306a1d2ce80722a4ad1 | 9,534 | py | Python | piqa/fsim.py | francois-rozet/spiq | a2e68c38da9129c85867e77641ed29d88e84c9d7 | [
"MIT"
] | 19 | 2020-10-12T13:57:21.000Z | 2020-12-05T12:23:41.000Z | piqa/fsim.py | francois-rozet/spiq | a2e68c38da9129c85867e77641ed29d88e84c9d7 | [
"MIT"
] | null | null | null | piqa/fsim.py | francois-rozet/spiq | a2e68c38da9129c85867e77641ed29d88e84c9d7 | [
"MIT"
] | null | null | null | r"""Feature Similarity (FSIM)
This module implements the FSIM in PyTorch.
Original:
https://www4.comp.polyu.edu.hk/~cslzhang/IQA/FSIM/FSIM.htm
References:
.. [Zhang2011] FSIM: A Feature Similarity Index for Image Quality Assessment (Zhang et al., 2011)
.. [Kovesi1999] Image Features From Phase Congruency (Kovesi, 1999)
"""
import math
import torch
import torch.fft as fft
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .utils import _jit, assert_type, reduce_tensor
from .utils import complex as cx
from .utils.color import ColorConv
from .utils.functional import (
scharr_kernel,
gradient_kernel,
filter_grid,
log_gabor,
channel_conv,
l2_norm,
)
@_jit
def fsim(
    x: Tensor,
    y: Tensor,
    pc_x: Tensor,
    pc_y: Tensor,
    kernel: Tensor,
    value_range: float = 1.,
    t1: float = 0.85,
    t2: float = 160. / (255. ** 2),
    t3: float = 200. / (255. ** 2),
    t4: float = 200. / (255. ** 2),
    lmbda: float = 0.03,
) -> Tensor:
    r"""Returns the FSIM between :math:`x` and :math:`y`,
    without color space conversion and downsampling.
    Args:
        x: An input tensor, :math:`(N, 3 \text{ or } 1, H, W)`.
        y: A target tensor, :math:`(N, 3 \text{ or } 1, H, W)`.
        pc_x: The input phase congruency, :math:`(N, H, W)`.
        pc_y: The target phase congruency, :math:`(N, H, W)`.
        kernel: A gradient kernel, :math:`(2, 1, K, K)`.
        value_range: The value range :math:`L` of the inputs (usually `1.` or `255`).
    Note:
        For the remaining arguments, refer to [Zhang2011]_.
    Returns:
        The FSIM vector, :math:`(N,)`.
    Example:
        >>> x = torch.rand(5, 3, 256, 256)
        >>> y = torch.rand(5, 3, 256, 256)
        >>> filters = pc_filters(x)
        >>> pc_x = phase_congruency(x[:, :1], filters)
        >>> pc_y = phase_congruency(y[:, :1], filters)
        >>> kernel = gradient_kernel(scharr_kernel())
        >>> l = fsim(x, y, pc_x, pc_y, kernel)
        >>> l.size()
        torch.Size([5])
    """
    # The stability constants t2-t4 are squared-intensity terms, so they
    # scale with the square of the value range.
    t2 *= value_range ** 2
    t3 *= value_range ** 2
    t4 *= value_range ** 2
    # First channel is luminance (Y); chrominance handled below if present.
    y_x, y_y = x[:, :1], y[:, :1]
    # Phase congruency similarity
    pc_m = torch.max(pc_x, pc_y)
    s_pc = (2 * pc_x * pc_y + t1) / (pc_x ** 2 + pc_y ** 2 + t1)
    # Gradient magnitude similarity
    pad = kernel.size(-1) // 2
    g_x = l2_norm(channel_conv(y_x, kernel, padding=pad), dims=[1])
    g_y = l2_norm(channel_conv(y_y, kernel, padding=pad), dims=[1])
    s_g = (2 * g_x * g_y + t2) / (g_x ** 2 + g_y ** 2 + t2)
    # Combined PC-gradient (luminance) similarity
    s_l = s_pc * s_g
    if x.size(1) == 3:
        # Chrominance (I and Q channel) similarity
        i_x, i_y = x[:, 1], y[:, 1]
        q_x, q_y = x[:, 2], y[:, 2]
        s_i = (2 * i_x * i_y + t3) / (i_x ** 2 + i_y ** 2 + t3)
        s_q = (2 * q_x * q_y + t4) / (q_x ** 2 + q_y ** 2 + t4)
        s_iq = s_i * s_q
        # Raised to a fractional power via the complex plane so that
        # negative similarities do not produce NaNs.
        s_iq = cx.complx(s_iq, torch.zeros_like(s_iq))
        s_iq_lambda = cx.real(cx.pow(s_iq, lmbda))
        s_l = s_l * s_iq_lambda
    # Feature similarity: PC-weighted spatial average
    fs = (s_l * pc_m).sum(dim=(-1, -2)) / pc_m.sum(dim=(-1, -2))
    return fs
@_jit
def pc_filters(
    x: Tensor,
    scales: int = 4,
    orientations: int = 4,
    wavelength: float = 6.,
    factor: float = 2.,
    sigma_f: float = 0.5978,  # -log(0.55)
    sigma_theta: float = 0.6545,  # pi / (4 * 1.2)
) -> Tensor:
    r"""Returns the log-Gabor filters for :func:`phase_congruency`.
    Args:
        x: An input tensor, :math:`(*, H, W)`.
        scales: The number of scales, :math:`S_1`.
        orientations: The number of orientations, :math:`S_2`.
    Note:
        For the remaining arguments, refer to [Kovesi1999]_.
    Returns:
        The filters tensor, :math:`(S_1, S_2, H, W)`.
    """
    r, theta = filter_grid(x)
    # Low-pass filter (Butterworth-style, cutoff 0.45, order 15)
    lowpass = 1 / (1 + (r / 0.45) ** (2 * 15))
    # Radial components: one log-Gabor per scale, center frequency halving
    # (by `factor`) at each scale
    radial = []
    for i in range(scales):
        f_0 = 1 / (wavelength * factor ** i)
        lg = log_gabor(r, f_0, sigma_f)
        radial.append(lg)
    radial = torch.stack(radial)
    # Angular components: Gaussian falloff around each orientation
    cos_theta = torch.cos(theta)
    sin_theta = torch.sin(theta)
    theta_j = math.pi * torch.arange(orientations).to(x) / orientations
    theta_j = theta_j.reshape(orientations, 1, 1)
    ## Measure (theta - theta_j) in the sine/cosine domains
    ## to prevent wrap-around errors
    delta_sin = sin_theta * theta_j.cos() - cos_theta * theta_j.sin()
    delta_cos = cos_theta * theta_j.cos() + sin_theta * theta_j.sin()
    delta_theta = torch.atan2(delta_sin, delta_cos)
    angular = torch.exp(-delta_theta ** 2 / (2 * sigma_theta ** 2))
    # Combination: outer product over (scale, orientation), low-pass applied
    filters = lowpass * radial[:, None] * angular[None, :]
    return filters
@_jit
def phase_congruency(
    x: Tensor,
    filters: Tensor,
    value_range: float = 1.,
    k: float = 2.,
    rescale: float = 1.7,
    eps: float = 1e-8,
) -> Tensor:
    r"""Returns the Phase Congruency (PC) of :math:`x`.
    Args:
        x: An input tensor, :math:`(N, 1, H, W)`.
        filters: The frequency domain filters, :math:`(S_1, S_2, H, W)`.
        value_range: The value range :math:`L` of the input (usually `1.` or `255`).
    Note:
        For the remaining arguments, refer to [Kovesi1999]_.
    Returns:
        The PC tensor, :math:`(N, H, W)`.
    Example:
        >>> x = torch.rand(5, 1, 256, 256)
        >>> filters = pc_filters(x)
        >>> pc = phase_congruency(x, filters)
        >>> pc.size()
        torch.Size([5, 256, 256])
    """
    # Normalize the input to the [0, 255] range the constants assume.
    x = x * (255. / value_range)
    # Filters (frequency-domain M_hat, and spatial-domain real part M)
    M_hat = filters
    M = fft.ifft2(M_hat)
    M = cx.real(torch.view_as_real(M))
    # Even & odd (real and imaginary) responses
    eo = fft.ifft2(fft.fft2(x[:, None]) * M_hat)
    eo = torch.view_as_real(eo)
    # Amplitude
    A = cx.mod(eo)
    # Expected E^2, estimated from the median response at the finest scale
    A2 = A[:, 0] ** 2
    median_A2, _ = A2.flatten(-2).median(dim=-1)
    expect_A2 = median_A2 / math.log(2)
    expect_M2_hat = (M_hat[0] ** 2).mean(dim=(-1, -2))
    expect_MiMj = (M[:, None] * M[None, :]).sum(dim=(0, 1, 3, 4))
    expect_E2 = expect_A2 * expect_MiMj / expect_M2_hat
    # Noise threshold T = mu_R + k * sigma_R (Rayleigh statistics)
    sigma_G = expect_E2.sqrt()
    mu_R = sigma_G * (math.pi / 2) ** 0.5
    sigma_R = sigma_G * (2 - math.pi / 2) ** 0.5
    T = mu_R + k * sigma_R
    T = T / rescale  # empirical rescaling
    T = T[..., None, None]
    # Phase deviation: projection on the mean phase direction, minus the
    # magnitude of the orthogonal component
    FH = eo.sum(dim=1, keepdim=True)
    phi_eo = FH / (cx.mod(FH)[..., None] + eps)
    E = cx.dot(eo, phi_eo) - cx.dot(eo, cx.turn(phi_eo)).abs()
    E = E.sum(dim=1)
    # Phase congruency: thresholded energy over total amplitude
    pc = (E - T).relu().sum(dim=1) / (A.sum(dim=(1, 2)) + eps)
    return pc
class FSIM(nn.Module):
    r"""Creates a criterion that measures the FSIM
    between an input and a target.
    Before applying :func:`fsim`, the input and target are converted from
    RGB to Y(IQ) and downsampled by a factor :math:`\frac{\min(H, W)}{256}`.
    Args:
        chromatic: Whether to use the chromatic channels (IQ) or not.
        downsample: Whether downsampling is enabled or not.
        kernel: A gradient kernel, :math:`(2, 1, K, K)`.
            If `None`, use the Scharr kernel instead.
        reduction: Specifies the reduction to apply to the output:
            `'none'` | `'mean'` | `'sum'`.
    Note:
        `**kwargs` are passed to :func:`fsim`.
    Shapes:
        input: :math:`(N, 3, H, W)`
        target: :math:`(N, 3, H, W)`
        output: :math:`(N,)` or :math:`()` depending on `reduction`
    Example:
        >>> criterion = FSIM().cuda()
        >>> x = torch.rand(5, 3, 256, 256, requires_grad=True).cuda()
        >>> y = torch.rand(5, 3, 256, 256).cuda()
        >>> l = 1 - criterion(x, y)
        >>> l.size()
        torch.Size([])
        >>> l.backward()
    """
    def __init__(
        self,
        chromatic: bool = True,
        downsample: bool = True,
        kernel: Tensor = None,
        reduction: str = 'mean',
        **kwargs,
    ):
        super().__init__()
        if kernel is None:
            kernel = gradient_kernel(scharr_kernel())
        self.register_buffer('kernel', kernel)
        # Empty placeholder; the real log-Gabor filters are built lazily in
        # forward() once the input's spatial size is known.
        self.register_buffer('filters', torch.zeros((0, 0, 0, 0)))
        self.convert = ColorConv('RGB', 'YIQ' if chromatic else 'Y')
        self.downsample = downsample
        self.reduction = reduction
        self.value_range = kwargs.get('value_range', 1.)
        self.kwargs = kwargs
    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        assert_type(
            input, target,
            device=self.kernel.device,
            dim_range=(4, 4),
            n_channels=3,
            value_range=(0., self.value_range),
        )
        # Downsample so that min(H, W) is about 256, as prescribed by FSIM
        if self.downsample:
            _, _, h, w = input.size()
            M = round(min(h, w) / 256)
            if M > 1:
                input = F.avg_pool2d(input, kernel_size=M, ceil_mode=True)
                target = F.avg_pool2d(target, kernel_size=M, ceil_mode=True)
        # RGB to Y(IQ)
        input = self.convert(input)
        target = self.convert(target)
        # Phase congruency; filters are cached and rebuilt only when the
        # spatial size of the input changes
        if self.filters.shape[-2:] != input.shape[-2:]:
            self.filters = pc_filters(input)
        pc_input = phase_congruency(input[:, :1], self.filters, self.value_range)
        pc_target = phase_congruency(target[:, :1], self.filters, self.value_range)
        # FSIM on the luminance channel (first slice)
        l = fsim(input, target, pc_input, pc_target, kernel=self.kernel, **self.kwargs)
        return reduce_tensor(l, self.reduction)
| 27.877193 | 101 | 0.560835 |
import math
import torch
import torch.fft as fft
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .utils import _jit, assert_type, reduce_tensor
from .utils import complex as cx
from .utils.color import ColorConv
from .utils.functional import (
scharr_kernel,
gradient_kernel,
filter_grid,
log_gabor,
channel_conv,
l2_norm,
)
@_jit
def fsim(
x: Tensor,
y: Tensor,
pc_x: Tensor,
pc_y: Tensor,
kernel: Tensor,
value_range: float = 1.,
t1: float = 0.85,
t2: float = 160. / (255. ** 2),
t3: float = 200. / (255. ** 2),
t4: float = 200. / (255. ** 2),
lmbda: float = 0.03,
) -> Tensor:
t2 *= value_range ** 2
t3 *= value_range ** 2
t4 *= value_range ** 2
y_x, y_y = x[:, :1], y[:, :1]
pc_m = torch.max(pc_x, pc_y)
s_pc = (2 * pc_x * pc_y + t1) / (pc_x ** 2 + pc_y ** 2 + t1)
pad = kernel.size(-1) // 2
g_x = l2_norm(channel_conv(y_x, kernel, padding=pad), dims=[1])
g_y = l2_norm(channel_conv(y_y, kernel, padding=pad), dims=[1])
s_g = (2 * g_x * g_y + t2) / (g_x ** 2 + g_y ** 2 + t2)
s_l = s_pc * s_g
if x.size(1) == 3:
i_x, i_y = x[:, 1], y[:, 1]
q_x, q_y = x[:, 2], y[:, 2]
s_i = (2 * i_x * i_y + t3) / (i_x ** 2 + i_y ** 2 + t3)
s_q = (2 * q_x * q_y + t4) / (q_x ** 2 + q_y ** 2 + t4)
s_iq = s_i * s_q
s_iq = cx.complx(s_iq, torch.zeros_like(s_iq))
s_iq_lambda = cx.real(cx.pow(s_iq, lmbda))
s_l = s_l * s_iq_lambda
fs = (s_l * pc_m).sum(dim=(-1, -2)) / pc_m.sum(dim=(-1, -2))
return fs
@_jit
def pc_filters(
    x: Tensor,
    scales: int = 4,
    orientations: int = 4,
    wavelength: float = 6.,
    factor: float = 2.,
    sigma_f: float = 0.5978,
    sigma_theta: float = 0.6545,
) -> Tensor:
    r"""Build the frequency-domain log-Gabor filter bank for phase congruency.

    The bank is the product of a low-pass prefilter, `scales` radial
    log-Gabor components (center wavelengths `wavelength * factor**i`)
    and `orientations` angular Gaussian components, evaluated on the
    frequency grid of `x`.  Returns a tensor of shape
    (scales, orientations, H, W).
    """
    r, theta = filter_grid(x)
    # Butterworth-style low-pass (cutoff 0.45, order 15) to damp the
    # highest frequencies where the log-Gabor tails would alias.
    lowpass = 1 / (1 + (r / 0.45) ** (2 * 15))
    radial = []
    for i in range(scales):
        f_0 = 1 / (wavelength * factor ** i)
        lg = log_gabor(r, f_0, sigma_f)
        radial.append(lg)
    radial = torch.stack(radial)
    cos_theta = torch.cos(theta)
    sin_theta = torch.sin(theta)
    # Filter orientations, evenly spread over [0, pi).
    theta_j = math.pi * torch.arange(orientations).to(x) / orientations
    theta_j = theta_j.reshape(orientations, 1, 1)
    # Angular distance (theta - theta_j) measured via sine/cosine so it
    # wraps correctly at +/- pi.
    # Bug fix: the previous version bound only `_cos` and then referenced
    # the undefined names `delta_sin`/`delta_cos`, raising a NameError on
    # every call.
    delta_sin = sin_theta * theta_j.cos() - cos_theta * theta_j.sin()
    delta_cos = cos_theta * theta_j.cos() + sin_theta * theta_j.sin()
    delta_theta = torch.atan2(delta_sin, delta_cos)
    angular = torch.exp(-delta_theta ** 2 / (2 * sigma_theta ** 2))
    # Broadcast scales along dim 0 and orientations along dim 1.
    filters = lowpass * radial[:, None] * angular[None, :]
    return filters
@_jit
def phase_congruency(
    x: Tensor,
    filters: Tensor,
    value_range: float = 1.,
    k: float = 2.,
    rescale: float = 1.7,
    eps: float = 1e-8,
) -> Tensor:
    r"""Compute a Kovesi-style phase-congruency map of `x`.

    `x` is assumed to be a single-channel batch (the `x[:, None]`
    broadcast below implies the channel axis is absorbed) and `filters`
    is a (scales, orientations, H, W) log-Gabor bank in the frequency
    domain — presumably the output of `pc_filters`; confirm against the
    caller.  `k` scales the noise threshold, `rescale` relaxes it.
    Returns one congruency map per batch element.
    """
    # Work on a 255-valued scale regardless of the input dynamic range.
    x = x * (255. / value_range)
    M_hat = filters
    # Spatial-domain filters (real part), needed for the noise
    # cross-correlation terms below.
    M = fft.ifft2(M_hat)
    M = cx.real(torch.view_as_real(M))
    # Even/odd filter responses per (scale, orientation) as (re, im) pairs.
    eo = fft.ifft2(fft.fft2(x[:, None]) * M_hat)
    eo = torch.view_as_real(eo)
    # Response amplitudes.
    A = cx.mod(eo)
    A2 = A[:, 0] ** 2
    # Noise-energy estimate from the smallest scale: for exponentially
    # distributed energy, mean = median / log 2.
    median_A2, _ = A2.flatten(-2).median(dim=-1)
    expect_A2 = median_A2 / math.log(2)
    expect_M2_hat = (M_hat[0] ** 2).mean(dim=(-1, -2))
    expect_MiMj = (M[:, None] * M[None, :]).sum(dim=(0, 1, 3, 4))
    expect_E2 = expect_A2 * expect_MiMj / expect_M2_hat
    # Rayleigh statistics of the noise amplitude (mean = sigma*sqrt(pi/2),
    # var = sigma^2*(2 - pi/2)) -> threshold T = mu + k * sigma.
    sigma_G = expect_E2.sqrt()
    mu_R = sigma_G * (math.pi / 2) ** 0.5
    sigma_R = sigma_G * (2 - math.pi / 2) ** 0.5
    T = mu_R + k * sigma_R
    # Empirical softening of the (over-)estimated threshold.
    T = T / rescale
    T = T[..., None, None]
    # Mean phase direction over scales, per orientation.
    FH = eo.sum(dim=1, keepdim=True)
    phi_eo = FH / (cx.mod(FH)[..., None] + eps)
    # Local energy: projection onto the mean phase minus the absolute
    # orthogonal deviation (cx.turn presumably rotates by 90 degrees —
    # verify against the cx helper).
    E = cx.dot(eo, phi_eo) - cx.dot(eo, cx.turn(phi_eo)).abs()
    E = E.sum(dim=1)
    # Noise-thresholded energy normalized by total amplitude.
    pc = (E - T).relu().sum(dim=1) / (A.sum(dim=(1, 2)) + eps)
    return pc
class FSIM(nn.Module):
    r"""Criterion module computing the FSIM(c) index between two RGB batches.

    Per forward pass: optional average-pool downsampling toward a ~256 px
    short side, RGB -> Y(IQ) conversion, phase congruency of the luminance
    planes (with a lazily built, size-cached filter bank), then the `fsim`
    index reduced according to `reduction`.
    """
    def __init__(
        self,
        chromatic: bool = True,
        downsample: bool = True,
        kernel: Tensor = None,
        reduction: str = 'mean',
        **kwargs,
    ):
        """
        Args:
            chromatic: use the chromatic FSIMc variant (YIQ) instead of Y only.
            downsample: pre-shrink inputs whose short side exceeds ~256 px.
            kernel: gradient kernel; defaults to a Scharr gradient kernel.
            reduction: reduction name forwarded to `reduce_tensor`.
            kwargs: extra keywords forwarded to `fsim` (may include
                `value_range`, which also bounds the input check).
        """
        super().__init__()
        if kernel is None:
            kernel = gradient_kernel(scharr_kernel())
        # Buffers so these tensors follow `.to(device)` / state_dict moves.
        self.register_buffer('kernel', kernel)
        # Empty placeholder; the real log-Gabor bank is built lazily in
        # `forward` once the input spatial size is known.
        self.register_buffer('filters', torch.zeros((0, 0, 0, 0)))
        self.convert = ColorConv('RGB', 'YIQ' if chromatic else 'Y')
        self.downsample = downsample
        self.reduction = reduction
        self.value_range = kwargs.get('value_range', 1.)
        self.kwargs = kwargs
    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """Return the (reduced) FSIM of `input` against `target`.

        Both tensors must be 4-D, 3-channel, on the module's device and
        within [0, value_range]; `assert_type` enforces this.
        """
        assert_type(
            input, target,
            device=self.kernel.device,
            dim_range=(4, 4),
            n_channels=3,
            value_range=(0., self.value_range),
        )
        if self.downsample:
            # Pool so the short side lands near 256 px (conventional FSIM
            # preprocessing); M is the pooling factor.
            _, _, h, w = input.size()
            M = round(min(h, w) / 256)
            if M > 1:
                input = F.avg_pool2d(input, kernel_size=M, ceil_mode=True)
                target = F.avg_pool2d(target, kernel_size=M, ceil_mode=True)
        # RGB -> Y(IQ), per the `chromatic` flag chosen at construction.
        input = self.convert(input)
        target = self.convert(target)
        # Rebuild the filter bank only when the spatial size changed.
        if self.filters.shape[-2:] != input.shape[-2:]:
            self.filters = pc_filters(input)
        # Phase congruency of the luminance channels.
        pc_input = phase_congruency(input[:, :1], self.filters, self.value_range)
        pc_target = phase_congruency(target[:, :1], self.filters, self.value_range)
        # Final per-sample FSIM scores, then the configured reduction.
        l = fsim(input, target, pc_input, pc_target, kernel=self.kernel, **self.kwargs)
        return reduce_tensor(l, self.reduction)
| true | true |
1c476c016e38e87c7a75eeb62acb50db4e2d2883 | 1,623 | py | Python | tests/test_exceptions.py | dobisel/yhttp | 4396c03905d71b801a92dead3504cc3ef7d98d79 | [
"MIT"
] | 10 | 2020-01-30T16:23:28.000Z | 2021-12-12T23:24:37.000Z | tests/test_exceptions.py | dobisel/yhttp | 4396c03905d71b801a92dead3504cc3ef7d98d79 | [
"MIT"
] | 1 | 2021-07-12T21:07:06.000Z | 2021-08-08T10:42:27.000Z | tests/test_exceptions.py | dobisel/yhttp | 4396c03905d71b801a92dead3504cc3ef7d98d79 | [
"MIT"
] | 1 | 2020-01-26T13:28:35.000Z | 2020-01-26T13:28:35.000Z | import pytest
from bddrest import status, response, when
from yhttp import statuses
def test_httpstatus(app, Given):
    """Raising and returning an HTTP status must both produce that response.

    Also verifies that with ``app.settings.debug`` disabled the body is the
    bare status line, without the CRLF-separated detail suffix.
    """
    @app.route()
    def get(req):
        # Raised status: the framework converts the exception to a response.
        raise statuses.badrequest()
    @app.route('/foo')
    def get(req):  # noqa: F811 -- deliberate name reuse; routed separately
        # Returned status: must behave exactly like the raised one.
        return statuses.badrequest()
    with Given():
        assert status == '400 Bad Request'
        # Debug mode appends details after the status line (CRLF-separated).
        assert response.text.startswith('400 Bad Request\r\n')
        assert response.headers['content-type'] == 'text/plain; charset=utf-8'
        app.settings.debug = False
        when()  # replay the same request, now with debug disabled
        assert status == '400 Bad Request'
        # No detail suffix when debug is off.
        assert response.text == '400 Bad Request'
        assert response.headers['content-type'] == 'text/plain; charset=utf-8'
        when('/foo')  # the returned (not raised) status variant
        assert status == 400
def test_unhandledexception(app, Given):
    """An exception the framework does not translate must propagate out."""
    class BoomError(Exception):
        """Marker exception unknown to yhttp."""

    @app.route()
    def get(req):
        # Simulate application code failing inside a handler.
        raise BoomError()

    with pytest.raises(BoomError), Given():
        pass
def test_redirect(app, Given):
    """302 Found must set the Location header and send an empty body."""
    target = 'http://example.com'

    @app.route()
    def get(req):
        raise statuses.found(target)

    with Given():
        assert status == 302
        assert response.text == ''
        assert response.headers['location'] == target
def test_modified(app, Given):
    """A 304 Not Modified response must carry no payload."""
    @app.route()
    def get(req):
        raise statuses.notmodified()

    with Given():
        assert response.text == ''
        assert status == 304
def test_nocontent(app, Given):
    """Custom verbs dispatch to same-named handlers; 204 has no body."""
    @app.route()
    def remove(req):
        # Handler name doubles as the HTTP verb (REMOVE).
        raise statuses.nocontent()

    with Given(verb='REMOVE'):
        assert response == ''
        assert status == 204
| 21.077922 | 78 | 0.601356 | import pytest
from bddrest import status, response, when
from yhttp import statuses
def test_httpstatus(app, Given):
@app.route()
def get(req):
raise statuses.badrequest()
@app.route('/foo')
def get(req):
return statuses.badrequest()
with Given():
assert status == '400 Bad Request'
assert response.text.startswith('400 Bad Request\r\n')
assert response.headers['content-type'] == 'text/plain; charset=utf-8'
app.settings.debug = False
when()
assert status == '400 Bad Request'
assert response.text == '400 Bad Request'
assert response.headers['content-type'] == 'text/plain; charset=utf-8'
when('/foo')
assert status == 400
def test_unhandledexception(app, Given):
class MyException(Exception):
pass
@app.route()
def get(req):
raise MyException()
with pytest.raises(MyException), Given():
pass
def test_redirect(app, Given):
@app.route()
def get(req):
raise statuses.found('http://example.com')
with Given():
assert status == 302
assert response.headers['location'] == 'http://example.com'
assert response.text == ''
def test_modified(app, Given):
@app.route()
def get(req):
raise statuses.notmodified()
with Given():
assert status == 304
assert response.text == ''
def test_nocontent(app, Given):
@app.route()
def remove(req):
raise statuses.nocontent()
with Given(verb='REMOVE'):
assert status == 204
assert response == ''
| true | true |
1c476cdd7fb60214bfeb7c01ad0034abc05bd191 | 3,585 | py | Python | plots/thresholds/vit.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | 1 | 2021-11-15T19:07:13.000Z | 2021-11-15T19:07:13.000Z | plots/thresholds/vit.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | null | null | null | plots/thresholds/vit.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from functools import partial
import itertools
import json
import logging
import os
import time
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import torch
from transformers import AutoModelForImageClassification, ViTForImageClassification
from torchvision.datasets import ImageNet
import datasets
from hfutils.preprocess import (
split_train_test,
vit_collate_fn,
ViTFeatureExtractorTransforms,
)
import pandas as pd
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from hfutils.logger import Logger
from hfutils.pipe.vit import ViTPyTorchPipeForImageClassification
from hfutils.calibration import temperature_scale
import sys
sys.path.append(".")
from plots.thresholds.utils import *
home_dir = "/mnt/raid0nvme1"
dataset_path = os.path.join(home_dir, "ImageNet")
model_keys = [
"XS",
"S",
"M",
"L",
]
model_names = [
"vit-tiny-patch16-224",
"vit-small-patch16-224",
"vit-base-patch16-224",
"vit-large-patch16-224",
]
device_map = [
"cuda:4",
"cuda:4",
"cuda:4",
"cuda:4",
]
model_paths = [
f"{home_dir}/HuggingFace/WinKawaks/vit-tiny-patch16-224",
f"{home_dir}/HuggingFace/WinKawaks/vit-small-patch16-224",
f"{home_dir}/HuggingFace/google/vit-base-patch16-224",
f"{home_dir}/HuggingFace/google/vit-large-patch16-224",
]
model_paths = dict(zip(model_keys, model_paths))
model_names = dict(zip(model_keys, model_names))
model_device = dict(zip(model_keys, device_map))
def model_inference(model, batch, temperature=None, device="cuda:0"):
    """Run a pipe-style model on one batch; optionally temperature-scale the logits.

    `batch` must contain a "pixel_values" tensor, which is moved to
    `device` and passed to `model` as a one-element tuple.
    """
    inputs = (batch["pixel_values"].to(device),)
    out = model(inputs)
    if temperature is None:
        return out
    return temperature_scale(out, temperature)
with open("tests/kernel_duration/latency.json", "r") as fp:
model_latency = json.load(fp)
with open("repository/repo_vit/meta.json", "r") as fp:
model_meta = json.load(fp)
dataset = ImageNet(
dataset_path,
split="train",
transform=ViTFeatureExtractorTransforms(model_paths[model_keys[0]], split="val"),
)
dataset, _ = split_train_test(dataset, 0.98)
num_labels = len(dataset)
dataloader = DataLoader(
dataset, shuffle=True, collate_fn=vit_collate_fn, batch_size=32, drop_last=True,
)
models = load_models(
model_keys,
model_paths,
model_device,
ViTForImageClassification,
ViTPyTorchPipeForImageClassification,
)
n_models = len(model_keys)
model_outputs = dict(zip(model_keys, [list() for _ in range(n_models)]))
m = torch.nn.Softmax(dim=-1)
labels = []
for batch in tqdm(dataloader, desc="Collect Train Data"):
label = batch["labels"]
for i, key in enumerate(model_keys):
logits = model_inference(
models[key],
batch,
device=model_device[key],
temperature=model_meta[model_names[key]]["temperature"],
)
model_outputs[key].append(logits)
labels.append(label)
model_probs, model_ans, model_outputs, labels = postprocessing_inference(
model_keys, model_outputs, labels, m
)
all_thresholds = list(
itertools.product(np.linspace(0, 1, endpoint=True, num=100), repeat=n_models - 1)
)
max_size = 100000
if len(all_thresholds) > max_size:
rnd_idx = np.random.randint(0, len(all_thresholds), max_size)
all_thresholds = [all_thresholds[i] for i in rnd_idx]
profile_thresholds(
model_keys,
model_probs,
model_ans,
model_latency,
model_names,
all_thresholds,
"vit",
)
| 24.724138 | 85 | 0.72106 | from dataclasses import dataclass, field
from functools import partial
import itertools
import json
import logging
import os
import time
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import torch
from transformers import AutoModelForImageClassification, ViTForImageClassification
from torchvision.datasets import ImageNet
import datasets
from hfutils.preprocess import (
split_train_test,
vit_collate_fn,
ViTFeatureExtractorTransforms,
)
import pandas as pd
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from hfutils.logger import Logger
from hfutils.pipe.vit import ViTPyTorchPipeForImageClassification
from hfutils.calibration import temperature_scale
import sys
sys.path.append(".")
from plots.thresholds.utils import *
home_dir = "/mnt/raid0nvme1"
dataset_path = os.path.join(home_dir, "ImageNet")
model_keys = [
"XS",
"S",
"M",
"L",
]
model_names = [
"vit-tiny-patch16-224",
"vit-small-patch16-224",
"vit-base-patch16-224",
"vit-large-patch16-224",
]
device_map = [
"cuda:4",
"cuda:4",
"cuda:4",
"cuda:4",
]
model_paths = [
f"{home_dir}/HuggingFace/WinKawaks/vit-tiny-patch16-224",
f"{home_dir}/HuggingFace/WinKawaks/vit-small-patch16-224",
f"{home_dir}/HuggingFace/google/vit-base-patch16-224",
f"{home_dir}/HuggingFace/google/vit-large-patch16-224",
]
model_paths = dict(zip(model_keys, model_paths))
model_names = dict(zip(model_keys, model_names))
model_device = dict(zip(model_keys, device_map))
def model_inference(model, batch, temperature=None, device="cuda:0"):
pixel_values = batch["pixel_values"].to(device)
logits = model((pixel_values,))
if temperature is not None:
logits = temperature_scale(logits, temperature)
return logits
with open("tests/kernel_duration/latency.json", "r") as fp:
model_latency = json.load(fp)
with open("repository/repo_vit/meta.json", "r") as fp:
model_meta = json.load(fp)
dataset = ImageNet(
dataset_path,
split="train",
transform=ViTFeatureExtractorTransforms(model_paths[model_keys[0]], split="val"),
)
dataset, _ = split_train_test(dataset, 0.98)
num_labels = len(dataset)
dataloader = DataLoader(
dataset, shuffle=True, collate_fn=vit_collate_fn, batch_size=32, drop_last=True,
)
models = load_models(
model_keys,
model_paths,
model_device,
ViTForImageClassification,
ViTPyTorchPipeForImageClassification,
)
n_models = len(model_keys)
model_outputs = dict(zip(model_keys, [list() for _ in range(n_models)]))
m = torch.nn.Softmax(dim=-1)
labels = []
for batch in tqdm(dataloader, desc="Collect Train Data"):
label = batch["labels"]
for i, key in enumerate(model_keys):
logits = model_inference(
models[key],
batch,
device=model_device[key],
temperature=model_meta[model_names[key]]["temperature"],
)
model_outputs[key].append(logits)
labels.append(label)
model_probs, model_ans, model_outputs, labels = postprocessing_inference(
model_keys, model_outputs, labels, m
)
all_thresholds = list(
itertools.product(np.linspace(0, 1, endpoint=True, num=100), repeat=n_models - 1)
)
max_size = 100000
if len(all_thresholds) > max_size:
rnd_idx = np.random.randint(0, len(all_thresholds), max_size)
all_thresholds = [all_thresholds[i] for i in rnd_idx]
profile_thresholds(
model_keys,
model_probs,
model_ans,
model_latency,
model_names,
all_thresholds,
"vit",
)
| true | true |
1c476d41ac879c082652e00bd7f7e69e609c5a8a | 14,750 | py | Python | game/gamesrc/objects/world/quests.py | abbacode/avaloria | 02e1805ac6e74543c96408b7951429f94bc140ca | [
"ClArtistic"
] | null | null | null | game/gamesrc/objects/world/quests.py | abbacode/avaloria | 02e1805ac6e74543c96408b7951429f94bc140ca | [
"ClArtistic"
] | null | null | null | game/gamesrc/objects/world/quests.py | abbacode/avaloria | 02e1805ac6e74543c96408b7951429f94bc140ca | [
"ClArtistic"
] | null | null | null | import random
from prettytable import PrettyTable
from src.utils import create, utils
from ev import Object
from game.gamesrc.objects import copyreader
class QuestManager(Object):
    """
    This object is attached to the character and manages all quests received.
    """
    # NOTE: Python 2 / Evennia code — `self.db` is Evennia's persistent
    # attribute store, hence the read-modify-write-back pattern throughout.
    def at_object_creation(self):
        """
        Set some typical attributes from the management object.
        """
        self.db.active_quests = {}
        self.db.completed_quests = {}
        self.db.to_remove_in_active = {}
        self.db.character = None
        self.db.is_equipped = False
    def add_quest(self, quest_to_add):
        """Register `quest_to_add` under its name in the active-quest map."""
        active_quests = self.db.active_quests
        active_quests['%s' % quest_to_add.name] = quest_to_add
        self.db.active_quests = active_quests
    def complete_quest(self, quest_to_remove):
        """Pay out a finished quest's rewards and queue it for removal.

        Awards exp, gold, loot and faction standing (when configured on the
        quest), marks the quest completed, and records it both in
        `completed_quests` and in `to_remove_in_active` for the next
        `cleanup_completed_quests` pass.
        """
        character = self.db.character
        character.msg("{yYou have completed the quest: %s!{n" % quest_to_remove.name)
        if quest_to_remove.db.exp_reward is not None:
            character.award_exp(quest_to_remove.db.exp_reward)
        if quest_to_remove.db.gold_reward is not None:
            character.award_gold(quest_to_remove.db.gold_reward)
        if quest_to_remove.db.loot_reward is not None:
            for item in quest_to_remove.db.loot_reward:
                item.move_to(character, quiet=False)
        if quest_to_remove.db.faction_reward is not None:
            # NOTE(review): if `db.faction` is None while `faction_reward`
            # is set, the membership tests below raise TypeError — confirm
            # quests always set both together.
            print "QuestManager->complete_quest: trying deity faction."
            # `hasattr(..., 'lower')` distinguishes a single faction string
            # from a list of faction names.
            if not hasattr(quest_to_remove.db.faction, 'lower'):
                print "QuestManager->complete_quest: trying faction_indexing"
                if character.db.attributes['deity'] in "an'karith":
                    faction_index = quest_to_remove.db.faction.index("karith")
                elif character.db.attributes['deity'] in "green warden":
                    faction_index = quest_to_remove.db.faction.index("warden")
                else:
                    faction_index = quest_to_remove.db.faction.index(character.db.attributes['deity'])
                faction = quest_to_remove.db.faction[faction_index]
            else:
                faction = quest_to_remove.db.faction
            # Normalize deity display names to the keys used in
            # `character.db.factions`.
            if "an'karith" in faction:
                faction = 'karith'
            elif "green warden" in faction:
                faction = "warden"
            factions = character.db.factions
            factions[faction] += quest_to_remove.db.faction_reward
            character.db.factions = factions
        quest_to_remove.db.completed = True
        completed_quests = self.db.completed_quests
        completed_quests[quest_to_remove.name] = quest_to_remove
        self.db.to_remove_in_active[quest_to_remove.name] = quest_to_remove
        self.db.completed_quests = completed_quests
    def complete_quest_objective(self, quest, objective):
        """Notify the character and delegate objective completion to the quest.

        NOTE(review): `Quest.complete_objective` is defined with three
        arguments (objectives, objective, caller); this one-argument call
        would raise TypeError if reached — confirm intended signature.
        """
        character = self.db.character
        character.msg("{yYou have completed a quest objective for %s!{n" % quest.name)
        quest.complete_objective(objective)
    def cleanup_completed_quests(self):
        """Drop every quest queued in `to_remove_in_active` from the active map."""
        # The initial `to_remove` binding is immediately unused; it is
        # rebound to {} below before being written back.
        to_remove = self.db.to_remove_in_active
        if len(self.db.to_remove_in_active) > 0:
            for quest in self.db.to_remove_in_active:
                print "attempting to remove the quest from active quests"
                self.remove_quest(self.db.to_remove_in_active[quest])
        to_remove = {}
        self.db.to_remove_in_active = to_remove
    def remove_quest(self, quest_to_remove):
        """Delete `quest_to_remove` (by name) from the active-quest map."""
        active_quests = self.db.active_quests
        del active_quests[quest_to_remove.name]
        self.db.active_quests = active_quests
    def check_quest_flags(self, mob=None, item=None):
        """Advance objectives of every active quest for a kill/gather event.

        `mob` is a just-killed mob, `item` a just-acquired item; objective
        matching is done by substring tests against the objective's `type`
        string.  NOTE: the 'build'/'use' branches chain off the same
        if/elif as the `item` checks, so they only run when `item is None`.
        """
        character = self.db.character
        print character.db.lair.db.structure_manager_id
        structure_manager = self.search(character.db.lair.db.structure_manager_id, location=character.db.lair, global_search=False)
        active_quests = self.db.active_quests
        # NOTE(review): this is an alias, not a copy — both names refer to
        # the same dict while completed quests are queued for removal.
        active_quests_temp = active_quests
        print "QuestManager.check_quest_flags: Checking active quests"
        for quest in active_quests_temp:
            quest_obj = active_quests[quest]
            quest_objectives = quest_obj.db.objectives
            print "QuestManager.check_quest_flags: Checking objectives for %s" % quest_obj.name
            for objective in quest_objectives:
                print "QuestManager.check_quest_flags: Checking %s" % objective
                if quest_objectives[objective]['completed']:
                    continue
                if mob is not None:
                    # Match kill objectives by mob type, name, boss flag,
                    # deity or dungeon type, from most to least specific.
                    if 'kill_%s' % mob.db.mob_type in quest_objectives[objective]['type']:
                        if 'kill_%s' % mob.db.mob_type in mob.aliases:
                            quest_obj.tick_counter_objective(objective, caller=self.db.character)
                        elif '%s' % quest_objectives[objective]['type'] in mob.aliases:
                            quest_obj.tick_counter_objective(objective, caller=self.db.character)
                    elif 'kill_%s' % mob.name.lower() in quest_objectives[objective]['type']:
                        quest_obj.tick_counter_objective(objective, caller=self.db.character)
                    elif 'boss_mob' in mob.aliases and 'kill_boss' in quest_objectives[objective]['type']:
                        quest_obj.tick_counter_objective(objective, caller=self.db.character)
                    elif 'kill_%s' % mob.db.deity in quest_objectives[objective]['type']:
                        quest_obj.tick_counter_objective(objective, caller=self.db.character)
                    elif 'kill_%s' % mob.location.db.dungeon_type in quest_objectives[objective]['type']:
                        quest_obj.tick_counter_objective(objective, caller=self.db.character)
                    elif 'kill' in quest_objectives[objective]['type']:
                        # Generic kill objective: any mob counts.
                        if 'kill' in mob.aliases and 'counter' in quest_objectives[objective].keys():
                            quest_obj.tick_counter_objective(objective, caller=self.db.character)
                if item is not None:
                    # Gather objectives matched by item type or name.
                    if 'gather_%s' % item.db.type in quest_objectives[objective]['type']:
                        quest_obj.tick_counter_objective(objective, caller=self.db.character)
                    elif 'gather_%s' % item.name.lower() in quest_objectives[objective]['type']:
                        quest_obj.tick_counter_objective(objective, caller=self.db.character)
                    elif 'loot_rare_item' in quest_objectives[objective]['type'] and item.db.lootset == 'rare':
                        quest_obj.tick_counter_objective(objective, caller=self.db.character)
                elif 'build' in quest_objectives[objective]['type']:
                    # Structure objectives: satisfied once the lair's
                    # structure manager reports the building built.
                    if 'gold_mine' in quest_objectives[objective]['type']:
                        if "Gold Mine" in structure_manager.db.already_built:
                            quest_obj.tick_counter_objective(objective, caller=character)
                    elif 'training_ground' in quest_objectives[objective]['type']:
                        if 'Training Grounds' in structure_manager.db.already_built:
                            quest_obj.tick_counter_objective(objective, caller=character)
                    elif 'defenses' in quest_objectives[objective]['type']:
                        if 'Defenses' in structure_manager.db.already_built:
                            quest_obj.tick_counter_objective(objective, caller=character)
                    elif 'level_structure' in quest_objectives[objective]['type']:
                        # Any structure above level 1 satisfies this.
                        for struct in structure_manager.db.structures:
                            if structure_manager.db.structures[struct].db.level > 1:
                                quest_obj.tick_counter_objective(objective, caller=character)
                                break
                elif 'use' in quest_objectives[objective]['type']:
                    # 'use_<command>' objectives compare against the last
                    # command the character entered.
                    command = quest_objectives[objective]['type'].split('_')[1]
                    print command, character.last_cmd
                    try:
                        if character.last_cmd.strip() == command.strip():
                            quest_obj.tick_counter_objective(objective, caller=character)
                    except AttributeError:
                        # `last_cmd` not set yet; abort the whole scan.
                        return
        self.cleanup_completed_quests()
    # def check_prereqs(self):
    def find_quest(self, quest, completed=False):
        """Return the quest object named `quest`, or None if not found.

        Searches `completed_quests` when `completed` is True, otherwise
        `active_quests`.
        """
        active_quests = self.db.active_quests
        completed_quests = self.db.completed_quests
        if completed:
            if quest in completed_quests:
                quest = completed_quests[quest]
                return quest
            else:
                return None
        if quest in active_quests:
            quest = active_quests[quest]
            return quest
        else:
            return None
    def quest_log_short_display(self, caller):
        """Send `caller` a one-row-per-quest table of active quests."""
        active_quests = self.db.active_quests
        if len(active_quests) < 1:
            caller.msg("You have no active quests currently.")
            return
        table = PrettyTable()
        # NOTE: `_set_field_names` is PrettyTable's private API; newer
        # versions expose `field_names` instead.
        table._set_field_names(["Name", "Description", "Level", "Objectives"])
        for quest in active_quests:
            obj = active_quests[quest]
            objective_string = obj.format_objectives()
            table.add_row(["%s" % obj.name, "%s" % obj.db.short_description, "%s" % obj.db.quest_level, "%s" % objective_string])
        msg = table.get_string()
        caller.msg(msg)
        caller.msg("For more detailed information, try help <questname>")
    def completed_quests_view(self, caller):
        """Send `caller` a formatted listing of all completed quests."""
        completed_quests = self.db.completed_quests
        completed_number = len(completed_quests)
        if len(completed_quests) < 1:
            caller.msg("You have no completed quests.")
            return
        # Doubled braces escape literal '{' for the Evennia color codes.
        titles = '{{c{0:<25} {1:<30} {2}{{n'.format('Name', 'Description', 'Level')
        caller.msg(titles)
        caller.msg('{c--------------------------------------------------------------------{n')
        m = ""
        for quest in completed_quests:
            quest_obj = completed_quests[quest]
            m += '{{C{0:<25}{{n {1:<30} {2}\n{{n'.format(quest_obj.name, quest_obj.db.short_description, quest_obj.db.quest_level)
        caller.msg(m)
        caller.msg('{c--------------------------------------------------------------------{n')
        caller.msg("{CCompleted Quests:{n %s" % completed_number)
    def quest_objectives_display(self, caller, quest):
        """Show `caller` the objective progress of the active quest `quest`.

        `quest` is a quest *name* (string); it is title-cased before lookup.
        """
        caller.msg("%s" % quest.title())
        quest = self.find_quest(quest.title())
        if quest is None:
            # NOTE(review): by this point `quest` has been rebound to None,
            # so the message always prints 'named: None' — confirm intent.
            caller.msg("You are not on any quest named: %s" % quest)
            return
        else:
            titles = '{0:<25} {1:<10}'.format('Short Description', 'Progress')
            caller.msg(titles)
            caller.msg("{c------------------------------------------------------------------")
            objectives_message = quest.format_objectives()
            caller.msg(objectives_message)
            caller.msg("{c------------------------------------------------------------------")
class Quest(Object):
    """
    Typical quest object.
    """
    # NOTE: Python 2 / Evennia code — `self.db` is the persistent store.
    def at_object_creation(self):
        # Default reward/requirement attributes; quest builders override
        # these after creation.
        self.db.level_requirement = 1
        self.db.prereq = None
        self.db.repeatable = False
        self.db.gold_reward = 10
        self.db.exp_reward = 10
        self.db.loot_reward = []
        self.db.faction_reward = 10
        self.db.faction = None
        self.db.objectives = {}
        self.db.quest_level = 1
        self.db.quest_type = None
        self.db.long_description = ""
        self.db.short_description = "Something short, and sweet"
        self.db.exclusions = None
        self.db.completed = False
    def set_quest_aliases(self):
        """Derive an alias from the quest type (kill/gather/fedex/explore).

        NOTE(review): `quest_type` defaults to None; calling this before it
        is set raises TypeError on the `in` test — confirm callers set it.
        """
        if 'kill' in self.db.quest_type:
            self.aliases = ['kill']
        elif 'gather' in self.db.quest_type:
            self.aliases = ['gather']
        elif 'fedex' in self.db.quest_type:
            self.aliases = ['fedex']
        elif 'explore' in self.db.quest_type:
            self.aliases = ['explore']
    def add_objective(self, objectives_dict):
        """Store an objective dict keyed by its 'objective_name' field."""
        objectives = self.db.objectives
        objectives[objectives_dict['objective_name']] = objectives_dict
        self.db.objectives = objectives
    def complete_objective(self, objectives, objective, caller):
        """Mark `objective` done, notify `caller`, re-check the whole quest.

        NOTE(review): QuestManager.complete_quest_objective invokes this
        with a single argument, which would raise TypeError — confirm the
        intended signature.
        """
        objectives[objective]['completed'] = True
        caller.msg("{yYou have completed a quest objective!{n")
        self.check_objectives(objectives,caller)
    def tick_counter_objective(self, objective, caller):
        """Advance `objective`'s counter by one and complete it at threshold."""
        objectives = self.db.objectives
        objectives[objective]['counter'] = objectives[objective]['counter'] + 1
        caller.msg("{yQuest objective advanced! %s: %s/%s{n" % (objectives[objective]['objective_name'], objectives[objective]['counter'], objectives[objective]['threshold']))
        # Clamp so the display never exceeds the threshold.
        if objectives[objective]['counter'] > objectives[objective]['threshold']:
            objectives[objective]['counter'] = objectives[objective]['threshold']
        if objectives[objective]['counter'] >= objectives[objective]['threshold']:
            self.complete_objective(objectives, objective, caller)
        self.db.objectives = objectives
    def check_objectives(self, objectives, caller):
        """Complete the quest via the caller's quest log once no objective is open."""
        quest_log = caller.db.quest_log
        # The early `return` fires on the first incomplete objective, so
        # the `is_false` flag is effectively redundant afterwards.
        is_false = False
        for objective in objectives:
            if objectives[objective]['completed'] is False:
                is_false = True
                return
        if is_false is not True:
            self.db.completed = True
            quest_log.complete_quest(self)
    def set_description(self, copy_file):
        """Load the quest's long description from a copy file."""
        self.db.long_description = copyreader.read_file(copy_file)
    def add_help_entry(self):
        """Create a help entry for this quest, visible only while on it."""
        # NOTE(review): `entry` is never used; the call's side effect
        # (registering the help entry) is presumably what matters.
        entry = create.create_help_entry(self.name, self.db.long_description, category="Quests", locks="view:onquest(%s)" % self.name)
    def format_objectives(self):
        """Return 'name counter/threshold' lines for all objectives.

        The single-objective branch merely omits the trailing newline,
        which the final rstrip would strip anyway.
        """
        objectives = self.db.objectives
        m = ""
        for objective in objectives:
            if len(objectives) < 2:
                m += '{0:<30} {1}/{2}'.format(objectives[objective]['objective_name'], objectives[objective]['counter'], objectives[objective]['threshold'])
            else:
                m += '{0:<30} {1}/{2}\n'.format(objectives[objective]['objective_name'], objectives[objective]['counter'], objectives[objective]['threshold'])
        return m.rstrip('\n')
| 48.360656 | 175 | 0.592746 | import random
from prettytable import PrettyTable
from src.utils import create, utils
from ev import Object
from game.gamesrc.objects import copyreader
class QuestManager(Object):
"""
This object is attached to the character and manages all quests received.
"""
def at_object_creation(self):
"""
Set some typical attributes from the management object.
"""
self.db.active_quests = {}
self.db.completed_quests = {}
self.db.to_remove_in_active = {}
self.db.character = None
self.db.is_equipped = False
def add_quest(self, quest_to_add):
active_quests = self.db.active_quests
active_quests['%s' % quest_to_add.name] = quest_to_add
self.db.active_quests = active_quests
def complete_quest(self, quest_to_remove):
character = self.db.character
character.msg("{yYou have completed the quest: %s!{n" % quest_to_remove.name)
if quest_to_remove.db.exp_reward is not None:
character.award_exp(quest_to_remove.db.exp_reward)
if quest_to_remove.db.gold_reward is not None:
character.award_gold(quest_to_remove.db.gold_reward)
if quest_to_remove.db.loot_reward is not None:
for item in quest_to_remove.db.loot_reward:
item.move_to(character, quiet=False)
if quest_to_remove.db.faction_reward is not None:
print "QuestManager->complete_quest: trying deity faction."
if not hasattr(quest_to_remove.db.faction, 'lower'):
print "QuestManager->complete_quest: trying faction_indexing"
if character.db.attributes['deity'] in "an'karith":
faction_index = quest_to_remove.db.faction.index("karith")
elif character.db.attributes['deity'] in "green warden":
faction_index = quest_to_remove.db.faction.index("warden")
else:
faction_index = quest_to_remove.db.faction.index(character.db.attributes['deity'])
faction = quest_to_remove.db.faction[faction_index]
else:
faction = quest_to_remove.db.faction
if "an'karith" in faction:
faction = 'karith'
elif "green warden" in faction:
faction = "warden"
factions = character.db.factions
factions[faction] += quest_to_remove.db.faction_reward
character.db.factions = factions
quest_to_remove.db.completed = True
completed_quests = self.db.completed_quests
completed_quests[quest_to_remove.name] = quest_to_remove
self.db.to_remove_in_active[quest_to_remove.name] = quest_to_remove
self.db.completed_quests = completed_quests
def complete_quest_objective(self, quest, objective):
character = self.db.character
character.msg("{yYou have completed a quest objective for %s!{n" % quest.name)
quest.complete_objective(objective)
def cleanup_completed_quests(self):
to_remove = self.db.to_remove_in_active
if len(self.db.to_remove_in_active) > 0:
for quest in self.db.to_remove_in_active:
print "attempting to remove the quest from active quests"
self.remove_quest(self.db.to_remove_in_active[quest])
to_remove = {}
self.db.to_remove_in_active = to_remove
def remove_quest(self, quest_to_remove):
active_quests = self.db.active_quests
del active_quests[quest_to_remove.name]
self.db.active_quests = active_quests
def check_quest_flags(self, mob=None, item=None):
character = self.db.character
print character.db.lair.db.structure_manager_id
structure_manager = self.search(character.db.lair.db.structure_manager_id, location=character.db.lair, global_search=False)
active_quests = self.db.active_quests
active_quests_temp = active_quests
print "QuestManager.check_quest_flags: Checking active quests"
for quest in active_quests_temp:
quest_obj = active_quests[quest]
quest_objectives = quest_obj.db.objectives
print "QuestManager.check_quest_flags: Checking objectives for %s" % quest_obj.name
for objective in quest_objectives:
print "QuestManager.check_quest_flags: Checking %s" % objective
if quest_objectives[objective]['completed']:
continue
if mob is not None:
if 'kill_%s' % mob.db.mob_type in quest_objectives[objective]['type']:
if 'kill_%s' % mob.db.mob_type in mob.aliases:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif '%s' % quest_objectives[objective]['type'] in mob.aliases:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'kill_%s' % mob.name.lower() in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'boss_mob' in mob.aliases and 'kill_boss' in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'kill_%s' % mob.db.deity in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'kill_%s' % mob.location.db.dungeon_type in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'kill' in quest_objectives[objective]['type']:
if 'kill' in mob.aliases and 'counter' in quest_objectives[objective].keys():
quest_obj.tick_counter_objective(objective, caller=self.db.character)
if item is not None:
if 'gather_%s' % item.db.type in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'gather_%s' % item.name.lower() in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'loot_rare_item' in quest_objectives[objective]['type'] and item.db.lootset == 'rare':
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'build' in quest_objectives[objective]['type']:
if 'gold_mine' in quest_objectives[objective]['type']:
if "Gold Mine" in structure_manager.db.already_built:
quest_obj.tick_counter_objective(objective, caller=character)
elif 'training_ground' in quest_objectives[objective]['type']:
if 'Training Grounds' in structure_manager.db.already_built:
quest_obj.tick_counter_objective(objective, caller=character)
elif 'defenses' in quest_objectives[objective]['type']:
if 'Defenses' in structure_manager.db.already_built:
quest_obj.tick_counter_objective(objective, caller=character)
elif 'level_structure' in quest_objectives[objective]['type']:
for struct in structure_manager.db.structures:
if structure_manager.db.structures[struct].db.level > 1:
quest_obj.tick_counter_objective(objective, caller=character)
break
elif 'use' in quest_objectives[objective]['type']:
command = quest_objectives[objective]['type'].split('_')[1]
print command, character.last_cmd
try:
if character.last_cmd.strip() == command.strip():
quest_obj.tick_counter_objective(objective, caller=character)
except AttributeError:
return
self.cleanup_completed_quests()
def find_quest(self, quest, completed=False):
active_quests = self.db.active_quests
completed_quests = self.db.completed_quests
if completed:
if quest in completed_quests:
quest = completed_quests[quest]
return quest
else:
return None
if quest in active_quests:
quest = active_quests[quest]
return quest
else:
return None
def quest_log_short_display(self, caller):
    """Send `caller` a one-row-per-quest table of all active quests.

    Shows name, short description, level and the formatted objective
    summary; prints a notice instead when no quests are active.
    """
    active_quests = self.db.active_quests
    if len(active_quests) < 1:
        caller.msg("You have no active quests currently.")
        return
    table = PrettyTable()
    # NOTE(review): _set_field_names is a private PrettyTable API; the
    # public `table.field_names = [...]` setter would be safer -- confirm
    # against the bundled prettytable version before changing.
    table._set_field_names(["Name", "Description", "Level", "Objectives"])
    for quest in active_quests:
        obj = active_quests[quest]
        objective_string = obj.format_objectives()
        table.add_row(["%s" % obj.name, "%s" % obj.db.short_description, "%s" % obj.db.quest_level, "%s" % objective_string])
    msg = table.get_string()
    caller.msg(msg)
    caller.msg("For more detailed information, try help <questname>")
def completed_quests_view(self, caller):
    """Send `caller` a colour-coded listing of every completed quest.

    The `{c` / `{C` / `{n` sequences are MUD colour codes; inside
    str.format templates they are escaped as `{{c` etc. -- do not
    "fix" the doubled braces.
    """
    completed_quests = self.db.completed_quests
    completed_number = len(completed_quests)
    if len(completed_quests) < 1:
        caller.msg("You have no completed quests.")
        return
    titles = '{{c{0:<25} {1:<30} {2}{{n'.format('Name', 'Description', 'Level')
    caller.msg(titles)
    caller.msg('{c--------------------------------------------------------------------{n')
    m = ""
    # Accumulate one formatted line per completed quest, then send in one msg.
    for quest in completed_quests:
        quest_obj = completed_quests[quest]
        m += '{{C{0:<25}{{n {1:<30} {2}\n{{n'.format(quest_obj.name, quest_obj.db.short_description, quest_obj.db.quest_level)
    caller.msg(m)
    caller.msg('{c--------------------------------------------------------------------{n')
    caller.msg("{CCompleted Quests:{n %s" % completed_number)
def quest_objectives_display(self, caller, quest):
    """Show `caller` the objective-progress table for the named quest.

    Args:
        caller: object receiving the output via `msg()`.
        quest: quest name (string); matched against the log in title case.
    """
    quest_name = quest.title()
    caller.msg("%s" % quest_name)
    quest = self.find_quest(quest_name)
    if quest is None:
        # Bug fix: report the name that was searched for; previously this
        # formatted `quest`, which find_quest() had just set to None, so
        # the message always read "... named: None".
        caller.msg("You are not on any quest named: %s" % quest_name)
        return
    titles = '{0:<25} {1:<10}'.format('Short Description', 'Progress')
    caller.msg(titles)
    caller.msg("{c------------------------------------------------------------------")
    objectives_message = quest.format_objectives()
    caller.msg(objectives_message)
    caller.msg("{c------------------------------------------------------------------")
class Quest(Object):
    """
    Typical quest object: holds rewards, requirements and a dict of
    objectives, and drives objective/quest completion.
    """

    def at_object_creation(self):
        """Set default reward/requirement attributes; tuned per quest later."""
        self.db.level_requirement = 1
        self.db.prereq = None
        self.db.repeatable = False
        self.db.gold_reward = 10
        self.db.exp_reward = 10
        self.db.loot_reward = []
        self.db.faction_reward = 10
        self.db.faction = None
        # Mapping: objective_name -> objective dict (see add_objective()).
        self.db.objectives = {}
        self.db.quest_level = 1
        self.db.quest_type = None
        self.db.long_description = ""
        self.db.short_description = "Something short, and sweet"
        self.db.exclusions = None
        self.db.completed = False

    def set_quest_aliases(self):
        """Alias the quest by its broad category, derived from quest_type.

        First matching category wins, mirroring the original elif chain.
        """
        for category in ('kill', 'gather', 'fedex', 'explore'):
            if category in self.db.quest_type:
                self.aliases = [category]
                return

    def add_objective(self, objectives_dict):
        """Register an objective dict, keyed by its 'objective_name'.

        Re-assigning self.db.objectives persists the change through the
        attribute layer.
        """
        objectives = self.db.objectives
        objectives[objectives_dict['objective_name']] = objectives_dict
        self.db.objectives = objectives

    def complete_objective(self, objectives, objective, caller):
        """Mark one objective done, notify caller, then re-check the quest."""
        objectives[objective]['completed'] = True
        caller.msg("{yYou have completed a quest objective!{n")
        self.check_objectives(objectives, caller)

    def tick_counter_objective(self, objective, caller):
        """Advance a counter-type objective by one and report progress.

        The counter is clamped at the threshold. Bug fix: completion now
        fires only while the objective is not yet completed; previously
        every tick past the threshold re-ran complete_objective() (the
        clamp kept counter == threshold, so `>=` stayed true), which
        could re-complete the whole quest and re-grant rewards.
        """
        objectives = self.db.objectives
        already_done = objectives[objective].get('completed', False)
        objectives[objective]['counter'] = objectives[objective]['counter'] + 1
        # Progress message is sent before clamping, matching prior behavior.
        caller.msg("{yQuest objective advanced! %s: %s/%s{n" % (
            objectives[objective]['objective_name'],
            objectives[objective]['counter'],
            objectives[objective]['threshold']))
        if objectives[objective]['counter'] > objectives[objective]['threshold']:
            objectives[objective]['counter'] = objectives[objective]['threshold']
        if (objectives[objective]['counter'] >= objectives[objective]['threshold']
                and not already_done):
            self.complete_objective(objectives, objective, caller)
        self.db.objectives = objectives

    def check_objectives(self, objectives, caller):
        """Complete the quest once every objective is flagged completed.

        (Simplified: the old dead `is_false` flag was removed -- the loop
        already returned early on the first incomplete objective, so the
        flag could never be True after the loop.)
        """
        quest_log = caller.db.quest_log
        for objective in objectives:
            if objectives[objective]['completed'] is False:
                # At least one objective outstanding; nothing to do yet.
                return
        self.db.completed = True
        quest_log.complete_quest(self)

    def set_description(self, copy_file):
        """Load the long description from a copy file on disk."""
        self.db.long_description = copyreader.read_file(copy_file)

    def add_help_entry(self):
        """Publish the quest text as a help entry, viewable only while on the quest.

        Fix: the created entry used to be bound to an unused local; the
        call is made purely for its side effect.
        """
        create.create_help_entry(self.name, self.db.long_description,
                                 category="Quests",
                                 locks="view:onquest(%s)" % self.name)

    def format_objectives(self):
        """Return 'name  counter/threshold' lines, one per objective.

        Joining with '\\n' reproduces the old append-then-rstrip output
        for zero, one or many objectives.
        """
        objectives = self.db.objectives
        lines = []
        for objective in objectives:
            data = objectives[objective]
            lines.append('{0:<30} {1}/{2}'.format(
                data['objective_name'], data['counter'], data['threshold']))
        return '\n'.join(lines)
| false | true |
1c476e3ec222661def123f38fb26ec5839432659 | 1,087 | py | Python | src/utils/etc.py | slowwavesleep/NeuralMorphemeSegmenter | b32f47ecc380262755bf436cf793f35901919f0f | [
"MIT"
] | null | null | null | src/utils/etc.py | slowwavesleep/NeuralMorphemeSegmenter | b32f47ecc380262755bf436cf793f35901919f0f | [
"MIT"
] | null | null | null | src/utils/etc.py | slowwavesleep/NeuralMorphemeSegmenter | b32f47ecc380262755bf436cf793f35901919f0f | [
"MIT"
] | null | null | null | import itertools
import json
from typing import Iterable, List, Sequence, Tuple
def remove_pads(sequences: Sequence[Sequence[int]],
                true_lengths: Sequence[int],
                *,
                pre_pad: bool = False) -> List[List[int]]:
    """Strip padding from each sequence, keeping its true length.

    Args:
        sequences: padded sequences. The hints were corrected from
            Iterable to Sequence: the body calls len() and slices, which
            a plain iterable does not support.
        true_lengths: unpadded length of each sequence; must be the same
            length as `sequences`.
        pre_pad: when True the padding precedes the content, so the tail
            of each sequence is kept; otherwise the head is kept.

    Returns:
        A list of lists with the padding removed.
    """
    assert len(sequences) == len(true_lengths)
    output = []
    for element, true_length in zip(sequences, true_lengths):
        if pre_pad:
            # Keep the trailing true_length items; max() guards against a
            # true_length larger than the element itself.
            element = element[max(0, len(element) - true_length):]
        else:
            element = element[:true_length]
        output.append(list(element))
    return output
def flatten_list(list_to_flatten: List[list]) -> list:
    """Concatenate the sub-lists of `list_to_flatten` into one flat list."""
    return list(itertools.chain.from_iterable(list_to_flatten))
def read_experiment_data(path: str) -> Tuple[List[int], List[str], List[str]]:
    """Read a JSON-lines experiment file into three parallel lists.

    Each line of `path` must be a JSON object with "index", "original"
    and "segmented" keys.

    Returns:
        (indices, original, segmented) as parallel lists.
    """
    records = []
    with open(path) as file:
        for raw_line in file:
            data = json.loads(raw_line)
            records.append((data["index"], data["original"], data["segmented"]))
    if not records:
        return [], [], []
    # Transpose the row tuples into the three column lists.
    indices, original, segmented = (list(column) for column in zip(*records))
    return indices, original, segmented
| 26.512195 | 78 | 0.609936 | import itertools
import json
from typing import Iterable, List, Tuple
def remove_pads(sequences: Iterable[Iterable[int]],
                true_lengths: Iterable[int],
                *,
                pre_pad: bool = False) -> List[List[int]]:
    """Strip padding from each sequence, keeping only its true length.

    NOTE(review): despite the Iterable hints, len() and slicing are used
    below, so both arguments must really be sequences (e.g. lists) --
    consider tightening the hints to Sequence.
    """
    assert len(sequences) == len(true_lengths)
    output = []
    for element, true_length in zip(sequences, true_lengths):
        if pre_pad:
            # Padding is at the front: keep the trailing true_length items;
            # max() guards against true_length exceeding the element length.
            element = element[max(0, len(element) - true_length):]
        else:
            # Padding is at the back: keep the leading true_length items.
            element = element[:true_length]
        output.append(list(element))
    return output
def flatten_list(list_to_flatten: List[list]) -> list:
    """Concatenate the sub-lists of `list_to_flatten` into one flat list."""
    return list(itertools.chain(*list_to_flatten))
def read_experiment_data(path: str) -> Tuple[List[int], List[str], List[str]]:
    """Read a JSON-lines file into parallel (indices, original, segmented) lists.

    Each line of `path` must be a JSON object with "index", "original"
    and "segmented" keys.
    """
    indices = []
    original = []
    segmented = []
    with open(path) as file:
        for line in file:
            data = json.loads(line)
            indices.append(data["index"])
            original.append(data["original"])
            segmented.append(data["segmented"])
    return indices, original, segmented
| true | true |
1c476f371ca7d1b74fa727dff3dcc27f059ba338 | 4,943 | py | Python | tiddlyweb/serializations/json.py | angeluseve/tiddlyweb | d24a45d48faa2b014e1c1598ec176c4c1c98fb07 | [
"BSD-3-Clause"
] | 1 | 2016-05-09T15:26:17.000Z | 2016-05-09T15:26:17.000Z | tiddlyweb/serializations/json.py | angeluseve/tiddlyweb | d24a45d48faa2b014e1c1598ec176c4c1c98fb07 | [
"BSD-3-Clause"
] | null | null | null | tiddlyweb/serializations/json.py | angeluseve/tiddlyweb | d24a45d48faa2b014e1c1598ec176c4c1c98fb07 | [
"BSD-3-Clause"
] | null | null | null | """
JSON based serializer.
"""
import simplejson
from base64 import b64encode, b64decode
from tiddlyweb.serializations import SerializationInterface
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.policy import Policy
class Serialization(SerializationInterface):
    """
    Turn various entities to and from JSON.
    """

    def list_recipes(self, recipes):
        """
        Create a JSON list of recipe names from
        the provided recipes.
        """
        return simplejson.dumps([recipe.name for recipe in recipes])

    def list_bags(self, bags):
        """
        Create a JSON list of bag names from the
        provided bags.
        """
        return simplejson.dumps([bag.name for bag in bags])

    def list_tiddlers(self, bag):
        """
        List the tiddlers in a bag as JSON.

        The format is a list of dicts in
        the form described by self._tiddler_dict.
        """
        return simplejson.dumps([self._tiddler_dict(tiddler) for tiddler in bag.list_tiddlers()])

    def recipe_as(self, recipe):
        """
        A recipe as a JSON dictionary: its description, its policy
        (owner/read/write/create/delete/manage) and its recipe list.
        """
        policy = recipe.policy
        policy_dict = {}
        for key in ['owner', 'read', 'write', 'create', 'delete', 'manage']:
            policy_dict[key] = getattr(policy, key)
        return simplejson.dumps(dict(desc=recipe.desc, policy=policy_dict, recipe=recipe.get_recipe()))

    def as_recipe(self, recipe, input_string):
        """
        Turn a JSON dictionary into a Recipe
        if it is in the proper form. Include
        the policy. A missing key aborts the copy
        silently, keeping whatever was set before it.
        """
        info = simplejson.loads(input_string)
        try:
            recipe.set_recipe(info['recipe'])
            recipe.desc = info['desc']
            if info['policy']:
                recipe.policy = Policy()
                for key, value in info['policy'].items():
                    recipe.policy.__setattr__(key, value)
        except KeyError:
            pass
        return recipe

    def bag_as(self, bag):
        """
        Create a JSON dictionary representing
        a Bag and its Policy.
        """
        policy = bag.policy
        policy_dict = {}
        for key in ['owner', 'read', 'write', 'create', 'delete', 'manage']:
            policy_dict[key] = getattr(policy, key)
        info = dict(policy=policy_dict, desc=bag.desc)
        return simplejson.dumps(info)

    def as_bag(self, bag, input_string):
        """
        Turn a JSON string into a bag, restoring
        its policy and description.
        """
        info = simplejson.loads(input_string)
        if info['policy']:
            bag.policy = Policy()
            for key, value in info['policy'].items():
                bag.policy.__setattr__(key, value)
        bag.desc = info.get('desc', '')
        return bag

    def tiddler_as(self, tiddler):
        """
        Create a JSON dictionary representing
        a tiddler, as described by _tiddler_dict
        plus the text of the tiddler. Text of a
        typed (binary) tiddler is base64 encoded.
        """
        tiddler_dict = self._tiddler_dict(tiddler)
        if tiddler.type and tiddler.type != 'None':
            tiddler_dict['text'] = b64encode(tiddler.text)
        else:
            tiddler_dict['text'] = tiddler.text
        return simplejson.dumps(tiddler_dict)

    def as_tiddler(self, tiddler, input_string):
        """
        Turn a JSON dictionary into a Tiddler. Only a
        whitelist of keys with truthy values is copied;
        typed (binary) text is base64 decoded.
        """
        dict_from_input = simplejson.loads(input_string)
        accepted_keys = ['created', 'modified', 'modifier', 'tags', 'fields', 'text', 'type']
        # NOTE: iteritems() keeps this module Python 2 only.
        for key, value in dict_from_input.iteritems():
            if value and key in accepted_keys:
                setattr(tiddler, key, value)
        if tiddler.type and tiddler.type != 'None':
            tiddler.text = b64decode(tiddler.text)
        return tiddler

    def _tiddler_dict(self, tiddler):
        """
        Select fields from a tiddler to create
        a dictionary. Text is included only when a
        'fat' query parameter is present in the environ.
        """
        unwanted_keys = ['text', 'store']
        wanted_keys = [attribute for attribute in tiddler.slots if attribute not in unwanted_keys]
        wanted_info = {}
        for attribute in wanted_keys:
            wanted_info[attribute] = getattr(tiddler, attribute, None)
        wanted_info['permissions'] = self._tiddler_permissions(tiddler)
        try:
            fat = self.environ['tiddlyweb.query'].get('fat', [None])[0]
            if fat:
                wanted_info['text'] = tiddler.text
        except KeyError:
            pass  # tiddlyweb.query is not there
        return dict(wanted_info)

    def _tiddler_permissions(self, tiddler):
        """
        Make a list of the permissions the current user has
        on this tiddler, taken from the policy of its bag.
        """
        perms = []
        bag = Bag(tiddler.bag)
        if tiddler.store:
            bag = tiddler.store.get(bag)
        if 'tiddlyweb.usersign' in self.environ:
            perms = bag.policy.user_perms(self.environ['tiddlyweb.usersign'])
        return perms
| 32.519737 | 103 | 0.584665 |
import simplejson
from base64 import b64encode, b64decode
from tiddlyweb.serializations import SerializationInterface
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.policy import Policy
class Serialization(SerializationInterface):
    """Serialize recipes, bags and tiddlers to and from JSON (Python 2)."""

    def list_recipes(self, recipes):
        """Return a JSON list of recipe names."""
        return simplejson.dumps([recipe.name for recipe in recipes])

    def list_bags(self, bags):
        """Return a JSON list of bag names."""
        return simplejson.dumps([bag.name for bag in bags])

    def list_tiddlers(self, bag):
        """Return the bag's tiddlers as a JSON list of _tiddler_dict dicts."""
        return simplejson.dumps([self._tiddler_dict(tiddler) for tiddler in bag.list_tiddlers()])

    def recipe_as(self, recipe):
        """Return a JSON object with the recipe's desc, policy and recipe list."""
        policy = recipe.policy
        policy_dict = {}
        for key in ['owner', 'read', 'write', 'create', 'delete', 'manage']:
            policy_dict[key] = getattr(policy, key)
        return simplejson.dumps(dict(desc=recipe.desc, policy=policy_dict, recipe=recipe.get_recipe()))

    def as_recipe(self, recipe, input_string):
        """Populate `recipe` (recipe list, desc, policy) from a JSON string."""
        info = simplejson.loads(input_string)
        try:
            recipe.set_recipe(info['recipe'])
            recipe.desc = info['desc']
            if info['policy']:
                recipe.policy = Policy()
                for key, value in info['policy'].items():
                    recipe.policy.__setattr__(key, value)
        except KeyError:
            # Partial input is tolerated; whatever was set so far is kept.
            pass
        return recipe

    def bag_as(self, bag):
        """Return a JSON object with the bag's policy and description."""
        policy = bag.policy
        policy_dict = {}
        for key in ['owner', 'read', 'write', 'create', 'delete', 'manage']:
            policy_dict[key] = getattr(policy, key)
        info = dict(policy=policy_dict, desc=bag.desc)
        return simplejson.dumps(info)

    def as_bag(self, bag, input_string):
        """Populate `bag` (policy, desc) from a JSON string."""
        info = simplejson.loads(input_string)
        if info['policy']:
            bag.policy = Policy()
            for key, value in info['policy'].items():
                bag.policy.__setattr__(key, value)
        bag.desc = info.get('desc', '')
        return bag

    def tiddler_as(self, tiddler):
        """Return a tiddler as JSON; typed (binary) text is base64 encoded."""
        tiddler_dict = self._tiddler_dict(tiddler)
        if tiddler.type and tiddler.type != 'None':
            tiddler_dict['text'] = b64encode(tiddler.text)
        else:
            tiddler_dict['text'] = tiddler.text
        return simplejson.dumps(tiddler_dict)

    def as_tiddler(self, tiddler, input_string):
        """Populate `tiddler` from JSON; only whitelisted truthy keys are copied."""
        dict_from_input = simplejson.loads(input_string)
        accepted_keys = ['created', 'modified', 'modifier', 'tags', 'fields', 'text', 'type']
        # NOTE: iteritems() keeps this module Python 2 only.
        for key, value in dict_from_input.iteritems():
            if value and key in accepted_keys:
                setattr(tiddler, key, value)
        if tiddler.type and tiddler.type != 'None':
            tiddler.text = b64decode(tiddler.text)
        return tiddler

    def _tiddler_dict(self, tiddler):
        """Build the dict view of a tiddler (no text unless 'fat' was queried)."""
        unwanted_keys = ['text', 'store']
        wanted_keys = [attribute for attribute in tiddler.slots if attribute not in unwanted_keys]
        wanted_info = {}
        for attribute in wanted_keys:
            wanted_info[attribute] = getattr(tiddler, attribute, None)
        wanted_info['permissions'] = self._tiddler_permissions(tiddler)
        try:
            fat = self.environ['tiddlyweb.query'].get('fat', [None])[0]
            if fat:
                wanted_info['text'] = tiddler.text
        except KeyError:
            pass  # tiddlyweb.query is not in the environ
        return dict(wanted_info)

    def _tiddler_permissions(self, tiddler):
        """List the current user's permissions on the tiddler's bag."""
        perms = []
        bag = Bag(tiddler.bag)
        if tiddler.store:
            bag = tiddler.store.get(bag)
        if 'tiddlyweb.usersign' in self.environ:
            perms = bag.policy.user_perms(self.environ['tiddlyweb.usersign'])
        return perms
1c4771447baf8ca0aea72d01cd74569e19c6a862 | 7,917 | py | Python | solo/methods/nnsiam.py | ludysama/crp | 08027b67f174426ddac5eef8186349e8337481fc | [
"MIT"
] | 2 | 2021-11-02T07:38:33.000Z | 2021-11-21T12:55:28.000Z | solo/methods/nnsiam.py | ludysama/crp | 08027b67f174426ddac5eef8186349e8337481fc | [
"MIT"
] | null | null | null | solo/methods/nnsiam.py | ludysama/crp | 08027b67f174426ddac5eef8186349e8337481fc | [
"MIT"
] | null | null | null | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
from typing import Any, Dict, List, Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from solo.losses.simsiam import simsiam_loss_func
from solo.methods.base import BaseMethod
from solo.utils.misc import gather
class NNSiam(BaseMethod):
    def __init__(
        self,
        proj_output_dim: int,
        proj_hidden_dim: int,
        pred_hidden_dim: int,
        queue_size: int,
        **kwargs,
    ):
        """Implements NNSiam (https://arxiv.org/abs/2104.14548).

        SimSiam with a nearest-neighbor support queue: each view's
        projection is swapped for its nearest neighbor in the queue
        before the negative-cosine loss.

        Args:
            proj_output_dim (int): number of dimensions of projected features.
            proj_hidden_dim (int): number of neurons of the hidden layers of the projector.
            pred_hidden_dim (int): number of neurons of the hidden layers of the predictor.
            queue_size (int): number of samples to keep in the queue.
        """

        super().__init__(**kwargs)

        self.queue_size = queue_size

        # projector: 3-layer MLP; final BN has no affine parameters
        self.projector = nn.Sequential(
            nn.Linear(self.features_dim, proj_hidden_dim, bias=False),
            nn.BatchNorm1d(proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, proj_hidden_dim, bias=False),
            nn.BatchNorm1d(proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, proj_output_dim),
            nn.BatchNorm1d(proj_output_dim, affine=False),
        )
        self.projector[6].bias.requires_grad = False  # hack: not use bias as it is followed by BN

        # predictor: 2-layer bottleneck MLP
        self.predictor = nn.Sequential(
            nn.Linear(proj_output_dim, pred_hidden_dim, bias=False),
            nn.BatchNorm1d(pred_hidden_dim),
            nn.ReLU(),
            nn.Linear(pred_hidden_dim, proj_output_dim),
        )

        # queue of normalized features plus their labels (-1 = empty slot)
        self.register_buffer("queue", torch.randn(self.queue_size, proj_output_dim))
        self.register_buffer("queue_y", -torch.ones(self.queue_size, dtype=torch.long))
        self.queue = F.normalize(self.queue, dim=1)
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    @staticmethod
    def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register nnsiam-specific CLI arguments on top of the base ones."""
        parent_parser = super(NNSiam, NNSiam).add_model_specific_args(parent_parser)
        parser = parent_parser.add_argument_group("nnsiam")

        # projector
        parser.add_argument("--proj_output_dim", type=int, default=128)
        parser.add_argument("--proj_hidden_dim", type=int, default=2048)

        # predictor
        parser.add_argument("--pred_hidden_dim", type=int, default=512)

        # queue settings
        parser.add_argument("--queue_size", default=65536, type=int)

        return parent_parser

    @property
    def learnable_params(self) -> List[dict]:
        """Adds projector and predictor parameters to the parent's learnable parameters.

        Returns:
            List[dict]: list of learnable parameters.
        """

        extra_learnable_params: List[dict] = [
            {"params": self.projector.parameters()},
            # static_lr: presumably keeps the predictor out of lr scheduling
            # (handled by BaseMethod) as in SimSiam -- confirm in BaseMethod.
            {"params": self.predictor.parameters(), "static_lr": True},
        ]
        return super().learnable_params + extra_learnable_params

    @torch.no_grad()
    def dequeue_and_enqueue(self, z: torch.Tensor, y: torch.Tensor):
        """Adds new samples and removes old samples from the queue in a fifo manner. Also stores
        the labels of the samples.

        Args:
            z (torch.Tensor): batch of projected features.
            y (torch.Tensor): labels of the samples in the batch.
        """

        # gather across DDP workers so every replica sees the full batch
        z = gather(z)
        y = gather(y)

        batch_size = z.shape[0]
        ptr = int(self.queue_ptr)  # type: ignore
        # the queue must hold a whole number of (global) batches
        assert self.queue_size % batch_size == 0

        self.queue[ptr : ptr + batch_size, :] = z
        self.queue_y[ptr : ptr + batch_size] = y  # type: ignore
        ptr = (ptr + batch_size) % self.queue_size

        self.queue_ptr[0] = ptr  # type: ignore

    @torch.no_grad()
    def find_nn(self, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Finds the nearest neighbor of a sample.

        Args:
            z (torch.Tensor): a batch of projected features.

        Returns:
            Tuple[torch.Tensor, torch.Tensor]:
                indices and projected features of the nearest neighbors.
        """

        # cosine similarity (inputs are normalized), argmax over the queue
        idx = (z @ self.queue.T).max(dim=1)[1]
        nn = self.queue[idx]
        return idx, nn

    def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
        """Performs the forward pass of the encoder, the projector and the predictor.

        Args:
            X (torch.Tensor): a batch of images in the tensor format.

        Returns:
            Dict[str, Any]:
                a dict containing the outputs of the parent
                and the projected and predicted features.
        """

        out = super().forward(X, *args, **kwargs)
        z = self.projector(out["feats"])
        p = self.predictor(z)
        return {**out, "z": z, "p": p}

    def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
        """Training step for NNSiam reusing BaseMethod training step.

        Args:
            batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
                [X] is a list of size self.num_crops containing batches of images
            batch_idx (int): index of the batch

        Returns:
            torch.Tensor: total loss composed of SimSiam loss and classification loss
        """

        targets = batch[-1]

        out = super().training_step(batch, batch_idx)
        class_loss = out["loss"]
        feats1, feats2 = out["feats"]

        z1 = self.projector(feats1)
        z2 = self.projector(feats2)

        p1 = self.predictor(z1)
        p2 = self.predictor(z2)

        z1 = F.normalize(z1, dim=-1)
        z2 = F.normalize(z2, dim=-1)

        # find nn
        idx1, nn1 = self.find_nn(z1)
        _, nn2 = self.find_nn(z2)

        # ------- negative cosine similarity loss -------
        # each view's predictor output is pulled toward the OTHER view's NN
        neg_cos_sim = simsiam_loss_func(p1, nn2) / 2 + simsiam_loss_func(p2, nn1) / 2

        # compute nn accuracy
        b = targets.size(0)
        nn_acc = (targets == self.queue_y[idx1]).sum() / b

        # dequeue and enqueue
        self.dequeue_and_enqueue(z1, targets)

        # calculate std of features (collapse monitor)
        z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()
        z2_std = F.normalize(z2, dim=-1).std(dim=0).mean()
        z_std = (z1_std + z2_std) / 2

        metrics = {
            "train_neg_cos_sim": neg_cos_sim,
            "train_z_std": z_std,
            "train_nn_acc": nn_acc,
        }
        self.log_dict(metrics, on_epoch=True, sync_dist=True)

        return neg_cos_sim + class_loss
| 35.662162 | 99 | 0.630794 |
import argparse
from typing import Any, Dict, List, Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from solo.losses.simsiam import simsiam_loss_func
from solo.methods.base import BaseMethod
from solo.utils.misc import gather
class NNSiam(BaseMethod):
    """NNSiam (https://arxiv.org/abs/2104.14548): SimSiam with a
    nearest-neighbor support queue."""

    def __init__(
        self,
        proj_output_dim: int,
        proj_hidden_dim: int,
        pred_hidden_dim: int,
        queue_size: int,
        **kwargs,
    ):
        """Build the projector, predictor and FIFO feature queue.

        Args:
            proj_output_dim: dimensionality of the projected features.
            proj_hidden_dim: hidden width of the projector MLP.
            pred_hidden_dim: hidden width of the predictor MLP.
            queue_size: number of past samples kept for NN lookup.
        """
        super().__init__(**kwargs)
        self.queue_size = queue_size
        # 3-layer projector MLP; final BN has no affine parameters.
        self.projector = nn.Sequential(
            nn.Linear(self.features_dim, proj_hidden_dim, bias=False),
            nn.BatchNorm1d(proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, proj_hidden_dim, bias=False),
            nn.BatchNorm1d(proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, proj_output_dim),
            nn.BatchNorm1d(proj_output_dim, affine=False),
        )
        # Bias is redundant directly before a BatchNorm layer.
        self.projector[6].bias.requires_grad = False
        # 2-layer bottleneck predictor MLP.
        self.predictor = nn.Sequential(
            nn.Linear(proj_output_dim, pred_hidden_dim, bias=False),
            nn.BatchNorm1d(pred_hidden_dim),
            nn.ReLU(),
            nn.Linear(pred_hidden_dim, proj_output_dim),
        )
        # FIFO queue of normalized features plus their labels (-1 = empty).
        self.register_buffer("queue", torch.randn(self.queue_size, proj_output_dim))
        self.register_buffer("queue_y", -torch.ones(self.queue_size, dtype=torch.long))
        self.queue = F.normalize(self.queue, dim=1)
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    @staticmethod
    def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register nnsiam-specific CLI args on top of the base method's."""
        parent_parser = super(NNSiam, NNSiam).add_model_specific_args(parent_parser)
        parser = parent_parser.add_argument_group("nnsiam")
        parser.add_argument("--proj_output_dim", type=int, default=128)
        parser.add_argument("--proj_hidden_dim", type=int, default=2048)
        parser.add_argument("--pred_hidden_dim", type=int, default=512)
        parser.add_argument("--queue_size", default=65536, type=int)
        return parent_parser

    @property
    def learnable_params(self) -> List[dict]:
        """Parent's learnable params plus projector and predictor params."""
        extra_learnable_params: List[dict] = [
            {"params": self.projector.parameters()},
            # static_lr: presumably excludes the predictor from lr
            # scheduling (per SimSiam) -- handled in BaseMethod; confirm.
            {"params": self.predictor.parameters(), "static_lr": True},
        ]
        return super().learnable_params + extra_learnable_params

    @torch.no_grad()
    def dequeue_and_enqueue(self, z: torch.Tensor, y: torch.Tensor):
        """FIFO-insert a (DDP-gathered) batch of features and labels."""
        z = gather(z)
        y = gather(y)
        batch_size = z.shape[0]
        ptr = int(self.queue_ptr)
        # The queue must hold a whole number of (global) batches.
        assert self.queue_size % batch_size == 0
        self.queue[ptr : ptr + batch_size, :] = z
        self.queue_y[ptr : ptr + batch_size] = y
        ptr = (ptr + batch_size) % self.queue_size
        self.queue_ptr[0] = ptr

    @torch.no_grad()
    def find_nn(self, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (indices, features) of each sample's nearest queue entry."""
        # Cosine similarity (inputs are normalized); argmax over the queue.
        idx = (z @ self.queue.T).max(dim=1)[1]
        nn = self.queue[idx]
        return idx, nn

    def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
        """Run encoder + projector + predictor; extend parent's output dict."""
        out = super().forward(X, *args, **kwargs)
        z = self.projector(out["feats"])
        p = self.predictor(z)
        return {**out, "z": z, "p": p}

    def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
        """NNSiam training step: SimSiam loss against queue NNs + class loss."""
        targets = batch[-1]
        out = super().training_step(batch, batch_idx)
        class_loss = out["loss"]
        feats1, feats2 = out["feats"]
        z1 = self.projector(feats1)
        z2 = self.projector(feats2)
        p1 = self.predictor(z1)
        p2 = self.predictor(z2)
        z1 = F.normalize(z1, dim=-1)
        z2 = F.normalize(z2, dim=-1)
        # Each view's predictor output is pulled toward the OTHER view's NN.
        idx1, nn1 = self.find_nn(z1)
        _, nn2 = self.find_nn(z2)
        neg_cos_sim = simsiam_loss_func(p1, nn2) / 2 + simsiam_loss_func(p2, nn1) / 2
        # Fraction of NNs sharing the sample's label (monitoring only).
        b = targets.size(0)
        nn_acc = (targets == self.queue_y[idx1]).sum() / b
        self.dequeue_and_enqueue(z1, targets)
        # Feature std across the batch: a collapse monitor.
        z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()
        z2_std = F.normalize(z2, dim=-1).std(dim=0).mean()
        z_std = (z1_std + z2_std) / 2
        metrics = {
            "train_neg_cos_sim": neg_cos_sim,
            "train_z_std": z_std,
            "train_nn_acc": nn_acc,
        }
        self.log_dict(metrics, on_epoch=True, sync_dist=True)
        return neg_cos_sim + class_loss
| true | true |
1c47724e4746e520c60664378824afb818843692 | 6,421 | py | Python | src/create_embedded_tools.py | erenon/bazel | 9bf885afeb01c766d84acf86ca847a7b5e7bd0d8 | [
"Apache-2.0"
] | null | null | null | src/create_embedded_tools.py | erenon/bazel | 9bf885afeb01c766d84acf86ca847a7b5e7bd0d8 | [
"Apache-2.0"
] | null | null | null | src/create_embedded_tools.py | erenon/bazel | 9bf885afeb01c766d84acf86ca847a7b5e7bd0d8 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=g-direct-third-party-import
# pylint: disable=g-bad-file-header
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates the embedded_tools.zip that is part of the Bazel binary."""
import contextlib
import fnmatch
import os
import os.path
import re
import sys
import zipfile
from src.create_embedded_tools_lib import copy_tar_to_zip
from src.create_embedded_tools_lib import copy_zip_to_zip
from src.create_embedded_tools_lib import is_executable
# (pattern, transformer) pairs, tried in order by get_output_path(): the
# first fnmatch-style pattern that matches an input path decides where the
# file lands inside the embedded_tools zip. The final catch-all '*' entry
# strips any leading bazel-out/<config>/bin/ prefix, so every input maps
# somewhere.
output_paths = [
    ('*tools/jdk/BUILD*', lambda x: 'tools/jdk/BUILD'),
    ('*tools/platforms/platforms.BUILD', lambda x: 'platforms/BUILD'),
    ('*tools/platforms/*', lambda x: 'platforms/' + os.path.basename(x)),
    ('*tools/cpp/runfiles/generated_*',
     lambda x: 'tools/cpp/runfiles/' + os.path.basename(x)[len('generated_'):]),
    ('*JavaBuilder*_deploy.jar', lambda x: 'tools/jdk/' + os.path.basename(x)),
    ('*JacocoCoverage*_deploy.jar',
     lambda x: 'tools/jdk/JacocoCoverage_deploy.jar'),
    ('*turbine_deploy.jar', lambda x: 'tools/jdk/turbine_deploy.jar'),
    ('*turbine_direct*', lambda x: 'tools/jdk/' + os.path.basename(x)),
    ('*javac-9+181-r4173-1.jar',
     lambda x: 'third_party/java/jdk/langtools/javac-9+181-r4173-1.jar'),
    ('*bazel-singlejar_deploy.jar',
     lambda x: 'tools/jdk/singlejar/bazel-singlejar_deploy.jar'),
    ('*GenClass_deploy.jar', lambda x: 'tools/jdk/GenClass_deploy.jar'),
    ('*ExperimentalRunner_deploy.jar',
     lambda x: 'tools/jdk/ExperimentalTestRunner_deploy.jar'),
    ('*Runner_deploy.jar', lambda x: 'tools/jdk/TestRunner_deploy.jar'),
    ('*singlejar_local.exe', lambda x: 'tools/jdk/singlejar/singlejar.exe'),
    ('*singlejar_local', lambda x: 'tools/jdk/singlejar/singlejar'),
    ('*launcher.exe', lambda x: 'tools/launcher/launcher.exe'),
    ('*def_parser.exe', lambda x: 'tools/def_parser/def_parser.exe'),
    ('*ijar.exe', lambda x: 'tools/jdk/ijar/ijar.exe'),
    ('*ijar', lambda x: 'tools/jdk/ijar/ijar'),
    ('*zipper.exe', lambda x: 'tools/zip/zipper/zipper.exe'),
    ('*zipper', lambda x: 'tools/zip/zipper/zipper'),
    ('*src/objc_tools/*',
     lambda x: 'tools/objc/precomp_' + os.path.basename(x)),
    ('*xcode*StdRedirect.dylib', lambda x: 'tools/objc/StdRedirect.dylib'),
    ('*xcode*make_hashed_objlist.py',
     lambda x: 'tools/objc/make_hashed_objlist.py'),
    ('*xcode*realpath', lambda x: 'tools/objc/realpath'),
    ('*xcode*xcode-locator', lambda x: 'tools/objc/xcode-locator'),
    ('*src/tools/xcode/*.sh', lambda x: 'tools/objc/' + os.path.basename(x)),
    # Extensionless xcode tools get a '.sh' suffix in the archive.
    ('*src/tools/xcode/*',
     lambda x: 'tools/objc/' + os.path.basename(x) + '.sh'),
    # The bundled JDK archives get fixed names; main() special-cases them.
    ('*external/openjdk_*/file/*.tar.gz', lambda x: 'jdk.tar.gz'),
    ('*external/openjdk_*/file/*.zip', lambda x: 'jdk.zip'),
    ('*src/minimal_jdk.tar.gz', lambda x: 'jdk.tar.gz'),
    ('*src/minimal_jdk.zip', lambda x: 'jdk.zip'),
    ('*', lambda x: re.sub(r'^.*bazel-out/[^/]*/bin/', '', x, count=1)),
]
def get_output_path(path):
    """Map an input path to its destination path inside the archive.

    The first matching pattern in output_paths wins; the transformed
    result has any '/BUILD.tools' suffix normalized to '/BUILD'.
    """
    normalized = path.replace('\\', '/')
    for pattern, transformer in output_paths:
        if fnmatch.fnmatch(normalized, pattern):
            # BUILD.tools are stored as BUILD files.
            return transformer(path).replace('/BUILD.tools', '/BUILD')
def get_input_files(argsfile):
    """Returns a sorted list of tuples (archive_file, input_file).

    This describes the files that should be put into the generated archive.

    Args:
        argsfile: The file containing the list of input files.
    """
    with open(argsfile, 'r') as f:
        input_files = set(x.strip() for x in f.readlines())

    result = {}
    for input_file in input_files:
        # If we have both a BUILD and a BUILD.tools file, take the latter only.
        if (os.path.basename(input_file) == 'BUILD' and
            input_file + '.tools' in input_files):
            continue
        # This gives us the same behavior as the older bash version of this
        # tool: If two input files map to the same output files, the one that
        # comes last in the list of input files overrides all earlier ones.
        # NOTE(review): input_files is a set, so "comes last in the list" is
        # not well-defined here -- confirm whether the override order matters.
        result[get_output_path(input_file)] = input_file

    # By sorting the file list, the resulting ZIP file will be reproducible
    # and deterministic. (Comment fixed: it previously said "will not be".)
    return sorted(result.items())
def copy_jdk_into_archive(output_zip, archive_file, input_file):
    """Extract the JDK archive and place its contents under jdk/* in the zip."""

    def _relocate(filename):
        # Bazel looks for the bundled JDK in a top-level 'jdk' folder, so
        # the archive's own root folder is renamed accordingly.
        parts = filename.split('/')
        return '/'.join(['jdk'] + parts[1:])

    # The JDK is special - it is extracted into the zip instead of copied.
    copiers = (('.tar.gz', copy_tar_to_zip), ('.zip', copy_zip_to_zip))
    for suffix, copier in copiers:
        if archive_file.endswith(suffix):
            copier(output_zip, input_file, _relocate)
            return
def main():
    """Assemble embedded_tools.zip (argv[1]) from the file list in argv[2]."""
    output_zip = os.path.join(os.getcwd(), sys.argv[1])
    input_files = get_input_files(sys.argv[2])

    # Copy all the input_files into output_zip.
    # Adding contextlib.closing to be python 2.6 (for centos 6.7) compatible
    with contextlib.closing(
        zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED)) as output_zip:
        # Fixed 1980-01-01 timestamps keep the archive deterministic.
        zipinfo = zipfile.ZipInfo('WORKSPACE', (1980, 1, 1, 0, 0, 0))
        zipinfo.external_attr = 0o644 << 16
        output_zip.writestr(zipinfo, 'workspace(name = "bazel_tools")\n')

        for archive_file, input_file in input_files:
            if os.path.basename(archive_file) in ('jdk.tar.gz', 'jdk.zip'):
                # The JDK archive is extracted into the zip, not stored as-is.
                copy_jdk_into_archive(output_zip, archive_file, input_file)
            else:
                zipinfo = zipfile.ZipInfo(archive_file, (1980, 1, 1, 0, 0, 0))
                # The upper 16 bits of external_attr carry the POSIX mode:
                # 0o755 for executables, 0o644 otherwise.
                zipinfo.external_attr = 0o755 << 16 if is_executable(
                    input_file) else 0o644 << 16
                zipinfo.compress_type = zipfile.ZIP_DEFLATED
                with open(input_file, 'rb') as f:
                    output_zip.writestr(zipinfo, f.read())


if __name__ == '__main__':
    main()
| 42.523179 | 80 | 0.686653 |
atch
import os
import os.path
import re
import sys
import zipfile
from src.create_embedded_tools_lib import copy_tar_to_zip
from src.create_embedded_tools_lib import copy_zip_to_zip
from src.create_embedded_tools_lib import is_executable
output_paths = [
('*tools/jdk/BUILD*', lambda x: 'tools/jdk/BUILD'),
('*tools/platforms/platforms.BUILD', lambda x: 'platforms/BUILD'),
('*tools/platforms/*', lambda x: 'platforms/' + os.path.basename(x)),
('*tools/cpp/runfiles/generated_*',
lambda x: 'tools/cpp/runfiles/' + os.path.basename(x)[len('generated_'):]),
('*JavaBuilder*_deploy.jar', lambda x: 'tools/jdk/' + os.path.basename(x)),
('*JacocoCoverage*_deploy.jar',
lambda x: 'tools/jdk/JacocoCoverage_deploy.jar'),
('*turbine_deploy.jar', lambda x: 'tools/jdk/turbine_deploy.jar'),
('*turbine_direct*', lambda x: 'tools/jdk/' + os.path.basename(x)),
('*javac-9+181-r4173-1.jar',
lambda x: 'third_party/java/jdk/langtools/javac-9+181-r4173-1.jar'),
('*bazel-singlejar_deploy.jar',
lambda x: 'tools/jdk/singlejar/bazel-singlejar_deploy.jar'),
('*GenClass_deploy.jar', lambda x: 'tools/jdk/GenClass_deploy.jar'),
('*ExperimentalRunner_deploy.jar',
lambda x: 'tools/jdk/ExperimentalTestRunner_deploy.jar'),
('*Runner_deploy.jar', lambda x: 'tools/jdk/TestRunner_deploy.jar'),
('*singlejar_local.exe', lambda x: 'tools/jdk/singlejar/singlejar.exe'),
('*singlejar_local', lambda x: 'tools/jdk/singlejar/singlejar'),
('*launcher.exe', lambda x: 'tools/launcher/launcher.exe'),
('*def_parser.exe', lambda x: 'tools/def_parser/def_parser.exe'),
('*ijar.exe', lambda x: 'tools/jdk/ijar/ijar.exe'),
('*ijar', lambda x: 'tools/jdk/ijar/ijar'),
('*zipper.exe', lambda x: 'tools/zip/zipper/zipper.exe'),
('*zipper', lambda x: 'tools/zip/zipper/zipper'),
('*src/objc_tools/*',
lambda x: 'tools/objc/precomp_' + os.path.basename(x)),
('*xcode*StdRedirect.dylib', lambda x: 'tools/objc/StdRedirect.dylib'),
('*xcode*make_hashed_objlist.py',
lambda x: 'tools/objc/make_hashed_objlist.py'),
('*xcode*realpath', lambda x: 'tools/objc/realpath'),
('*xcode*xcode-locator', lambda x: 'tools/objc/xcode-locator'),
('*src/tools/xcode/*.sh', lambda x: 'tools/objc/' + os.path.basename(x)),
('*src/tools/xcode/*',
lambda x: 'tools/objc/' + os.path.basename(x) + '.sh'),
('*external/openjdk_*/file/*.tar.gz', lambda x: 'jdk.tar.gz'),
('*external/openjdk_*/file/*.zip', lambda x: 'jdk.zip'),
('*src/minimal_jdk.tar.gz', lambda x: 'jdk.tar.gz'),
('*src/minimal_jdk.zip', lambda x: 'jdk.zip'),
('*', lambda x: re.sub(r'^.*bazel-out/[^/]*/bin/', '', x, count=1)),
]
def get_output_path(path):
  """Maps an input file path to its location inside the embedded-tools zip.

  The first matching pattern in output_paths wins; the trailing '*' entry
  acts as the catch-all.
  """
  normalized = path.replace('\\', '/')
  for pattern, transformer in output_paths:
    if not fnmatch.fnmatch(normalized, pattern):
      continue
    return transformer(path).replace('/BUILD.tools', '/BUILD')
def get_input_files(argsfile):
  """Returns a sorted list of (archive_path, input_path) pairs.

  Reads the input file paths from *argsfile*, one path per line.
  """
  with open(argsfile, 'r') as f:
    input_files = set(x.strip() for x in f.readlines())
    result = {}
    for input_file in input_files:
      # When both 'BUILD' and 'BUILD.tools' variants exist, the '.tools'
      # variant wins, so skip the plain BUILD file here.
      if (os.path.basename(input_file) == 'BUILD' and
          input_file + '.tools' in input_files):
        continue
      # If two input files map to the same output path, the one iterated
      # last overrides the earlier ones.
      result[get_output_path(input_file)] = input_file
    # Sorting makes the resulting ZIP file deterministic and reproducible.
    return sorted(result.items())
def copy_jdk_into_archive(output_zip, archive_file, input_file):
  """Extracts the JDK archive and adds its contents to the zip under jdk/*."""
  def _replace_dirname(filename):
    # Bazel looks for a bundled JDK in the embedded tools under a folder
    # named 'jdk', so rename the archive's top-level directory accordingly.
    return 'jdk/' + '/'.join(filename.split('/')[1:])
  # The JDK is special - it's extracted into the zip instead of copied whole.
  if archive_file.endswith('.tar.gz'):
    copy_tar_to_zip(output_zip, input_file, _replace_dirname)
  elif archive_file.endswith('.zip'):
    copy_zip_to_zip(output_zip, input_file, _replace_dirname)
def main():
  """Builds the embedded-tools zip (argv[1]) from the argsfile (argv[2])."""
  output_zip = os.path.join(os.getcwd(), sys.argv[1])
  input_files = get_input_files(sys.argv[2])
  # Copy all the input_files into output_zip.
  # Adding contextlib.closing to be python 2.6 (for centos 6.7) compatible
  with contextlib.closing(
      zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED)) as output_zip:
    # Fixed 1980-01-01 timestamps keep the archive byte-for-byte reproducible.
    zipinfo = zipfile.ZipInfo('WORKSPACE', (1980, 1, 1, 0, 0, 0))
    zipinfo.external_attr = 0o644 << 16
    output_zip.writestr(zipinfo, 'workspace(name = "bazel_tools")\n')
    for archive_file, input_file in input_files:
      if os.path.basename(archive_file) in ('jdk.tar.gz', 'jdk.zip'):
        # The JDK archive is unpacked into the zip rather than stored whole.
        copy_jdk_into_archive(output_zip, archive_file, input_file)
      else:
        zipinfo = zipfile.ZipInfo(archive_file, (1980, 1, 1, 0, 0, 0))
        # Preserve the executable bit in the zip's external attributes.
        zipinfo.external_attr = 0o755 << 16 if is_executable(
            input_file) else 0o644 << 16
        zipinfo.compress_type = zipfile.ZIP_DEFLATED
        with open(input_file, 'rb') as f:
          output_zip.writestr(zipinfo, f.read())
if __name__ == '__main__':
main()
| true | true |
1c47729e783feede84d393f9c877b04a40b6c1cf | 5,680 | py | Python | src/morphforgeexamples/exset6_poster_ocns2012/poster1.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | 1 | 2021-01-21T11:31:59.000Z | 2021-01-21T11:31:59.000Z | src/morphforgeexamples/exset6_poster_ocns2012/poster1.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | src/morphforgeexamples/exset6_poster_ocns2012/poster1.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
""" Simulation of a HodgkinHuxley-type neuron specified through NeuroUnits.
"""
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
from morphforge.stdimports import *
from morphforgecontrib.stdimports import *
eqnset_txt_na = """
define_component hh_na {
i = g * (v-erev) * m**3*h
m_inf = m_alpha_rate / (m_alpha_rate + m_beta_rate)
m_tau = 1.0 / (m_alpha_rate + m_beta_rate)
m' = (m_inf-m) / m_tau
h_inf = h_alpha_rate / (h_alpha_rate + h_beta_rate)
h_tau = 1.0 / (h_alpha_rate + h_beta_rate)
h' = (h_inf-h) / h_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1+a2*V)/(a3+std.math.exp((V+a4)/a5))
m_alpha_rate = StdFormAB(V=v, a1=m_a1, a2=m_a2, a3=m_a3, a4=m_a4, a5=m_a5)
m_beta_rate = StdFormAB(V=v, a1=m_b1, a2=m_b2, a3=m_b3, a4=m_b4, a5=m_b5)
h_alpha_rate = StdFormAB(V=v, a1=h_a1, a2=h_a2, a3=h_a3, a4=h_a4, a5=h_a5)
h_beta_rate = StdFormAB(V=v, a1=h_b1, a2=h_b2, a3=h_b3, a4=h_b4, a5=h_b5)
m_a1={-4.00 ms-1}; m_a2={-0.10 mV-1 ms-1}; m_a3={-1.00}; m_a4={40.00 mV}; m_a5={-10.00 mV};
m_b1={ 4.00 ms-1}; m_b2={ 0.00 mV-1 ms-1}; m_b3={ 0.00}; m_b4={65.00 mV}; m_b5={ 18.00 mV};
h_a1={ 0.07 ms-1}; h_a2={ 0.00 mV-1 ms-1}; h_a3={ 0.00}; h_a4={65.00 mV}; h_a5={ 20.00 mV};
h_b1={ 1.00 ms-1}; h_b2={ 0.00 mV-1 ms-1}; h_b3={ 1.00}; h_b4={35.00 mV}; h_b5={-10.00 mV};
erev = 50.0mV;
<=> PARAMETER g:(S/m2)
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_k = """
define_component hh_k {
i = g * (v-erev) * n*n*n*n
n_inf = n_alpha_rate / (n_alpha_rate + n_beta_rate)
n_tau = 1.0 / (n_alpha_rate + n_beta_rate)
n' = (n_inf-n) / n_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1 + a2*V)/(a3+std.math.exp((V+a4)/a5))
n_alpha_rate = StdFormAB(V=v, a1=n_a1, a2=n_a2, a3=n_a3, a4=n_a4, a5=n_a5)
n_beta_rate = StdFormAB(V=v, a1=n_b1, a2=n_b2, a3=n_b3, a4=n_b4, a5=n_b5)
n_a1={-0.55 ms-1}; n_a2={-0.01 mV-1 ms-1}; n_a3={-1.00}; n_a4={55.00 mV}; n_a5={-10.00 mV}
n_b1={0.125 ms-1}; n_b2={ 0.00 mV-1 ms-1}; n_b3={ 0.00}; n_b4={65.00 mV}; n_b5={ 80.00 mV}
g = {36.0mS/cm2}
erev = {-77.0mV}
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_lk = """
define_component hh_lk {
i = {0.3mS/cm2} * (v- {-54.3mV})
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
# Build the NEURON-backed simulation environment.
env = NEURONEnvironment()
sim = env.Simulation()
# Create a cell:
# A single-compartment soma, 18.8 um long with 18.8 um diameter.
morph_dict = {'root': {'length': 18.8, 'diam': 18.8, 'id':'soma'} }
my_morph = MorphologyTree.fromDictionary(morph_dict)
cell = sim.create_cell(name="Cell1", morphology=my_morph)
#soma = cell.get_location("soma")
# Setup passive channels:
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))
# Setup active channels:
# Hodgkin-Huxley-style Na, K and leak channels built from the NeuroUnits
# eqnset strings above; only the Na conductance is supplied as a parameter.
na_chl = env.Channel(NeuroUnitEqnsetMechanism, name="NaChl", eqnset=eqnset_txt_na,
                    default_parameters={"g":qty("120:mS/cm2")}, )
k_chl = env.Channel(NeuroUnitEqnsetMechanism, name="KChl", eqnset=eqnset_txt_k, )
lk_chl = env.Channel(NeuroUnitEqnsetMechanism, name="LKChl", eqnset=eqnset_txt_lk, )
cell.apply_channel( na_chl)
cell.apply_channel( lk_chl)
cell.apply_channel( k_chl)
# Define what to record:
# Soma voltage plus the Na (m, h) and K (n) gating state variables.
sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma)
sim.record(na_chl, what='m', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(na_chl, what='h', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(k_chl, what='n', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
# Create the stimulus and record the injected current:
# 100 pA current step for 100 ms, starting at t = 100 ms.
cc = sim.create_currentclamp(name="CC1", amp=qty("100:pA"), dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)
sim.record(cc, what=StandardTags.Current)
# run the simulation and plot voltage/state traces over 50-250 ms
results = sim.run()
TagViewer(results, timerange=(50, 250)*units.ms, show=True)
| 40.283688 | 124 | 0.660915 |
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
from morphforge.stdimports import *
from morphforgecontrib.stdimports import *
eqnset_txt_na = """
define_component hh_na {
i = g * (v-erev) * m**3*h
m_inf = m_alpha_rate / (m_alpha_rate + m_beta_rate)
m_tau = 1.0 / (m_alpha_rate + m_beta_rate)
m' = (m_inf-m) / m_tau
h_inf = h_alpha_rate / (h_alpha_rate + h_beta_rate)
h_tau = 1.0 / (h_alpha_rate + h_beta_rate)
h' = (h_inf-h) / h_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1+a2*V)/(a3+std.math.exp((V+a4)/a5))
m_alpha_rate = StdFormAB(V=v, a1=m_a1, a2=m_a2, a3=m_a3, a4=m_a4, a5=m_a5)
m_beta_rate = StdFormAB(V=v, a1=m_b1, a2=m_b2, a3=m_b3, a4=m_b4, a5=m_b5)
h_alpha_rate = StdFormAB(V=v, a1=h_a1, a2=h_a2, a3=h_a3, a4=h_a4, a5=h_a5)
h_beta_rate = StdFormAB(V=v, a1=h_b1, a2=h_b2, a3=h_b3, a4=h_b4, a5=h_b5)
m_a1={-4.00 ms-1}; m_a2={-0.10 mV-1 ms-1}; m_a3={-1.00}; m_a4={40.00 mV}; m_a5={-10.00 mV};
m_b1={ 4.00 ms-1}; m_b2={ 0.00 mV-1 ms-1}; m_b3={ 0.00}; m_b4={65.00 mV}; m_b5={ 18.00 mV};
h_a1={ 0.07 ms-1}; h_a2={ 0.00 mV-1 ms-1}; h_a3={ 0.00}; h_a4={65.00 mV}; h_a5={ 20.00 mV};
h_b1={ 1.00 ms-1}; h_b2={ 0.00 mV-1 ms-1}; h_b3={ 1.00}; h_b4={35.00 mV}; h_b5={-10.00 mV};
erev = 50.0mV;
<=> PARAMETER g:(S/m2)
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_k = """
define_component hh_k {
i = g * (v-erev) * n*n*n*n
n_inf = n_alpha_rate / (n_alpha_rate + n_beta_rate)
n_tau = 1.0 / (n_alpha_rate + n_beta_rate)
n' = (n_inf-n) / n_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1 + a2*V)/(a3+std.math.exp((V+a4)/a5))
n_alpha_rate = StdFormAB(V=v, a1=n_a1, a2=n_a2, a3=n_a3, a4=n_a4, a5=n_a5)
n_beta_rate = StdFormAB(V=v, a1=n_b1, a2=n_b2, a3=n_b3, a4=n_b4, a5=n_b5)
n_a1={-0.55 ms-1}; n_a2={-0.01 mV-1 ms-1}; n_a3={-1.00}; n_a4={55.00 mV}; n_a5={-10.00 mV}
n_b1={0.125 ms-1}; n_b2={ 0.00 mV-1 ms-1}; n_b3={ 0.00}; n_b4={65.00 mV}; n_b5={ 80.00 mV}
g = {36.0mS/cm2}
erev = {-77.0mV}
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_lk = """
define_component hh_lk {
i = {0.3mS/cm2} * (v- {-54.3mV})
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
# Simulation environment backed by the NEURON simulator.
env = NEURONEnvironment()
sim = env.Simulation()
# Create a cell:
# Single-compartment soma (18.8 um length and diameter).
morph_dict = {'root': {'length': 18.8, 'diam': 18.8, 'id':'soma'} }
my_morph = MorphologyTree.fromDictionary(morph_dict)
cell = sim.create_cell(name="Cell1", morphology=my_morph)
#soma = cell.get_location("soma")
# Setup passive channels:
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))
# Setup active channels:
# HH-type Na, K and leak channels defined by the NeuroUnits eqnset strings
# declared above; the Na conductance is passed in as a parameter.
na_chl = env.Channel(NeuroUnitEqnsetMechanism, name="NaChl", eqnset=eqnset_txt_na,
                    default_parameters={"g":qty("120:mS/cm2")}, )
k_chl = env.Channel(NeuroUnitEqnsetMechanism, name="KChl", eqnset=eqnset_txt_k, )
lk_chl = env.Channel(NeuroUnitEqnsetMechanism, name="LKChl", eqnset=eqnset_txt_lk, )
cell.apply_channel( na_chl)
cell.apply_channel( lk_chl)
cell.apply_channel( k_chl)
# Define what to record:
# Membrane voltage at the soma and the Na (m, h) / K (n) gating variables.
sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma)
sim.record(na_chl, what='m', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(na_chl, what='h', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(k_chl, what='n', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
# Create the stimulus and record the injected current:
# 100 pA step, 100 ms duration, delayed 100 ms from simulation start.
cc = sim.create_currentclamp(name="CC1", amp=qty("100:pA"), dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)
sim.record(cc, what=StandardTags.Current)
# run the simulation and display the recorded traces for 50-250 ms
results = sim.run()
TagViewer(results, timerange=(50, 250)*units.ms, show=True)
| true | true |
1c4772bee94a9049c31da5ef09d5c7071e017e16 | 2,599 | py | Python | tasrif/processing_pipeline/pandas/convert_to_datetime.py | qcri/tasrif | 327bc1eccb8f8e11d8869ba65a7c72ad038aa094 | [
"BSD-3-Clause"
] | 20 | 2021-12-06T10:41:54.000Z | 2022-03-13T16:25:43.000Z | tasrif/processing_pipeline/pandas/convert_to_datetime.py | qcri/tasrif | 327bc1eccb8f8e11d8869ba65a7c72ad038aa094 | [
"BSD-3-Clause"
] | 33 | 2021-12-06T08:27:18.000Z | 2022-03-14T05:07:53.000Z | tasrif/processing_pipeline/pandas/convert_to_datetime.py | qcri/tasrif | 327bc1eccb8f8e11d8869ba65a7c72ad038aa094 | [
"BSD-3-Clause"
] | 2 | 2022-02-07T08:06:48.000Z | 2022-02-14T07:13:42.000Z | """
Operator to convert a column feature from string to datetime
"""
import pandas as pd
from tasrif.processing_pipeline import PandasOperator
from tasrif.processing_pipeline.validators import InputsAreDataFramesValidatorMixin
class ConvertToDatetimeOperator(InputsAreDataFramesValidatorMixin, PandasOperator):
    """
    Converts a set of (string) features to datetime using Pandas ``to_datetime``.

    Invalid values are coerced to ``NaT`` by default (``errors="coerce"``);
    callers may override this by passing ``errors=...`` as a keyword argument.

    Examples
    --------

    >>> import pandas as pd
    >>> from tasrif.processing_pipeline.pandas import ConvertToDatetimeOperator
    >>>
    >>> df0 = pd.DataFrame([[1, "2020-05-01 00:00:00", 1], [1, "2020-05-01 01:00:00", 1],
    >>>                 [1, "2020-05-01 03:00:00", 2], [2, "2020-05-02 00:00:00", 1],
    >>>                 [2, "2020-05-02 01:00:00", 1]],
    >>>                 columns=['logId', 'timestamp', 'sleep_level'])
    >>>
    >>> operator = ConvertToDatetimeOperator(feature_names=["timestamp"], utc=True)
    >>> df0 = operator.process(df0)
    >>>
    >>> print(df0)
    .  logId                 timestamp  sleep_level
    0      1 2020-05-01 00:00:00+00:00            1
    1      1 2020-05-01 01:00:00+00:00            1
    2      1 2020-05-01 03:00:00+00:00            2
    3      2 2020-05-02 00:00:00+00:00            1
    4      2 2020-05-02 01:00:00+00:00            1
    """

    def __init__(self, feature_names, **kwargs):
        """Convert a set of columns features from string to datetime

        Args:
            feature_names (str or list of str):
                Name(s) of the string columns that represent datetime objects
            **kwargs:
                key word arguments passed to pandas ``to_datetime`` method
        """
        self.feature_names = feature_names
        super().__init__(kwargs)
        self.kwargs = kwargs

    def _process(self, *data_frames):
        """Processes the passed data frame as per the configuration define in the constructor.

        Args:
            *data_frames (list of pd.DataFrame):
                Variable number of pandas dataframes to be processed

        Returns:
            pd.DataFrame -or- list[pd.DataFrame]
                Processed dataframe(s) resulting from applying the operator
        """
        columns = (
            self.feature_names.copy()
            if isinstance(self.feature_names, list)
            else [self.feature_names]
        )
        # Merge the coerce default with the user kwargs instead of passing
        # errors="coerce" alongside **self.kwargs: previously a caller that
        # supplied errors=... triggered a TypeError (duplicate keyword
        # argument). User-supplied values take precedence.
        options = {"errors": "coerce", **self.kwargs}
        processed = []
        for data_frame in data_frames:
            for col in columns:
                data_frame[col] = pd.to_datetime(data_frame[col], **options)
            processed.append(data_frame)
        return processed
| 33.320513 | 94 | 0.58561 | import pandas as pd
from tasrif.processing_pipeline import PandasOperator
from tasrif.processing_pipeline.validators import InputsAreDataFramesValidatorMixin
class ConvertToDatetimeOperator(InputsAreDataFramesValidatorMixin, PandasOperator):
    """Converts a set of (string) column features to datetime via ``pd.to_datetime``."""
    def __init__(self, feature_names, **kwargs):
        """Store the column name(s) to convert and extra ``to_datetime`` kwargs.

        Args:
            feature_names (str or list of str):
                Name(s) of the string columns that represent datetime objects
            **kwargs:
                key word arguments forwarded to pandas ``to_datetime``
        """
        self.feature_names = feature_names
        super().__init__(kwargs)
        self.kwargs = kwargs
    def _process(self, *data_frames):
        """Convert the configured columns of each dataframe in place.

        Args:
            *data_frames (list of pd.DataFrame):
                Variable number of pandas dataframes to be processed

        Returns:
            list[pd.DataFrame]: the (mutated) input dataframes
        """
        # Normalize feature_names to a list of column names.
        columns = (
            self.feature_names.copy()
            if isinstance(self.feature_names, list)
            else [self.feature_names]
        )
        processed = []
        for data_frame in data_frames:
            for col in columns:
                # NOTE(review): errors="coerce" is hard-coded here; a caller
                # passing errors=... via kwargs would raise TypeError
                # (duplicate keyword argument) — confirm intended.
                data_frame[col] = pd.to_datetime(
                    data_frame[col], errors="coerce", **self.kwargs
                )
            processed.append(data_frame)
        return processed
| true | true |
1c4772d8628f28ac08f50f8f4e940c76e95bac8c | 2,757 | py | Python | deploy/env/local/lib/python2.7/site-packages/mercurial-3.1-py2.7-linux-x86_64.egg/mercurial/filelog.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | 3 | 2015-11-05T07:42:43.000Z | 2017-05-29T22:59:47.000Z | vendor/lib/python2.7/site-packages/mercurial/filelog.py | ddollar/gobuild | c1b0e52ab6849a13a95a3fdae4913b925f658272 | [
"MIT"
] | null | null | null | vendor/lib/python2.7/site-packages/mercurial/filelog.py | ddollar/gobuild | c1b0e52ab6849a13a95a3fdae4913b925f658272 | [
"MIT"
] | null | null | null | # filelog.py - file history class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import revlog
import re
_mdre = re.compile('\1\n')
def _parsemeta(text):
"""return (metadatadict, keylist, metadatasize)"""
# text can be buffer, so we can't use .startswith or .index
if text[:2] != '\1\n':
return None, None, None
s = _mdre.search(text, 2).start()
mtext = text[2:s]
meta = {}
keys = []
for l in mtext.splitlines():
k, v = l.split(": ", 1)
meta[k] = v
keys.append(k)
return meta, keys, (s + 2)
def _packmeta(meta, keys=None):
if not keys:
keys = sorted(meta.iterkeys())
return "".join("%s: %s\n" % (k, meta[k]) for k in keys)
class filelog(revlog.revlog):
    """Per-file history storage: a revlog kept under data/<path>.i.

    Revisions may carry copy/rename metadata, stored as a header delimited
    by two "\\1\\n" markers in front of the file text.
    """
    def __init__(self, opener, path):
        super(filelog, self).__init__(opener,
                        "/".join(("data", path + ".i")))

    def read(self, node):
        """Return the text of *node* with any metadata header stripped."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision; prepend a metadata header when needed.

        A header is also added when the text itself starts with "\\1\\n",
        so that read() can unambiguously strip it again.
        """
        if meta or text.startswith('\1\n'):
            text = "\1\n%s\1\n%s" % (_packmeta(meta), text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        """Return (source path, source binary node) if *node* is a copy.

        Copy metadata is only ever stored on revisions without a first
        parent, so anything else returns False immediately.
        """
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = _parsemeta(t)[0]
        if m and "copy" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""
        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return super(filelog, self).size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        t = text
        if text.startswith('\1\n'):
            # Mimic what add() would store for such a text before hashing.
            t = '\1\n\1\n' + text

        samehashes = not super(filelog, self).cmp(node, t)
        if samehashes:
            return False

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def _file(self, f):
        # Open a sibling filelog through the same opener.
        return filelog(self.opener, f)
| 29.645161 | 74 | 0.564744 |
import revlog
import re
_mdre = re.compile('\1\n')
def _parsemeta(text):
    """return (metadatadict, keylist, metadatasize)"""
    # text can be a buffer, so avoid str-only APIs like .startswith/.index.
    if text[:2] != '\1\n':
        return None, None, None
    # The metadata block is delimited by a second '\1\n' marker.
    s = _mdre.search(text, 2).start()
    mtext = text[2:s]
    meta = {}
    keys = []
    for l in mtext.splitlines():
        k, v = l.split(": ", 1)
        meta[k] = v
        keys.append(k)
    return meta, keys, (s + 2)
def _packmeta(meta, keys=None):
    """Serialize *meta* as ``key: value`` lines in *keys* order (sorted if absent)."""
    if not keys:
        keys = sorted(meta.iterkeys())
    return "".join("%s: %s\n" % (k, meta[k]) for k in keys)
class filelog(revlog.revlog):
    """Per-file history: a revlog stored under data/<path>.i.

    Revisions may carry copy/rename metadata encoded as a header delimited
    by "\\1\\n" markers in front of the file text.
    """
    def __init__(self, opener, path):
        super(filelog, self).__init__(opener,
                        "/".join(("data", path + ".i")))
    def read(self, node):
        """Return the file text of *node*, stripping any metadata header."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]
    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision, prepending a metadata header when required."""
        if meta or text.startswith('\1\n'):
            text = "\1\n%s\1\n%s" % (_packmeta(meta), text)
        return self.addrevision(text, transaction, link, p1, p2)
    def renamed(self, node):
        """Return (source path, source node) if *node* is a copy, else False."""
        # Copy metadata is only stored on revisions without a first parent.
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = _parsemeta(t)[0]
        if m and "copy" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False
    def size(self, rev):
        """Return the size of a given revision."""
        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return super(filelog, self).size(rev)
    def cmp(self, node, text):
        """Compare *text* with revision *node*; True when they differ."""
        t = text
        if text.startswith('\1\n'):
            # Mimic what add() would store for such a text before hashing.
            t = '\1\n\1\n' + text
        samehashes = not super(filelog, self).cmp(node, t)
        if samehashes:
            return False
        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text
        return True
    def _file(self, f):
        """Open a sibling filelog through the same opener."""
        return filelog(self.opener, f)
| true | true |
1c47737253ed550c0b8f08ac8b7f413886c1457e | 14,684 | py | Python | train.py | solmn/parallel_wavenet | 45e9eceb7a2d1982b3d45823332575eb26f333c0 | [
"MIT"
] | 3 | 2018-10-30T13:45:14.000Z | 2020-03-29T06:56:10.000Z | train.py | solmn/parallel_wavenet | 45e9eceb7a2d1982b3d45823332575eb26f333c0 | [
"MIT"
] | null | null | null | train.py | solmn/parallel_wavenet | 45e9eceb7a2d1982b3d45823332575eb26f333c0 | [
"MIT"
] | null | null | null | """Training script for the WaveNet network on the VCTK corpus.
This script trains a network with the WaveNet using data from the VCTK corpus,
which can be freely downloaded at the following site (~10 GB):
http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html
"""
from __future__ import print_function
import argparse
from datetime import datetime
import json
import os
import sys
import time
import tensorflow as tf
from tensorflow.python.client import timeline
from wavenet import WaveNetModel, AudioReader, optimizer_factory
BATCH_SIZE = 1
DATA_DIRECTORY = './dataset/LJSpeech/wavs/'
LOGDIR_ROOT = './logdir'
CHECKPOINT_EVERY = 100
NUM_STEPS = int(1e6)
LEARNING_RATE = 2 *1e-5
WAVENET_PARAMS = './wavenet_params.json'
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
SAMPLE_SIZE = 8000
L2_REGULARIZATION_STRENGTH = 0
SILENCE_THRESHOLD = 0.1
EPSILON = 1e-8
MOMENTUM = 0.9
MAX_TO_KEEP = 5
METADATA = False
def get_arguments():
    """Parse and return the command-line arguments for WaveNet training."""
    def _str_to_bool(s):
        """Convert string to bool (in argparse context)."""
        if s.lower() not in ['true', 'false']:
            raise ValueError('Argument needs to be a '
                             'boolean, got {}'.format(s))
        return {'true': True, 'false': False}[s.lower()]

    parser = argparse.ArgumentParser(description='WaveNet example network')
    parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
                        help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
    parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
                        help='The directory containing the VCTK corpus.')
    # Use _str_to_bool (as --histograms already does): argparse's type=bool
    # treats any non-empty string, including 'False', as True.
    parser.add_argument('--store_metadata', type=_str_to_bool, default=METADATA,
                        help='Whether to store advanced debugging information '
                        '(execution time, memory consumption) for use with '
                        'TensorBoard. Default: ' + str(METADATA) + '.')
    parser.add_argument('--logdir', type=str, default=None,
                        help='Directory in which to store the logging '
                        'information for TensorBoard. '
                        'If the model already exists, it will restore '
                        'the state and will continue training. '
                        'Cannot use with --logdir_root and --restore_from.')
    parser.add_argument('--logdir_root', type=str, default=None,
                        help='Root directory to place the logging '
                        'output and generated model. These are stored '
                        'under the dated subdirectory of --logdir_root. '
                        'Cannot use with --logdir.')
    parser.add_argument('--restore_from', type=str, default=None,
                        help='Directory in which to restore the model from. '
                        'This creates the new model under the dated directory '
                        'in --logdir_root. '
                        'Cannot use with --logdir.')
    parser.add_argument('--checkpoint_every', type=int,
                        default=CHECKPOINT_EVERY,
                        help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
    parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
                        help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
    parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
                        help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
    parser.add_argument('--wavenet_params', type=str, default=WAVENET_PARAMS,
                        help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')
    parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
                        help='Concatenate and cut audio samples to this many '
                        'samples. Default: ' + str(SAMPLE_SIZE) + '.')
    parser.add_argument('--l2_regularization_strength', type=float,
                        default=L2_REGULARIZATION_STRENGTH,
                        help='Coefficient in the L2 regularization. '
                        'Default: False')
    parser.add_argument('--silence_threshold', type=float,
                        default=SILENCE_THRESHOLD,
                        help='Volume threshold below which to trim the start '
                        'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
    parser.add_argument('--optimizer', type=str, default='adam',
                        choices=optimizer_factory.keys(),
                        help='Select the optimizer specified by this option. Default: adam.')
    parser.add_argument('--momentum', type=float,
                        default=MOMENTUM, help='Specify the momentum to be '
                        'used by sgd or rmsprop optimizer. Ignored by the '
                        'adam optimizer. Default: ' + str(MOMENTUM) + '.')
    parser.add_argument('--histograms', type=_str_to_bool, default=False,
                        help='Whether to store histogram summaries. Default: False')
    parser.add_argument('--gc_channels', type=int, default=None,
                        help='Number of global condition channels. Default: None. Expecting: Int')
    parser.add_argument('--lc_channels', type=int, default=None,
                        help='Number of local condition channels. Default: None. Expecting: Int')
    parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
                        help='Maximum amount of checkpoints that will be kept alive. Default: '
                             + str(MAX_TO_KEEP) + '.')
    return parser.parse_args()
def save(saver, sess, logdir, step):
    """Write a checkpoint for the current session under *logdir*."""
    checkpoint_path = os.path.join(logdir, 'model.ckpt')
    print('Storing checkpoint to {} ...'.format(logdir), end="")
    sys.stdout.flush()

    # Create the log directory lazily on the first save.
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    saver.save(sess, checkpoint_path, global_step=step)
    print(' Done.')
def load(saver, sess, logdir):
    """Restore the latest checkpoint from *logdir* and return its global step.

    Returns None (after printing a notice) when no checkpoint exists.
    """
    print("Trying to restore saved checkpoints from {} ...".format(logdir),
          end="")

    ckpt = tf.train.get_checkpoint_state(logdir)
    if not ckpt:
        print(" No checkpoint found.")
        return None

    print("  Checkpoint found: {}".format(ckpt.model_checkpoint_path))
    # The step number is the suffix of 'model.ckpt-<step>' in the
    # checkpoint file name.
    step_token = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    global_step = int(step_token)
    print("  Global step was: {}".format(global_step))
    print("  Restoring...", end="")
    saver.restore(sess, ckpt.model_checkpoint_path)
    print(" Done.")
    return global_step
def get_default_logdir(logdir_root):
    """Return the dated default log directory under *logdir_root*/train."""
    return os.path.join(logdir_root, 'train', STARTED_DATESTRING)
def validate_directories(args):
    """Validate and arrange directory related arguments.

    Raises ValueError for mutually exclusive flag combinations; otherwise
    returns a dict with the resolved 'logdir', the raw 'logdir_root' as
    given on the command line, and 'restore_from'.
    """
    if args.logdir and args.logdir_root:
        raise ValueError("--logdir and --logdir_root cannot be "
                         "specified at the same time.")

    if args.logdir and args.restore_from:
        raise ValueError(
            "--logdir and --restore_from cannot be specified at the same "
            "time. This is to keep your previous model from unexpected "
            "overwrites.\n"
            "Use --logdir_root to specify the root of the directory which "
            "will be automatically created with current date and time, or use "
            "only --logdir to just continue the training from the last "
            "checkpoint.")

    root = args.logdir_root if args.logdir_root is not None else LOGDIR_ROOT

    logdir = args.logdir
    if logdir is None:
        logdir = get_default_logdir(root)
        print('Using default logdir: {}'.format(logdir))

    # args.logdir and args.restore_from are exclusive, so when restore_from
    # is absent the (possibly newly created) logdir is used.
    restore_from = args.restore_from if args.restore_from is not None else logdir

    return {
        'logdir': logdir,
        'logdir_root': args.logdir_root,
        'restore_from': restore_from
    }
def main():
    """Train a WaveNet model: build the input pipeline, network, and loop."""
    args = get_arguments()

    try:
        directories = validate_directories(args)
    except ValueError as e:
        print("Some arguments are wrong:")
        print(str(e))
        return

    logdir = directories['logdir']
    restore_from = directories['restore_from']

    # Even if we restored the model, we will treat it as new training
    # if the trained model is written into an arbitrary location.
    is_overwritten_training = logdir != restore_from

    with open(args.wavenet_params, 'r') as f:
        wavenet_params = json.load(f)

    # Create coordinator.
    coord = tf.train.Coordinator()

    # Load raw waveform from VCTK corpus.
    with tf.name_scope('create_inputs'):
        # Allow silence trimming to be skipped by specifying a threshold near
        # zero.
        silence_threshold = args.silence_threshold if args.silence_threshold > \
                                                      EPSILON else None
        gc_enabled = args.gc_channels is not None
        reader = AudioReader(
            args.data_dir,
            coord,
            sample_rate=wavenet_params['sample_rate'],
            gc_enabled=gc_enabled,
            receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params["filter_width"],
                                                                   wavenet_params["dilations"],
                                                                   wavenet_params["scalar_input"],
                                                                   wavenet_params["initial_filter_width"]),
            sample_size=args.sample_size,
            silence_threshold=silence_threshold)
        audio_batch = reader.dequeue(args.batch_size)
        if gc_enabled:
            gc_id_batch = reader.dequeue_gc(args.batch_size)
        else:
            gc_id_batch = None

    # Create network.
    net = WaveNetModel(
        batch_size=args.batch_size,
        dilations=wavenet_params["dilations"],
        filter_width=wavenet_params["filter_width"],
        residual_channels=wavenet_params["residual_channels"],
        dilation_channels=wavenet_params["dilation_channels"],
        skip_channels=wavenet_params["skip_channels"],
        quantization_channels=wavenet_params["quantization_channels"],
        output_channels = wavenet_params["output_channels"],
        log_scale_min = wavenet_params["log_scale_min"],
        use_biases=wavenet_params["use_biases"],
        scalar_input=wavenet_params["scalar_input"],
        initial_filter_width=wavenet_params["initial_filter_width"],
        histograms=args.histograms,
        local_condition_channels = args.lc_channels,
        global_condition_channels=args.gc_channels,
        global_condition_cardinality=reader.gc_category_cardinality)

    # A strength of 0 disables L2 regularization entirely.
    if args.l2_regularization_strength == 0:
        args.l2_regularization_strength = None
    loss = net.loss(input_batch=audio_batch,
                    global_condition_batch=gc_id_batch,
                    l2_regularization_strength=args.l2_regularization_strength)
    optimizer = optimizer_factory[args.optimizer](
        learning_rate=args.learning_rate,
        momentum=args.momentum)
    trainable = tf.trainable_variables()
    optim = optimizer.minimize(loss, var_list=trainable)

    # Set up logging for TensorBoard.
    writer = tf.summary.FileWriter(logdir)
    writer.add_graph(tf.get_default_graph())
    run_metadata = tf.RunMetadata()
    summaries = tf.summary.merge_all()

    # Set up session
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    init = tf.global_variables_initializer()
    sess.run(init)

    # Saver for storing checkpoints of the model.
    saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)

    try:
        saved_global_step = load(saver, sess, restore_from)
        if is_overwritten_training or saved_global_step is None:
            # The first training step will be saved_global_step + 1,
            # therefore we put -1 here for new or overwritten trainings.
            saved_global_step = -1

    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
    # before re-raising — consider narrowing to Exception.
    except:
        print("Something went wrong while restoring checkpoint. "
              "We will terminate training to avoid accidentally overwriting "
              "the previous model.")
        raise

    # Start the file-reader threads that feed the input queues.
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    reader.start_threads(sess)

    step = None
    last_saved_step = saved_global_step
    try:
        for step in range(saved_global_step + 1, args.num_steps):
            start_time = time.time()
            if args.store_metadata and step % 50 == 0:
                # Slow run that stores extra information for debugging.
                print('Storing metadata')
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                summary, loss_value, _ = sess.run(
                    [summaries, loss, optim],
                    options=run_options,
                    run_metadata=run_metadata)
                writer.add_summary(summary, step)
                writer.add_run_metadata(run_metadata,
                                        'step_{:04d}'.format(step))
                tl = timeline.Timeline(run_metadata.step_stats)
                timeline_path = os.path.join(logdir, 'timeline.trace')
                with open(timeline_path, 'w') as f:
                    f.write(tl.generate_chrome_trace_format(show_memory=True))
            else:
                summary, loss_value, _ = sess.run([summaries, loss, optim])
                writer.add_summary(summary, step)

            duration = time.time() - start_time
            print('step {:d} - loss = {:.3f}, ({:.3f} sec/step)'
                  .format(step, loss_value, duration))

            if step % args.checkpoint_every == 0:
                save(saver, sess, logdir, step)
                last_saved_step = step

    except KeyboardInterrupt:
        # Introduce a line break after ^C is displayed so save message
        # is on its own line.
        print()
    finally:
        # NOTE(review): if the loop never ran, step is still None and this
        # comparison fails on Python 3 — confirm intended minimum of one step.
        if step > last_saved_step:
            save(saver, sess, logdir, step)
        coord.request_stop()
        coord.join(threads)
if __name__ == '__main__':
main() | 42.686047 | 117 | 0.611959 |
from __future__ import print_function
import argparse
from datetime import datetime
import json
import os
import sys
import time
import tensorflow as tf
from tensorflow.python.client import timeline
from wavenet import WaveNetModel, AudioReader, optimizer_factory
# --- Default training hyperparameters (each overridable via a CLI flag) ---
BATCH_SIZE = 1  # wav files processed per step
DATA_DIRECTORY = './dataset/LJSpeech/wavs/'  # corpus location
LOGDIR_ROOT = './logdir'  # root for dated log/checkpoint dirs
CHECKPOINT_EVERY = 100  # steps between checkpoints
NUM_STEPS = int(1e6)  # total training steps
LEARNING_RATE = 2 *1e-5
WAVENET_PARAMS = './wavenet_params.json'  # network architecture description
# Timestamp used to name the default log directory for this run.
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
SAMPLE_SIZE = 8000  # audio samples per training example
L2_REGULARIZATION_STRENGTH = 0  # 0 disables L2 regularization
SILENCE_THRESHOLD = 0.1  # volume below which audio edges are trimmed
EPSILON = 1e-8  # thresholds <= EPSILON disable silence trimming
MOMENTUM = 0.9  # for sgd/rmsprop optimizers (ignored by adam)
MAX_TO_KEEP = 5  # checkpoints retained by the Saver
METADATA = False  # store TensorBoard run metadata every 50 steps
def get_arguments():
    """Parse and return the command-line arguments for WaveNet training.

    Returns:
        argparse.Namespace with one attribute per flag; defaults come from
        the module-level constants.
    """
    def _str_to_bool(s):
        """Convert 'true'/'false' (case-insensitive) to bool; reject others."""
        if s.lower() not in ['true', 'false']:
            raise ValueError('Argument needs to be a '
                             'boolean, got {}'.format(s))
        return {'true': True, 'false': False}[s.lower()]

    parser = argparse.ArgumentParser(description='WaveNet example network')
    parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
                        help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
    parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
                        help='The directory containing the VCTK corpus.')
    # Bug fix: the original used type=bool, and argparse's bool('False') is
    # True (any non-empty string is truthy) -- use the explicit converter.
    parser.add_argument('--store_metadata', type=_str_to_bool, default=METADATA,
                        help='Whether to store advanced debugging information '
                        '(execution time, memory consumption) for use with '
                        'TensorBoard. Default: ' + str(METADATA) + '.')
    parser.add_argument('--logdir', type=str, default=None,
                        help='Directory in which to store the logging '
                        'information for TensorBoard. '
                        'If the model already exists, it will restore '
                        'the state and will continue training. '
                        'Cannot use with --logdir_root and --restore_from.')
    parser.add_argument('--logdir_root', type=str, default=None,
                        help='Root directory to place the logging '
                        'output and generated model. These are stored '
                        'under the dated subdirectory of --logdir_root. '
                        'Cannot use with --logdir.')
    parser.add_argument('--restore_from', type=str, default=None,
                        help='Directory in which to restore the model from. '
                        'This creates the new model under the dated directory '
                        'in --logdir_root. '
                        'Cannot use with --logdir.')
    parser.add_argument('--checkpoint_every', type=int,
                        default=CHECKPOINT_EVERY,
                        help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
    parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
                        help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
    parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
                        help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
    parser.add_argument('--wavenet_params', type=str, default=WAVENET_PARAMS,
                        help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')
    parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
                        help='Concatenate and cut audio samples to this many '
                        'samples. Default: ' + str(SAMPLE_SIZE) + '.')
    parser.add_argument('--l2_regularization_strength', type=float,
                        default=L2_REGULARIZATION_STRENGTH,
                        help='Coefficient in the L2 regularization. '
                        'Default: False')
    parser.add_argument('--silence_threshold', type=float,
                        default=SILENCE_THRESHOLD,
                        help='Volume threshold below which to trim the start '
                        'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
    parser.add_argument('--optimizer', type=str, default='adam',
                        choices=optimizer_factory.keys(),
                        help='Select the optimizer specified by this option. Default: adam.')
    parser.add_argument('--momentum', type=float,
                        default=MOMENTUM, help='Specify the momentum to be '
                        'used by sgd or rmsprop optimizer. Ignored by the '
                        'adam optimizer. Default: ' + str(MOMENTUM) + '.')
    parser.add_argument('--histograms', type=_str_to_bool, default=False,
                        help='Whether to store histogram summaries. Default: False')
    parser.add_argument('--gc_channels', type=int, default=None,
                        help='Number of global condition channels. Default: None. Expecting: Int')
    parser.add_argument('--lc_channels', type=int, default=None,
                        help='Number of local condition channels. Default: None. Expecting: Int')
    parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
                        help='Maximum amount of checkpoints that will be kept alive. Default: '
                        + str(MAX_TO_KEEP) + '.')
    return parser.parse_args()
def save(saver, sess, logdir, step):
    """Write a checkpoint of the current session into *logdir*.

    The checkpoint file is named 'model.ckpt-<step>'; the directory is
    created on first use.
    """
    ckpt_path = os.path.join(logdir, 'model.ckpt')
    print('Storing checkpoint to {} ...'.format(logdir), end="")
    sys.stdout.flush()
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    saver.save(sess, ckpt_path, global_step=step)
    print(' Done.')
def load(saver, sess, logdir):
    """Restore the latest checkpoint found in *logdir* into *sess*.

    Returns:
        The global step parsed from the checkpoint filename, or None when
        no checkpoint exists.
    """
    print("Trying to restore saved checkpoints from {} ...".format(logdir),
          end="")
    ckpt = tf.train.get_checkpoint_state(logdir)
    if not ckpt:
        print(" No checkpoint found.")
        return None
    print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
    # Checkpoint paths end in '...model.ckpt-<step>'; recover <step>.
    global_step = int(ckpt.model_checkpoint_path
                      .split('/')[-1]
                      .split('-')[-1])
    print(" Global step was: {}".format(global_step))
    print(" Restoring...", end="")
    saver.restore(sess, ckpt.model_checkpoint_path)
    print(" Done.")
    return global_step
def get_default_logdir(logdir_root):
    """Return the dated default log directory under *logdir_root*."""
    return os.path.join(logdir_root, 'train', STARTED_DATESTRING)
def validate_directories(args):
    """Cross-check --logdir / --logdir_root / --restore_from and fill defaults.

    Raises:
        ValueError: when mutually exclusive directory flags are combined.
    Returns:
        dict with 'logdir', 'logdir_root' and 'restore_from' entries.
    """
    if args.logdir and args.logdir_root:
        raise ValueError("--logdir and --logdir_root cannot be "
                         "specified at the same time.")

    if args.logdir and args.restore_from:
        raise ValueError(
            "--logdir and --restore_from cannot be specified at the same "
            "time. This is to keep your previous model from unexpected "
            "overwrites.\n"
            "Use --logdir_root to specify the root of the directory which "
            "will be automatically created with current date and time, or use "
            "only --logdir to just continue the training from the last "
            "checkpoint.")

    # Fall back to module defaults only when the flags were truly omitted
    # (None) -- an empty string is kept as-is.
    logdir_root = LOGDIR_ROOT if args.logdir_root is None else args.logdir_root

    logdir = args.logdir
    if logdir is None:
        logdir = get_default_logdir(logdir_root)
        print('Using default logdir: {}'.format(logdir))

    # With no explicit restore directory, continue from the log directory.
    restore_from = logdir if args.restore_from is None else args.restore_from

    return {
        'logdir': logdir,
        'logdir_root': args.logdir_root,
        'restore_from': restore_from
    }
def main():
    """Train WaveNet: parse args, build the input pipeline and network
    graph, then run the training loop with periodic checkpointing."""
    args = get_arguments()

    try:
        directories = validate_directories(args)
    except ValueError as e:
        print("Some arguments are wrong:")
        print(str(e))
        return

    logdir = directories['logdir']
    restore_from = directories['restore_from']

    # When restoring from a different directory than we write to, the run is
    # treated as fresh training (the step counter restarts at 0).
    is_overwritten_training = logdir != restore_from

    with open(args.wavenet_params, 'r') as f:
        wavenet_params = json.load(f)

    # Coordinator for the audio-reader queue threads.
    coord = tf.train.Coordinator()

    with tf.name_scope('create_inputs'):
        # A threshold at (or below) EPSILON disables silence trimming.
        silence_threshold = args.silence_threshold if args.silence_threshold > \
                                                      EPSILON else None
        gc_enabled = args.gc_channels is not None
        reader = AudioReader(
            args.data_dir,
            coord,
            sample_rate=wavenet_params['sample_rate'],
            gc_enabled=gc_enabled,
            receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params["filter_width"],
                                                                   wavenet_params["dilations"],
                                                                   wavenet_params["scalar_input"],
                                                                   wavenet_params["initial_filter_width"]),
            sample_size=args.sample_size,
            silence_threshold=silence_threshold)
        audio_batch = reader.dequeue(args.batch_size)
        if gc_enabled:
            gc_id_batch = reader.dequeue_gc(args.batch_size)
        else:
            gc_id_batch = None

    # Build the WaveNet graph from the JSON architecture description.
    net = WaveNetModel(
        batch_size=args.batch_size,
        dilations=wavenet_params["dilations"],
        filter_width=wavenet_params["filter_width"],
        residual_channels=wavenet_params["residual_channels"],
        dilation_channels=wavenet_params["dilation_channels"],
        skip_channels=wavenet_params["skip_channels"],
        quantization_channels=wavenet_params["quantization_channels"],
        output_channels=wavenet_params["output_channels"],
        log_scale_min=wavenet_params["log_scale_min"],
        use_biases=wavenet_params["use_biases"],
        scalar_input=wavenet_params["scalar_input"],
        initial_filter_width=wavenet_params["initial_filter_width"],
        histograms=args.histograms,
        local_condition_channels=args.lc_channels,
        global_condition_channels=args.gc_channels,
        global_condition_cardinality=reader.gc_category_cardinality)

    # A strength of 0 means "no L2 regularization" downstream.
    if args.l2_regularization_strength == 0:
        args.l2_regularization_strength = None
    loss = net.loss(input_batch=audio_batch,
                    global_condition_batch=gc_id_batch,
                    l2_regularization_strength=args.l2_regularization_strength)
    optimizer = optimizer_factory[args.optimizer](
        learning_rate=args.learning_rate,
        momentum=args.momentum)
    trainable = tf.trainable_variables()
    optim = optimizer.minimize(loss, var_list=trainable)

    # TensorBoard logging.
    writer = tf.summary.FileWriter(logdir)
    writer.add_graph(tf.get_default_graph())
    run_metadata = tf.RunMetadata()
    summaries = tf.summary.merge_all()

    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    init = tf.global_variables_initializer()
    sess.run(init)

    # Saver for storing checkpoints of the model.
    saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)

    try:
        saved_global_step = load(saver, sess, restore_from)
        if is_overwritten_training or saved_global_step is None:
            # The first training step will be saved_global_step + 1,
            # therefore we put -1 here for new or overwritten trainings.
            saved_global_step = -1
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are not
    # reported as checkpoint corruption (the exception is re-raised anyway).
    except Exception:
        print("Something went wrong while restoring checkpoint. "
              "We will terminate training to avoid accidentally overwriting "
              "the previous model.")
        raise

    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    reader.start_threads(sess)

    step = None
    last_saved_step = saved_global_step
    try:
        for step in range(saved_global_step + 1, args.num_steps):
            start_time = time.time()
            if args.store_metadata and step % 50 == 0:
                # Slow run that stores extra information for debugging.
                print('Storing metadata')
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                summary, loss_value, _ = sess.run(
                    [summaries, loss, optim],
                    options=run_options,
                    run_metadata=run_metadata)
                writer.add_summary(summary, step)
                writer.add_run_metadata(run_metadata,
                                        'step_{:04d}'.format(step))
                tl = timeline.Timeline(run_metadata.step_stats)
                timeline_path = os.path.join(logdir, 'timeline.trace')
                with open(timeline_path, 'w') as f:
                    f.write(tl.generate_chrome_trace_format(show_memory=True))
            else:
                summary, loss_value, _ = sess.run([summaries, loss, optim])
                writer.add_summary(summary, step)

            duration = time.time() - start_time
            print('step {:d} - loss = {:.3f}, ({:.3f} sec/step)'
                  .format(step, loss_value, duration))

            if step % args.checkpoint_every == 0:
                save(saver, sess, logdir, step)
                last_saved_step = step

    except KeyboardInterrupt:
        # Introduce a line break after ^C is displayed so save message
        # is on its own line.
        print()
    finally:
        # Bug fix: 'step' is still None when an exception occurs before the
        # first loop iteration; 'None > int' raises TypeError on Python 3
        # and would mask the original error.
        if step is not None and step > last_saved_step:
            save(saver, sess, logdir, step)
        coord.request_stop()
        coord.join(threads)
# Standard script entry guard: start training only when executed directly.
if __name__ == '__main__':
    main()
1c4773afb9dfe031efe91c301916c555e9dcc6a3 | 9,570 | py | Python | src/HYPERPLUME/hyperplume.py | Pabsm94/Easyplume | ee54194c1c0930b2a0ef442c47f80bd4570913d2 | [
"MIT"
] | null | null | null | src/HYPERPLUME/hyperplume.py | Pabsm94/Easyplume | ee54194c1c0930b2a0ef442c47f80bd4570913d2 | [
"MIT"
] | null | null | null | src/HYPERPLUME/hyperplume.py | Pabsm94/Easyplume | ee54194c1c0930b2a0ef442c47f80bd4570913d2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 14:07:39 2016
@author: pablo
"""
import numpy as np
import abc
import matplotlib.pyplot as plt
class Hyperplume():
    """Base class for plasma plume expansion models (AEM and SSM).

    Loads the target plasma description and provides the thermodynamic
    relations (temperature, potential, density) shared by the child codes.
    """

    # NOTE: Python-2 style metaclass declaration. Under Python 3 this is
    # inert, so Hyperplume stays directly instantiable (the documented usage
    # relies on that) while subclasses still override solver/query.
    __metaclass__ = abc.ABCMeta

    @abc.abstractclassmethod
    def solver(self):
        """Integrate the plume equations; particularised by each plume code."""
        return

    @abc.abstractclassmethod
    def query(self, z, r):
        """Return plasma profile data at the given (z, r) grid points;
        particularised by each plume code."""
        return

    def __init__(self, plasma={'Electrons': {'Gamma': 1, 'T_0_electron': 2.1801714e-19, 'q_electron': -1.6e-19}, 'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}}, z_span=np.linspace(0, 100, 500), r_span=np.linspace(0, 40, 500), n_init=0.0472 * np.linspace(1, 0, 500)**2):
        """Store the plasma description and the integration domain.

        Args:
            plasma (dict): simple_plasma-style dictionary with 'Electrons'
                (Gamma, T_0_electron, q_electron) and 'Ions'
                (mass_ion, q_ion) entries.
            z_span (numpy.ndarray): axial region where the problem is integrated.
            r_span (numpy.ndarray): initial far-field radial profile.
            n_init (numpy.ndarray): initial dimensional density front.
        """
        self.plasma = plasma
        self.Gamma = plasma['Electrons']['Gamma']       # thermal expansion exponent
        self.T_0 = plasma['Electrons']['T_0_electron']  # initial electron temperature [J]
        self.m_ion = plasma['Ions']['mass_ion']         # ion mass [kg]
        self.q_ion = plasma['Ions']['q_ion']            # ion charge [C]
        self.z_span = z_span
        self.eta = r_span
        self.n0 = n_init

    def simple_plasma(self, charge=1.6e-19, ion_mass=2.1801714e-25, init_plasma_temp=2.1801714e-19, Gamma=1):
        """Build a two-species plasma dictionary (ions + electrons).

        Raises:
            ValueError: if Gamma lies outside the supported [1, 2] range.
                (Bug fix: the original printed a warning and then crashed
                with UnboundLocalError on 'return plasma'.)
        """
        if Gamma < 1 or Gamma > 2:  # checking thermal expansion model
            print('Gamma is outside isothermal or polytropic boundaries')
            raise ValueError('Gamma is outside isothermal or polytropic boundaries')
        return {'Ions': {'mass_ion': ion_mass, 'q_ion': charge},
                'Electrons': {'q_electron': -charge,
                              'T_0_electron': init_plasma_temp,
                              'Gamma': Gamma}}

    def temp(self, n, n_0, T_0, Gamma):
        """Plasma temperature T(n) for the chosen expansion model."""
        if Gamma == 1:
            # Isothermal: constant T_0, broadcast to the shape of n.
            return T_0 * (n * 0 + 1)
        return T_0 * ((n / n_0) ** (Gamma - 1))

    def phi(self, n, n_0, T_0, Gamma, e_charge):
        """Ambipolar electric potential phi(n) for the chosen expansion model."""
        if Gamma == 1:
            return (T_0 / e_charge) * np.log(n / n_0)
        return (T_0 / e_charge) * (Gamma / (Gamma - 1)) * ((n / n_0) ** (Gamma - 1) - 1)

    def n(self, n_0, T_0, phi, Gamma, e_charge):
        """Plasma density n(phi); exact inverse of phi().

        Bug fix: the polytropic branch previously computed
        '(...)**1/(Gamma-1)', i.e. a division by (Gamma-1), instead of the
        intended exponent 1/(Gamma-1) that inverts phi().
        """
        if Gamma == 1:
            return n_0 * np.exp(phi * e_charge / T_0)
        return n_0 * ((Gamma - 1) / Gamma * phi * e_charge / T_0 + 1) ** (1 / (Gamma - 1))

    def eta_deriver(self, x, y):
        """Numerical derivative dy/dx along the eta coordinate.

        Bug fix: the original passed np.gradient(x) (an array of spacings)
        as the second argument of np.gradient, which modern NumPy interprets
        as coordinate values (zero spacing on a uniform grid). Passing the
        coordinates x directly computes dy/dx as intended.
        """
        return np.gradient(y, x)

    def plot(self, z=np.array([15, 20, 25, 30]), r=np.array([20, 25, 30, 35]), var_name='n', contour_levels=[0, 1, 2, 3, 4, 5, 6, 7, 8]):
        """Contour-plot one plasma variable on the (z, r) grid and save it
        as '<var_name>.pdf'.

        var_name is one of 'lnn', 'u_z', 'u_r', 'T', 'phi', 'eta'.
        """
        lnn, u_z, u_r, T, phi, error, eta = self.query(z, r)
        # Explicit lookup instead of eval(var_name): identical behavior for
        # the documented names, without arbitrary-code evaluation.
        fields = {'lnn': lnn, 'u_z': u_z, 'u_r': u_r,
                  'T': T, 'phi': phi, 'eta': eta}
        fig = plt.figure()
        CE = plt.contour(z, r, fields[var_name], contour_levels)
        plt.title(var_name)
        plt.xlabel(r'$\ z/R_0 $')
        plt.ylabel(r'$\ r/R_0 $')
        plt.ylim(0, 10)
        plt.clabel(CE, CE.levels, fontsize=6)
        plt.savefig(var_name + '.pdf', bbox_inches='tight')
        fig.show()
| 34.301075 | 262 | 0.546604 |
import numpy as np
import abc
import matplotlib.pyplot as plt
class Hyperplume():
    """Base class for plasma plume expansion models: loads the target plasma
    and provides the shared thermodynamic relations (T, phi, n)."""
    # Python-2 style metaclass declaration; inert under Python 3.
    __metaclass__= abc.ABCMeta
    @abc.abstractclassmethod
    def solver(self):
        """Abstract: integrate the plume equations (per plume code)."""
        return
    @abc.abstractclassmethod
    def query(self,z,r):
        """Abstract: return plasma profile data at (z, r) (per plume code)."""
        return
    def __init__(self,plasma={'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}},z_span=np.linspace(0,100,500),r_span=np.linspace(0,40,500),n_init=0.0472*np.linspace(1,0,500)**2):
        """Store the plasma description and the integration domain."""
        self.plasma = plasma
        self.Gamma = plasma['Electrons']['Gamma']
        self.T_0 = plasma['Electrons']['T_0_electron']
        self.m_ion = plasma['Ions']['mass_ion']
        self.q_ion = plasma['Ions']['q_ion']
        self.z_span = z_span
        self.eta = r_span
        self.n0 = n_init
    def simple_plasma(self,charge=1.6e-19,ion_mass=2.1801714e-25,init_plasma_temp=2.1801714e-19,Gamma=1):
        """Build a two-species plasma dict (ions + electrons)."""
        # NOTE(review): when Gamma is out of range, 'plasma' is never bound
        # and the return below raises UnboundLocalError.
        if Gamma < 1 or Gamma > 2:
            print ('Gamma is outside isothermal or polytropic boundaries')
        else:
            plasma={'Ions':{'mass_ion': ion_mass,'q_ion':charge}, 'Electrons':{'q_electron': -charge,'T_0_electron':init_plasma_temp,'Gamma':Gamma} }
        return plasma
    def temp(self,n,n_0,T_0,Gamma):
        """Plasma temperature T(n): constant for Gamma==1, polytropic otherwise."""
        if Gamma == 1:
            T = T_0*(n*0 + 1)
        else:
            T = T_0*((n/n_0)**(Gamma-1))
        return T
    def phi (self,n,n_0,T_0,Gamma,e_charge):
        """Ambipolar electric potential phi(n) for the expansion model."""
        if Gamma == 1:
            phi = (T_0/e_charge)*np.log(n/n_0)
        else :
            phi = (T_0/e_charge)*(Gamma / ((Gamma - 1)) * ((n/n_0)**(Gamma-1)-1))
        return phi
    def n(self,n_0,T_0,phi,Gamma,e_charge):
        """Plasma density n(phi)."""
        if Gamma == 1:
            n = n_0*np.exp(phi*e_charge/T_0)
        else:
            # NOTE(review): '**1/(Gamma-1)' binds as (x**1)/(Gamma-1); the
            # inverse of phi() would need the exponent x**(1/(Gamma-1)).
            n = n_0*(((Gamma-1)/Gamma*phi*e_charge/T_0 + 1 )**1/(Gamma-1))
        return n
    def eta_deriver(self,x,y):
        """Numerical derivative of y with respect to x."""
        # NOTE(review): modern NumPy treats an array second argument as
        # coordinates; passing the spacings dx here yields zero spacing on a
        # uniform grid -- np.gradient(y, x) is presumably what was meant.
        dx = np.gradient(x)
        y_prime = np.gradient(y,dx)
        return y_prime
    def plot(self,z=np.array([15,20,25,30]),r=np.array([20,25,30,35]),var_name='n',contour_levels=[0,1,2,3,4,5,6,7,8]):
        """Contour-plot one plasma variable ('lnn','u_z','u_r','T','phi','eta')
        over (z, r) and save it as '<var_name>.pdf'."""
        lnn,u_z,u_r,T,phi,error,eta = self.query(z,r)
        fig = plt.figure()
        # eval(var_name) selects one of the local arrays unpacked above.
        CE = plt.contour(z,r,eval(var_name),contour_levels)
        plt.title(var_name)
        plt.xlabel(r'$\ z/R_0 $')
        plt.ylabel(r'$\ r/R_0 $')
        plt.ylim(0,10)
        plt.clabel(CE,CE.levels,fontsize=6)
        plt.savefig(var_name + '.pdf',bbox_inches='tight')
        fig.show()
| true | true |
1c47745f1c0e2c39646a97885253608082c44006 | 46 | py | Python | __init__.py | lucaskjaero/WiktionaryParser | c60a7cb7e50ca929e02c8e6e258c23f4d4114c21 | [
"MIT"
] | 1 | 2021-08-24T17:51:41.000Z | 2021-08-24T17:51:41.000Z | __init__.py | lucaskjaero/WiktionaryParser | c60a7cb7e50ca929e02c8e6e258c23f4d4114c21 | [
"MIT"
] | null | null | null | __init__.py | lucaskjaero/WiktionaryParser | c60a7cb7e50ca929e02c8e6e258c23f4d4114c21 | [
"MIT"
] | 1 | 2020-12-14T16:22:31.000Z | 2020-12-14T16:22:31.000Z | from .wiktionaryparser import WiktionaryParser | 46 | 46 | 0.913043 | from .wiktionaryparser import WiktionaryParser | true | true |
1c477468c75e4642c2f29e87bfdbf22ef08e11fd | 4,043 | py | Python | models/definitions/flownet/inference.py | HaydenFaulkner/VidDet | 2dbc104a41bf1192a00ffde07695180eab18cea8 | [
"MIT"
] | 19 | 2019-08-05T12:20:17.000Z | 2020-10-29T11:33:50.000Z | models/definitions/flownet/inference.py | HaydenFaulkner/VideoYOLO | 2dbc104a41bf1192a00ffde07695180eab18cea8 | [
"MIT"
] | 2 | 2021-08-25T14:47:55.000Z | 2022-02-09T23:30:49.000Z | models/definitions/flownet/inference.py | HaydenFaulkner/VideoYOLO | 2dbc104a41bf1192a00ffde07695180eab18cea8 | [
"MIT"
] | 3 | 2020-03-02T14:52:18.000Z | 2020-06-05T07:51:18.000Z | import cv2
import mxnet as mx
import numpy as np
from scipy.misc import imresize
from tqdm import tqdm
from flownet import get_flownet
from utils import flow_to_image, crop, normalise
def process_two_images(model, imgs, ctx=None):
    """Compute the optical flow between two RGB frames.

    Args:
        model: the flow network to run.
        imgs: a list of exactly 2 frames, each either an RGB array or a
            path to an image file.
        ctx: the mxnet context to place the input on.

    Returns:
        (img, flow) where img is the flow visualization (upscaled 4x) and
        flow the raw flow field; None when the input is invalid. NOTE:
        callers that unpack the result will fail on the None case.
    """
    import os  # local import: the module header does not import os

    if len(imgs) != 2:
        return None
    # Bug fix: the original loaded cv2.imread(files[i]) -- 'files' and 'i'
    # are undefined here; the path is the list element itself.
    if isinstance(imgs[0], str):
        if os.path.exists(imgs[0]):
            imgs[0] = cv2.cvtColor(cv2.imread(imgs[0]), cv2.COLOR_BGR2RGB)
        else:
            return None
    if isinstance(imgs[1], str):
        if os.path.exists(imgs[1]):
            imgs[1] = cv2.cvtColor(cv2.imread(imgs[1]), cv2.COLOR_BGR2RGB)
        else:
            return None

    imgs = crop(imgs)
    imgs = np.array(imgs)
    imgs = np.moveaxis(imgs, -1, 1)  # HWC -> CHW
    imgs = normalise(imgs)
    imgs = mx.nd.array(imgs, ctx=ctx)
    imgs = mx.nd.expand_dims(imgs, 0)  # add batch axis

    flow = model(imgs)  # run the model

    flow = flow.asnumpy()
    flow = flow.squeeze()
    flow = flow.transpose(1, 2, 0)  # CHW -> HWC
    img = flow_to_image(flow)
    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; kept
    # here to preserve behavior -- confirm the pinned SciPy version.
    img = imresize(img, 4.0)  # bilinear upsample of the visualization only

    return img, flow
def process_imagedir(model, input_dir, output_dir=None, ctx=None):
    """Compute flow images for consecutive frame pairs in a directory.

    Args:
        model: the flow network to run.
        input_dir: directory searched recursively for image files.
        output_dir: where flow images are written; defaults to a 'flow'
            subdirectory next to the first frame.
        ctx: the mxnet context.

    Returns:
        The output directory path, or None when no images were found.
    """
    import glob  # local imports: the module header does not import glob/os
    import os

    # Try each extension until one matches; the first non-empty match wins.
    files = []
    for ext in [".jpg", ".png", ".jpeg", ".JPG", ".PNG", ".JPEG"]:
        files = glob.glob(input_dir + "/**/*" + ext, recursive=True)
        if len(files) > 0:
            break

    if not files:
        print("Couldn't find any files in {}".format(input_dir))
        return None

    files.sort()

    for i in tqdm(range(len(files) - 1), desc='Calculating Flow'):
        img, flow = process_two_images(model, files[i:i+2], ctx)
        frame_dir, file = os.path.split(files[i])
        if output_dir is None:
            output_dir = os.path.join(frame_dir, 'flow')
        # Bug fix: the original passed exists_ok=True, which is a TypeError;
        # the keyword is exist_ok.
        os.makedirs(output_dir, exist_ok=True)
        cv2.imwrite(os.path.join(output_dir, file), cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

    return output_dir
def process_video(model, input_path, output_path=None, ctx=None, max_frames=200):
    """Convert a video into a flow video.

    Args:
        model: the flow network to run.
        input_path: path of the input video.
        output_path: path of the flow video; defaults to
            '<input stem>_flow.mp4'.
        ctx: the mxnet context.
        max_frames: cap on the number of frames read (generalizes the
            previously hard-coded 200; default keeps the old behavior).

    Returns:
        The output video path, or None when fewer than 2 frames were read.
    """
    capture = cv2.VideoCapture(input_path)

    frames = []
    while_safety = 0
    while len(frames) < max_frames:
        _, image = capture.read()  # read an image from the capture
        if while_safety > 500:  # break the while if our safety maxs out at 500
            break
        if image is None:  # decoder hiccup: count it and retry
            while_safety += 1
            continue
        while_safety = 0  # reset the safety count
        frames.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    capture.release()

    if len(frames) < 2:
        return None

    if output_path is None:
        output_path = input_path[:-4] + '_flow.mp4'

    # The model works on cropped frames; size the writer accordingly.
    cropped_frames = crop(frames)
    h, w, _ = cropped_frames[0].shape
    video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25, (w, h))

    for i in tqdm(range(len(frames) - 1), desc='Calculating Flow'):
        mx.nd.waitall()
        img, flow = process_two_images(model, frames[i:i+2], ctx)
        video.write(img)
    video.release()  # release the video

    return output_path
# Manual smoke test: load pretrained FlowNet2-C weights and run the video
# pipeline (commented lines are the FlowNet2-S variant).
if __name__ == '__main__':
    # just for debugging
    # save_path = "models/definitions/flownet/weights/FlowNet2-S_checkpoint.params"
    # NOTE(review): save_path is assigned but never used below.
    save_path = "models/definitions/flownet/weights/FlowNet2-C_checkpoint.params"
    ctx = mx.gpu(0)
    # net = get_flownet('S', pretrained=True, ctx=ctx)
    net = get_flownet('C', pretrained=True, ctx=ctx)
    net.hybridize()
    input_path = "/path/to/test.mp4"
    process_video(net, input_path, ctx=ctx)
    print("DONE")
| 26.083871 | 108 | 0.606728 | import cv2
import mxnet as mx
import numpy as np
from scipy.misc import imresize
from tqdm import tqdm
from flownet import get_flownet
from utils import flow_to_image, crop, normalise
def process_two_images(model, imgs, ctx=None):
    """Compute the optical flow between two RGB frames (array or file path
    entries); returns (flow visualization, raw flow) or None on bad input."""
    if len(imgs) != 2:
        return None
    # NOTE(review): 'files' and 'i' are undefined in this scope -- loading
    # from a path presumably should read imgs[0]/imgs[1]; also 'os' is not
    # imported by this module.
    if isinstance(imgs[0], str):
        if os.path.exists(imgs[0]):
            imgs[0] = cv2.cvtColor(cv2.imread(files[i]), cv2.COLOR_BGR2RGB)
        else:
            return None
    if isinstance(imgs[1], str):
        if os.path.exists(imgs[1]):
            imgs[1] = cv2.cvtColor(cv2.imread(files[i]), cv2.COLOR_BGR2RGB)
        else:
            return None
    imgs = crop(imgs)
    imgs = np.array(imgs)
    imgs = np.moveaxis(imgs, -1, 1)  # HWC -> CHW
    imgs = normalise(imgs)
    imgs = mx.nd.array(imgs, ctx=ctx)
    imgs = mx.nd.expand_dims(imgs, 0)  # add batch axis
    flow = model(imgs)
    flow = flow.asnumpy()
    flow = flow.squeeze()
    flow = flow.transpose(1, 2, 0)  # CHW -> HWC
    img = flow_to_image(flow)
    img = imresize(img, 4.0)  # upscale the visualization 4x
    return img, flow
def process_imagedir(model, input_dir, output_dir=None, ctx=None):
    """Compute flow images for consecutive frame pairs found (recursively)
    in *input_dir*; returns the output directory or None if no images."""
    # NOTE(review): 'glob' and 'os' are not imported by this module.
    files = []
    for ext in [".jpg", ".png", ".jpeg", ".JPG", ".PNG", ".JPEG"]:
        files = glob.glob(input_dir + "/**/*" + ext, recursive=True)
        if len(files) > 0:
            break
    if not len(files) > 0:
        print("Couldn't find any files in {}".format(input_dir))
        return None
    files.sort()
    for i in tqdm(range(len(files) - 1), desc='Calculating Flow'):
        img, flow = process_two_images(model, files[i:i+2], ctx)
        dir, file = os.path.split(files[i])
        if output_dir is None:
            output_dir = os.path.join(dir, 'flow')
        # NOTE(review): the keyword is 'exist_ok'; 'exists_ok' raises TypeError.
        os.makedirs(output_dir, exists_ok=True)
        cv2.imwrite(os.path.join(output_dir, file), cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    return output_dir
def process_video(model, input_path, output_path=None, ctx=None):
    """Convert a video into a flow video; returns the output path, or None
    when fewer than 2 frames could be read."""
    capture = cv2.VideoCapture(input_path)
    frames = []
    while_safety = 0
    # NOTE(review): frame count is capped at a hard-coded 200.
    while len(frames) < 200:
        _, image = capture.read()
        if while_safety > 500:  # give up after 500 consecutive failed reads
            break
        if image is None:  # decoder hiccup: count and retry
            while_safety += 1
            continue
        while_safety = 0
        frames.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    capture.release()
    if len(frames) < 2:
        return None
    if output_path is None:
        output_path = input_path[:-4] + '_flow.mp4'
    # The model works on cropped frames; size the writer accordingly.
    cropped_frames = crop(frames)
    h, w, _= cropped_frames[0].shape
    video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25, (w, h))
    for i in tqdm(range(len(frames)-1), desc='Calculating Flow'):
        mx.nd.waitall()
        img, flow = process_two_images(model, frames[i:i+2], ctx)
        video.write(img)
    video.release()
    return output_path
# Manual smoke test: load pretrained FlowNet2-C weights and run the video
# pipeline on a sample file.
if __name__ == '__main__':
    # NOTE(review): save_path is assigned but never used below.
    save_path = "models/definitions/flownet/weights/FlowNet2-C_checkpoint.params"
    ctx = mx.gpu(0)
    net = get_flownet('C', pretrained=True, ctx=ctx)
    net.hybridize()
    input_path = "/path/to/test.mp4"
    process_video(net, input_path, ctx=ctx)
    print("DONE")
| true | true |
1c47763f1386690bf0efd66398f708660e2f5d45 | 5,537 | py | Python | scripts/automation/trex_control_plane/astf/trex_astf_lib/trex_astf_global_info.py | alialnu/trex-core | ae4ab05a6215fd0a859adde40dac6afa8bf0f950 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/astf/trex_astf_lib/trex_astf_global_info.py | alialnu/trex-core | ae4ab05a6215fd0a859adde40dac6afa8bf0f950 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/astf/trex_astf_lib/trex_astf_global_info.py | alialnu/trex-core | ae4ab05a6215fd0a859adde40dac6afa8bf0f950 | [
"Apache-2.0"
] | null | null | null | import socket
class ASTFGlobalInfoBase(object):
    """Attribute tree over a nested parameter schema.

    The schema maps a group name either to a dict (another tree level) or to
    a list of field descriptors ({'name', optional 'type' list, optional
    'sub_type'}). Reading a group attribute lazily creates the child node;
    assigning to a leaf validates the value against its descriptor.
    """

    # Default (empty) schema; note the __init__ default binds this shared dict.
    _g_params = {}

    class inner(object):
        """Leaf level: holds the validated values of one parameter group."""

        def __init__(self, params, name):
            self._fields = {}
            self._params = params
            self._name = name

        def __setattr__(self, name, val):
            if name.startswith("_"):
                return super(ASTFGlobalInfoBase.inner, self).__setattr__(name, val)
            for p in self._params:
                if name == p["name"]:
                    if "sub_type" in p:
                        if p["sub_type"] == "ipv6_addr":
                            if type(val) != str:
                                raise AttributeError("{0} in {1} should have one of the following types: {2}"
                                                     .format(name, self._name, str))
                            # Store IPv6 addresses as a list of 16 byte values.
                            b = socket.inet_pton(socket.AF_INET6, val)
                            l = list(b)
                            # in case of Python 2 (bytes iterate as str chars)
                            if not (type(l[0]) is int):
                                l = [ord(i) for i in l]
                            self._fields[name] = l
                            return
                    if "type" in p and type(val) not in p["type"]:
                        raise AttributeError("{0} in {1} should have one of the following types: {2}"
                                             .format(name, self._name, p["type"]))
                    self._fields[name] = val
                    return
            raise AttributeError("%r has no attribute %s" % (self._name, name))

        def __getattr__(self, name):
            # Bug fix: the original forwarded '_'-prefixed misses to
            # object.__getattr__, which does not exist; raise directly.
            if name.startswith("_"):
                raise AttributeError("%r has no attribute %s" % (type(self).__name__, name))
            for p in self._params:
                if name == p["name"]:
                    return self._fields[name]
            raise AttributeError("%r has no attribute %s" % (self._name, name))

        def to_json(self):
            """Return the assigned fields as a plain dict."""
            return self._fields

    def __init__(self, params=_g_params, name="globalp"):
        self._fields = {}
        self._params = params
        self._name = name

    def __setattr__(self, name, val):
        if name.startswith("_"):
            return super(ASTFGlobalInfoBase, self).__setattr__(name, val)
        if name in self._params:
            # Groups are containers: assignment must target a field one
            # level down, so tell the caller what is available there.
            if type(self._params[name]) is dict:
                next_level_params = self._params[name].keys()
            else:
                next_level_params = [p["name"] for p in self._params[name]]
            raise AttributeError("{0} in {1} should be followed by one of {2}".format(name, self._name, next_level_params))
        raise AttributeError("{0} is not part of valid params".format(name))

    def __getattr__(self, name):
        # Bug fix: the original referenced the nonexistent attribute
        # 'ASTFGlobalInfoBase.in_tcp' here; raise AttributeError directly.
        if name.startswith("_"):
            raise AttributeError("%r has no attribute %s" % (type(self).__name__, name))
        if name in self._params:
            long_name = self._name + "." + name
            spec = self._params[name]
            if type(spec) is dict:
                # Dict schema entry -> nested tree level (created lazily).
                return self._fields.setdefault(name, ASTFGlobalInfoBase(params=spec, name=long_name))
            if type(spec) is list:
                # List schema entry -> leaf group of typed fields.
                return self._fields.setdefault(name, ASTFGlobalInfoBase.inner(params=spec, name=long_name))
        raise AttributeError("{0} has no attribute {1} it has {2}".format(self._name, name, self._params.keys()))

    def to_json(self):
        """Serialize every touched group recursively into plain dicts."""
        return {field: sub.to_json() for field, sub in self._fields.items()}
class ASTFGlobalInfo(ASTFGlobalInfoBase):
    """Per-profile ASTF tunables: scheduler, IPv6, TCP and IP options."""

    # Schema: group name -> list of {name, type/sub_type} field descriptors.
    _g_params = {
        "scheduler": [
            {"name": "rampup_sec", "type": [int]},
            {"name": "accurate", "type": [int]},
        ],
        "ipv6": [
            {"name": "src_msb", "sub_type": "ipv6_addr"},
            {"name": "dst_msb", "sub_type": "ipv6_addr"},
            {"name": "enable", "type": [int]},
        ],
        "tcp": [
            {"name": "mss", "type": [int]},
            {"name": "initwnd", "type": [int]},
            {"name": "rxbufsize", "type": [int]},
            {"name": "txbufsize", "type": [int]},
            {"name": "rexmtthresh", "type": [int]},
            {"name": "do_rfc1323", "type": [int]},
            {"name": "keepinit", "type": [int]},
            {"name": "keepidle", "type": [int]},
            {"name": "keepintvl", "type": [int]},
            {"name": "delay_ack_msec", "type": [int]},
            {"name": "no_delay", "type": [int]},
        ],
        "ip": [
            {"name": "tos", "type": [int]},
            {"name": "ttl", "type": [int]},
        ],
    }

    def __init__(self, params=_g_params, name="GlobalInfo"):
        super(ASTFGlobalInfo, self).__init__(params, name)
class ASTFGlobalInfoPerTemplate(ASTFGlobalInfoBase):
    """Per-template ASTF tunables: the subset of TCP/IP options that may be
    overridden for an individual template."""

    # Schema: group name -> list of {name, type} field descriptors.
    _g_params = {
        "tcp": [
            {"name": "initwnd", "type": [int]},
            {"name": "mss", "type": [int]},
            {"name": "no_delay", "type": [int]},
            {"name": "rxbufsize", "type": [int]},
            {"name": "txbufsize", "type": [int]},
        ],
        "ip": [
            {"name": "tos", "type": [int]},
            {"name": "ttl", "type": [int]},
        ],
    }

    def __init__(self, params=_g_params, name="GlobalInfoPerTemplate"):
        super(ASTFGlobalInfoPerTemplate, self).__init__(params, name)
| 37.161074 | 123 | 0.483475 | import socket
class ASTFGlobalInfoBase(object):
    """Schema-driven attribute container for ASTF global tunables.

    ``_g_params`` maps a group name either to a nested dict (another
    level of groups, materialised as ``ASTFGlobalInfoBase``) or to a list
    of leaf descriptors ({"name": ..., "type": [...]} or
    {"name": ..., "sub_type": "ipv6_addr"}), materialised as ``inner``.
    Attribute reads lazily create the matching sub-container; attribute
    writes are only legal on leaves and are type checked.
    """

    _g_params = {}

    class inner(object):
        """Leaf-level container holding the actual tunable values."""

        def __init__(self, params, name):
            self._fields = {}
            self._params = params
            self._name = name

        def __setattr__(self, name, val):
            # Private attributes bypass the schema check.
            if name.startswith("_"):
                return super(ASTFGlobalInfoBase.inner, self).__setattr__(
                    name, val)
            for param in self._params:
                if name != param["name"]:
                    continue
                if param.get("sub_type") == "ipv6_addr":
                    if type(val) != str:
                        raise AttributeError(
                            "{0} in {1} should have one of the following "
                            "types: {2}".format(name, self._name, str))
                    packed = socket.inet_pton(socket.AF_INET6, val)
                    octets = list(packed)
                    if type(octets[0]) is not int:
                        # Python 2: bytes iterate as 1-char strings.
                        octets = [ord(c) for c in octets]
                    self._fields[name] = octets
                    return
                if "type" in param and type(val) not in param["type"]:
                    raise AttributeError(
                        "{0} in {1} should have one of the following "
                        "types: {2}".format(name, self._name, param["type"]))
                self._fields[name] = val
                return
            raise AttributeError(
                "%r has no attribute %s" % (self._name, name))

        def __getattr__(self, name):
            # Only consulted when normal attribute lookup fails.
            if name.startswith("_"):
                raise AttributeError(name)
            for param in self._params:
                if name == param["name"]:
                    return self._fields[name]
            raise AttributeError(
                "%r has no attribute %s" % (self._name, name))

        def to_json(self):
            return self._fields

    def __init__(self, params=None, name="globalp"):
        # Resolve the (mutable) class schema at call time rather than
        # binding it as a default argument value.
        self._fields = {}
        self._params = self._g_params if params is None else params
        self._name = name

    def __setattr__(self, name, val):
        if name.startswith("_"):
            return super(ASTFGlobalInfoBase, self).__setattr__(name, val)
        # Non-leaf levels are read-only: setting a group name directly is
        # always an error; the message lists the valid next-level names.
        if name not in self._params:
            raise AttributeError(
                "{0} is not part of valid params".format(name))
        if type(self._params[name]) is dict:
            next_level_params = self._params[name].keys()
        else:
            next_level_params = [p["name"] for p in self._params[name]]
        raise AttributeError(
            "{0} in {1} should be followed by one of {2}".format(
                name, self._name, next_level_params))

    def __getattr__(self, name):
        if name.startswith("_"):
            # BUG FIX: this used to call
            # super(ASTFGlobalInfoBase.in_tcp, self).__getattr__(name),
            # but no ``in_tcp`` attribute exists on the class and object
            # has no __getattr__; raise the expected AttributeError
            # directly.
            raise AttributeError(name)
        if name in self._params:
            long_name = self._name + "." + name
            if type(self._params[name]) is dict:
                return self._fields.setdefault(
                    name, ASTFGlobalInfoBase(params=self._params[name],
                                             name=long_name))
            elif type(self._params[name]) is list:
                return self._fields.setdefault(
                    name, ASTFGlobalInfoBase.inner(params=self._params[name],
                                                   name=long_name))
        raise AttributeError(
            "{0} has no attribute {1} it has {2}".format(
                self._name, name, self._params.keys()))

    def to_json(self):
        ret = {}
        for field in self._fields.keys():
            ret[field] = self._fields[field].to_json()
        return ret
class ASTFGlobalInfo(ASTFGlobalInfoBase):
    """Per-profile global stack tunables (scheduler/ipv6/tcp/ip)."""

    _g_params = {
        "scheduler": [
            {"name": "rampup_sec", "type": [int]},
            {"name": "accurate", "type": [int]}
        ],
        "ipv6": [
            {"name": "src_msb", "sub_type": "ipv6_addr"},
            {"name": "dst_msb", "sub_type": "ipv6_addr"},
            {"name": "enable", "type": [int]}
        ],
        "tcp": [
            {"name": "mss", "type": [int]},
            {"name": "initwnd", "type": [int]},
            {"name": "rxbufsize", "type": [int]},
            {"name": "txbufsize", "type": [int]},
            {"name": "rexmtthresh", "type": [int]},
            {"name": "do_rfc1323", "type": [int]},
            {"name": "keepinit", "type": [int]},
            {"name": "keepidle", "type": [int]},
            {"name": "keepintvl", "type": [int]},
            {"name": "delay_ack_msec", "type": [int]},
            {"name": "no_delay", "type": [int]},
        ],
        "ip": [
            {"name": "tos", "type": [int]},
            {"name": "ttl", "type": [int]}
        ],
    }

    def __init__(self, params=None, name="GlobalInfo"):
        # BUG FIX: do not bind the mutable class schema as a default
        # argument; resolve it at call time.  ``return`` removed from
        # ``__init__``.
        super(ASTFGlobalInfo, self).__init__(
            self._g_params if params is None else params, name)
class ASTFGlobalInfoPerTemplate(ASTFGlobalInfoBase):
    """Per-template overrides for a subset of the tcp/ip tunables."""

    _g_params = {
        "tcp": [
            {"name": "initwnd", "type": [int]},
            {"name": "mss", "type": [int]},
            {"name": "no_delay", "type": [int]},
            {"name": "rxbufsize", "type": [int]},
            {"name": "txbufsize", "type": [int]},
        ],
        "ip": [
            {"name": "tos", "type": [int]},
            {"name": "ttl", "type": [int]}
        ],
    }

    def __init__(self, params=None, name="GlobalInfoPerTemplate"):
        # BUG FIX: resolve the mutable class schema at call time instead
        # of binding it as a default argument; ``return`` removed.
        super(ASTFGlobalInfoPerTemplate, self).__init__(
            self._g_params if params is None else params, name)
| true | true |
1c4777590dcdd7cd0868594deb226eb09b523f7d | 16,358 | py | Python | senlin/objects/fields.py | openstack/senlin | 390779ca1e08f819683e79993696f945f1c0393e | [
"Apache-2.0"
] | 45 | 2015-10-18T02:56:50.000Z | 2022-03-01T15:28:02.000Z | senlin/objects/fields.py | openstack/senlin | 390779ca1e08f819683e79993696f945f1c0393e | [
"Apache-2.0"
] | 2 | 2019-04-26T10:44:47.000Z | 2020-12-16T19:45:34.000Z | senlin/objects/fields.py | openstack/senlin | 390779ca1e08f819683e79993696f945f1c0393e | [
"Apache-2.0"
] | 45 | 2015-10-19T02:35:57.000Z | 2021-09-28T09:01:42.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import fields
import re
from senlin.common import consts
from senlin.common.i18n import _
CONF = cfg.CONF
# Field alias for code readability
# BooleanField = fields.BooleanField
FlexibleBooleanField = fields.FlexibleBooleanField
StringField = fields.StringField
IntegerField = fields.IntegerField
FloatField = fields.FloatField
UUIDField = fields.UUIDField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
ListOfStringsField = fields.ListOfStringsField
ListOfEnumField = fields.ListOfEnumField
class Boolean(fields.FieldType):
    """Strict boolean field type.

    Stricter than the oslo version: coercion uses strict parsing and the
    user-supplied default is handed to ``bool_from_string``.
    """

    def __init__(self, default=False):
        super(Boolean, self).__init__()
        self._default = default

    def coerce(self, obj, attr, value):
        """Convert *value* to a bool, strictly."""
        return strutils.bool_from_string(
            value, strict=True, default=self._default)

    def get_schema(self):
        return {'type': ['boolean']}
class NonNegativeInteger(fields.FieldType):
    """Integer field type that rejects values below zero.

    Kept (instead of the oslo variant) so that conversion failures
    produce a user friendly error message.
    """

    @staticmethod
    def coerce(obj, attr, value):
        try:
            result = int(value)
        except (TypeError, ValueError):
            raise ValueError(_("The value for %(attr)s must be an integer: "
                               "'%(value)s'.") %
                             {'attr': attr, 'value': value})
        if result < 0:
            raise ValueError(_("Value must be >= 0 for field '%s'.") % attr)
        return result

    def get_schema(self):
        return {
            'type': ['integer', 'string'],
            'minimum': 0
        }
# Senlin has a stricter field checking for object fields.
class Object(fields.Object):

    def get_schema(self):
        """Return the parent schema with unknown data keys disallowed."""
        schema = super(Object, self).get_schema()
        # We do not check whether self._obj_name is registered here; an
        # exception is raised anyway by the parent if it is not.
        schema['properties']['senlin_object.data']['additionalProperties'] \
            = False
        return schema
class UUID(fields.FieldType):
    """Field type holding a UUID-like string (dashes optional)."""

    _PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]'
                r'{4}-?[a-fA-F0-9]{12}$')

    @staticmethod
    def coerce(obj, attr, value):
        if uuidutils.is_uuid_like(value):
            return str(value)
        raise ValueError(_("The value for %(attr)s is not a valid UUID: "
                           "'%(value)s'.") % {'attr': attr, 'value': value})

    def get_schema(self):
        return {'type': ['string'], 'pattern': self._PATTERN}
class Json(fields.FieldType):
    """Field type whose value is an arbitrary JSON-compatible structure."""

    def coerce(self, obj, attr, value):
        # Strings are parsed as JSON documents; any other value is passed
        # through untouched.
        if isinstance(value, str):
            try:
                return jsonutils.loads(value)
            except ValueError:
                msg = _("The value (%s) is not a valid JSON.") % value
                raise ValueError(msg)
        return value

    def from_primitive(self, obj, attr, value):
        return self.coerce(obj, attr, value)

    def to_primitive(self, obj, attr, value):
        return jsonutils.dumps(value)

    def stringify(self, value):
        # NOTE(review): for a str input this returns the *parsed* JSON
        # structure rather than a string, which is at odds with the method
        # name -- confirm whether this is intentional before changing.
        if isinstance(value, str):
            try:
                return jsonutils.loads(value)
            except ValueError:
                raise
        return str(value)

    def get_schema(self):
        return {'type': ['object']}
class NotificationPriority(fields.Enum):
    """Enum of valid notification priorities."""

    # The priorities here are derived from oslo_messaging.notify.notifier
    ALL = consts.NOTIFICATION_PRIORITIES

    def __init__(self):
        super(NotificationPriority, self).__init__(self.ALL)


class NotificationPhase(fields.Enum):
    """Enum of valid notification phases."""

    ALL = consts.NOTIFICATION_PHASES

    def __init__(self):
        super(NotificationPhase, self).__init__(self.ALL)
class Name(fields.String):
    """String field constrained to valid senlin object names."""

    def __init__(self, min_len=1, max_len=255):
        super(Name, self).__init__()
        self.min_len = min_len
        self.max_len = max_len

    def coerce(self, obj, attr, value):
        err = None
        if len(value) < self.min_len:
            err = _("The value for the %(attr)s field must be at least "
                    "%(count)d characters long."
                    ) % {'attr': attr, 'count': self.min_len}
        elif len(value) > self.max_len:
            err = _("The value for the %(attr)s field must be less than "
                    "%(count)d characters long."
                    ) % {'attr': attr, 'count': self.max_len}
        else:
            # NOTE: This is pretty restrictive. We can relax it later when
            # there are requests to do so.
            # BUG FIX: use a raw string so that \d, \., \_ and \~ are regex
            # escapes rather than invalid string-literal escapes (a
            # DeprecationWarning today, a SyntaxError in future Pythons).
            # The re module resolves \u4e00-\u9fa5 (CJK range) itself.
            regex = re.compile(r'^[a-zA-Z\u4e00-\u9fa5\d\.\_\~-]*$',
                               re.IGNORECASE)
            if not regex.search(value):
                err = _("The value for the '%(attr)s' (%(value)s) contains "
                        "illegal characters. It must contain only "
                        "alphanumeric or \"_-.~\" characters and must start "
                        "with letter."
                        ) % {'attr': attr, 'value': value}
        if err:
            raise ValueError(err)
        return super(Name, self).coerce(obj, attr, value)

    def get_schema(self):
        return {
            'type': ['string'],
            'minLength': self.min_len,
            'maxLength': self.max_len
        }
class Capacity(fields.Integer):
    """Integer field for cluster capacities.

    Both bounds are validated against the ``max_nodes_per_cluster``
    global configuration option.
    """

    def __init__(self, minimum=0, maximum=None):
        super(Capacity, self).__init__()
        CONF.import_opt("max_nodes_per_cluster", "senlin.conf")
        if minimum > CONF.max_nodes_per_cluster:
            err = _("The value of 'minimum' cannot be greater than the global "
                    "constraint (%(m)d).") % {'m': CONF.max_nodes_per_cluster}
            raise ValueError(err)
        self.minimum = minimum
        if maximum is not None:
            if maximum < minimum:
                err = _("The value of 'maximum' must be greater than or equal "
                        "to that of the 'minimum' specified.")
                raise ValueError(err)
            if maximum > CONF.max_nodes_per_cluster:
                err = _("The value of 'maximum' cannot be greater than the "
                        "global constraint (%(m)d)."
                        ) % {'m': CONF.max_nodes_per_cluster}
                raise ValueError(err)
            self.maximum = maximum
        else:
            # No maximum requested: cap at the global limit.
            self.maximum = CONF.max_nodes_per_cluster

    def coerce(self, obj, attr, value):
        try:
            v = int(value)
        except Exception:
            raise ValueError(_("The value for %(attr)s must be an integer: "
                               "'%(value)s'.") %
                             {'attr': attr, 'value': value})
        if v < self.minimum:
            raise ValueError(_("The value for the %(a)s field must be greater "
                               "than or equal to %(n)d.") %
                             {'a': attr, 'n': self.minimum})
        elif v > self.maximum:
            raise ValueError(_("The value for the %(a)s field must be less "
                               "than or equal to %(n)d.") %
                             {'a': attr, 'n': self.maximum})
        return super(Capacity, self).coerce(obj, attr, v)

    def get_schema(self):
        return {
            'type': ['integer', 'string'],
            'minimum': self.minimum,
            'maximum': self.maximum,
            'pattern': '^[0-9]*$',
        }
class Sort(fields.String):
    """Comma-separated sort specification, e.g. ``name:asc,created_at``."""

    def __init__(self, valid_keys):
        super(Sort, self).__init__()
        self.valid_keys = valid_keys

    def coerce(self, obj, attr, value):
        for item in value.split(','):
            key, _sep, direction = item.partition(':')
            err = None
            if not key:
                raise ValueError(_("Missing sort key for '%s'.") % attr)
            if key not in self.valid_keys:
                err = _("Unsupported sort key '%(value)s' for '%(attr)s'."
                        ) % {'attr': attr, 'value': key}
            if direction and direction not in ('asc', 'desc'):
                err = _("Unsupported sort dir '%(value)s' for '%(attr)s'."
                        ) % {'attr': attr, 'value': direction}
            if err:
                raise ValueError(err)
        return super(Sort, self).coerce(obj, attr, value)

    def get_schema(self):
        return {
            'type': ['string'],
        }
class IdentityList(fields.List):
    """List field with minimum-size and uniqueness constraints."""

    def __init__(self, element_type, min_items=0, unique=True, nullable=False,
                 **kwargs):
        super(IdentityList, self).__init__(element_type, **kwargs)
        self.min_items = min_items
        self.unique_items = unique
        self.nullable = nullable

    def coerce(self, obj, attr, value):
        coerced = super(IdentityList, self).coerce(obj, attr, value)
        if len(coerced) < self.min_items:
            raise ValueError(_("Value for '%(attr)s' must have at least "
                               "%(num)s item(s).") %
                             {'attr': attr, 'num': self.min_items})
        if len(set(coerced)) != len(coerced) and self.unique_items:
            raise ValueError(_("Items for '%(attr)s' must be unique") %
                             {'attr': attr})
        return coerced

    def get_schema(self):
        schema = super(IdentityList, self).get_schema()
        if self.nullable:
            schema['type'].append('null')
        schema['minItems'] = self.min_items
        schema['uniqueItems'] = self.unique_items
        return schema
class BaseEnum(fields.FieldType):
    """Base for string-enum field types.

    Not derived from fields.String because String does not handle a None
    value correctly.
    """

    def __init__(self, nullable=False):
        allowed = list(self.__class__.ALL)
        if not allowed:
            raise ValueError(_("No list of valid values provided for enum."))
        for item in allowed:
            if not isinstance(item, str):
                raise ValueError(_("Enum field only support string values."))
        self._valid_values = list(allowed)
        self._nullable = nullable
        super(BaseEnum, self).__init__()

    def coerce(self, obj, attr, value):
        value = str(value)
        if value not in self._valid_values:
            raise ValueError(_("Value '%(value)s' is not acceptable for "
                               "field '%(attr)s'.") %
                             {'value': value, 'attr': attr})
        return value

    def stringify(self, value):
        return None if value is None else "'%s'" % value
class AdjustmentType(BaseEnum):
    """Valid cluster-size adjustment types."""

    ALL = consts.ADJUSTMENT_TYPES

    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}


class ClusterActionName(BaseEnum):
    """Valid cluster action names."""

    ALL = consts.CLUSTER_ACTION_NAMES

    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}


class ClusterStatus(BaseEnum):
    """Valid cluster statuses."""

    ALL = consts.CLUSTER_STATUSES


class NodeStatus(BaseEnum):
    """Valid node statuses."""

    ALL = consts.NODE_STATUSES


class ActionStatus(BaseEnum):
    """Valid action statuses."""

    ALL = consts.ACTION_STATUSES


class ReceiverType(BaseEnum):
    """Valid receiver types."""

    ALL = consts.RECEIVER_TYPES

    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}
class UniqueDict(fields.Dict):
    """Dict field whose values must all be distinct."""

    def coerce(self, obj, attr, value):
        coerced = super(UniqueDict, self).coerce(obj, attr, value)
        values = coerced.values()
        if len(values) != len(set(values)):
            raise ValueError(_("Map contains duplicated values"))
        return coerced
# TODO(Qiming): remove this when oslo patch is released
# https://review.openstack.org/#/c/360095
class NonNegativeIntegerField(fields.AutoTypedField):
    """Auto-typed field for non-negative integers."""

    AUTO_TYPE = NonNegativeInteger()


class BooleanField(fields.AutoTypedField):
    """Auto-typed strict boolean field."""

    AUTO_TYPE = Boolean()


# An override to the oslo.versionedobjects version so that we are using
# our own Object definition.
class ObjectField(fields.AutoTypedField):
    """Auto-typed field wrapping a registered senlin object."""

    def __init__(self, objtype, subclasses=False, **kwargs):
        self.AUTO_TYPE = Object(objtype, subclasses)
        self.objname = objtype
        super(ObjectField, self).__init__(**kwargs)


class JsonField(fields.AutoTypedField):
    """Auto-typed JSON field."""

    AUTO_TYPE = Json()
class ListField(fields.AutoTypedField):
    """Auto-typed field holding a list of arbitrary values."""

    AUTO_TYPE = fields.List(fields.FieldType())


class NotificationPriorityField(fields.BaseEnumField):
    """Enum field for notification priorities."""

    AUTO_TYPE = NotificationPriority()


class NotificationPhaseField(fields.BaseEnumField):
    """Enum field for notification phases."""

    AUTO_TYPE = NotificationPhase()


class NameField(fields.AutoTypedField):
    """Auto-typed senlin-name field."""

    AUTO_TYPE = Name()


# NOTE(review): this rebinds the module-level alias
# ``UUIDField = fields.UUIDField`` defined near the top of the file; the
# oslo version is shadowed from here on.
class UUIDField(fields.AutoTypedField):
    """Auto-typed UUID field using senlin's stricter UUID type."""

    AUTO_TYPE = UUID()
class CapacityField(fields.AutoTypedField):
    """Auto-typed capacity field with configurable bounds."""

    # Replaced per instance in __init__ with a bounded Capacity type.
    AUTO_TYPE = None

    def __init__(self, nullable=False, default=None, minimum=0, maximum=None):
        self.AUTO_TYPE = Capacity(minimum=minimum, maximum=maximum)
        super(CapacityField, self).__init__(nullable=nullable, default=default)


class SortField(fields.AutoTypedField):
    """Auto-typed sort-specification field."""

    # Replaced per instance in __init__ with a key-restricted Sort type.
    AUTO_TYPE = None

    def __init__(self, valid_keys, nullable=False, default=None):
        self.AUTO_TYPE = Sort(valid_keys)
        super(SortField, self).__init__(nullable=nullable, default=default)


class IdentityListField(fields.AutoTypedField):
    """Auto-typed list-of-identifiers field."""

    # Replaced per instance in __init__ with a constrained IdentityList.
    AUTO_TYPE = None

    def __init__(self, min_items=0, unique=True, nullable=False, default=None):
        if default is None:
            default = []
        self.AUTO_TYPE = IdentityList(fields.String(), min_items=min_items,
                                      unique=unique)
        super(IdentityListField, self).__init__(nullable=nullable,
                                                default=default)
class AdjustmentTypeField(fields.AutoTypedField):
    """Auto-typed adjustment-type enum field."""

    # Replaced per instance in __init__ (nullable is configurable).
    AUTO_TYPE = None

    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = AdjustmentType(nullable=nullable)
        super(AdjustmentTypeField, self).__init__(**kwargs)


class ClusterActionNameField(fields.AutoTypedField):
    """Auto-typed cluster-action-name enum field."""

    # Replaced per instance in __init__ (nullable is configurable).
    AUTO_TYPE = None

    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = ClusterActionName(nullable=nullable)
        super(ClusterActionNameField, self).__init__(**kwargs)


class ClusterStatusField(fields.AutoTypedField):
    """Auto-typed cluster-status enum field."""

    AUTO_TYPE = ClusterStatus


class NodeStatusField(fields.AutoTypedField):
    """Auto-typed node-status enum field."""

    AUTO_TYPE = NodeStatus


class ActionStatusField(fields.AutoTypedField):
    """Auto-typed action-status enum field."""

    AUTO_TYPE = ActionStatus


class ReceiverTypeField(fields.AutoTypedField):
    """Auto-typed receiver-type enum field."""

    # Replaced per instance in __init__ (nullable is configurable).
    AUTO_TYPE = None

    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = ReceiverType(nullable=nullable)
        super(ReceiverTypeField, self).__init__(**kwargs)
class NodeReplaceMapField(fields.AutoTypedField):
    """Mapping of node replacements; values must be unique."""

    AUTO_TYPE = UniqueDict(fields.String())


class CustomListField(ListField):
    """List field that projects each coerced item to one attribute."""

    def __init__(self, attr_name, **kwargs):
        self.attr_name = attr_name
        super(CustomListField, self).__init__(**kwargs)

    def coerce(self, obj, attr, value):
        items = super(CustomListField, self).coerce(obj, attr, value)
        return [getattr(item, self.attr_name) for item in items]
| 30.575701 | 79 | 0.602152 |
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import fields
import re
from senlin.common import consts
from senlin.common.i18n import _
CONF = cfg.CONF
FlexibleBooleanField = fields.FlexibleBooleanField
StringField = fields.StringField
IntegerField = fields.IntegerField
FloatField = fields.FloatField
UUIDField = fields.UUIDField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
ListOfStringsField = fields.ListOfStringsField
ListOfEnumField = fields.ListOfEnumField
class Boolean(fields.FieldType):
def __init__(self, default=False):
super(Boolean, self).__init__()
self._default = default
def coerce(self, obj, attr, value):
return strutils.bool_from_string(value, strict=True,
default=self._default)
def get_schema(self):
return {'type': ['boolean']}
class NonNegativeInteger(fields.FieldType):
@staticmethod
def coerce(obj, attr, value):
try:
v = int(value)
except (TypeError, ValueError):
raise ValueError(_("The value for %(attr)s must be an integer: "
"'%(value)s'.") %
{'attr': attr, 'value': value})
if v < 0:
err = _("Value must be >= 0 for field '%s'.") % attr
raise ValueError(err)
return v
def get_schema(self):
return {
'type': ['integer', 'string'],
'minimum': 0
}
class Object(fields.Object):
def get_schema(self):
schema = super(Object, self).get_schema()
data_key = 'senlin_object.data'
schema['properties'][data_key]['additionalProperties'] = False
return schema
class UUID(fields.FieldType):
_PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]'
r'{4}-?[a-fA-F0-9]{12}$')
@staticmethod
def coerce(obj, attr, value):
if not uuidutils.is_uuid_like(value):
msg = _("The value for %(attr)s is not a valid UUID: '%(value)s'."
) % {'attr': attr, 'value': value}
raise ValueError(msg)
return str(value)
def get_schema(self):
return {'type': ['string'], 'pattern': self._PATTERN}
class Json(fields.FieldType):
def coerce(self, obj, attr, value):
if isinstance(value, str):
try:
return jsonutils.loads(value)
except ValueError:
msg = _("The value (%s) is not a valid JSON.") % value
raise ValueError(msg)
return value
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, value)
def to_primitive(self, obj, attr, value):
return jsonutils.dumps(value)
def stringify(self, value):
if isinstance(value, str):
try:
return jsonutils.loads(value)
except ValueError:
raise
return str(value)
def get_schema(self):
return {'type': ['object']}
class NotificationPriority(fields.Enum):
ALL = consts.NOTIFICATION_PRIORITIES
def __init__(self):
super(NotificationPriority, self).__init__(self.ALL)
class NotificationPhase(fields.Enum):
ALL = consts.NOTIFICATION_PHASES
def __init__(self):
super(NotificationPhase, self).__init__(self.ALL)
class Name(fields.String):
    """String field constrained to valid senlin object names."""

    def __init__(self, min_len=1, max_len=255):
        super(Name, self).__init__()
        self.min_len = min_len
        self.max_len = max_len

    def coerce(self, obj, attr, value):
        err = None
        if len(value) < self.min_len:
            err = _("The value for the %(attr)s field must be at least "
                    "%(count)d characters long."
                    ) % {'attr': attr, 'count': self.min_len}
        elif len(value) > self.max_len:
            err = _("The value for the %(attr)s field must be less than "
                    "%(count)d characters long."
                    ) % {'attr': attr, 'count': self.max_len}
        else:
            # BUG FIX: raw string so \d, \., \_ and \~ are regex escapes,
            # not invalid string-literal escapes; the re module resolves
            # \u4e00-\u9fa5 (CJK range) itself.
            regex = re.compile(r'^[a-zA-Z\u4e00-\u9fa5\d\.\_\~-]*$',
                               re.IGNORECASE)
            if not regex.search(value):
                err = _("The value for the '%(attr)s' (%(value)s) contains "
                        "illegal characters. It must contain only "
                        "alphanumeric or \"_-.~\" characters and must start "
                        "with letter."
                        ) % {'attr': attr, 'value': value}
        if err:
            raise ValueError(err)
        return super(Name, self).coerce(obj, attr, value)

    def get_schema(self):
        return {
            'type': ['string'],
            'minLength': self.min_len,
            'maxLength': self.max_len
        }
class Capacity(fields.Integer):
def __init__(self, minimum=0, maximum=None):
super(Capacity, self).__init__()
CONF.import_opt("max_nodes_per_cluster", "senlin.conf")
if minimum > CONF.max_nodes_per_cluster:
err = _("The value of 'minimum' cannot be greater than the global "
"constraint (%(m)d).") % {'m': CONF.max_nodes_per_cluster}
raise ValueError(err)
self.minimum = minimum
if maximum is not None:
if maximum < minimum:
err = _("The value of 'maximum' must be greater than or equal "
"to that of the 'minimum' specified.")
raise ValueError(err)
if maximum > CONF.max_nodes_per_cluster:
err = _("The value of 'maximum' cannot be greater than the "
"global constraint (%(m)d)."
) % {'m': CONF.max_nodes_per_cluster}
raise ValueError(err)
self.maximum = maximum
else:
self.maximum = CONF.max_nodes_per_cluster
def coerce(self, obj, attr, value):
try:
v = int(value)
except Exception:
raise ValueError(_("The value for %(attr)s must be an integer: "
"'%(value)s'.") %
{'attr': attr, 'value': value})
if v < self.minimum:
raise ValueError(_("The value for the %(a)s field must be greater "
"than or equal to %(n)d.") %
{'a': attr, 'n': self.minimum})
elif v > self.maximum:
raise ValueError(_("The value for the %(a)s field must be less "
"than or equal to %(n)d.") %
{'a': attr, 'n': self.maximum})
return super(Capacity, self).coerce(obj, attr, v)
def get_schema(self):
return {
'type': ['integer', 'string'],
'minimum': self.minimum,
'maximum': self.maximum,
'pattern': '^[0-9]*$',
}
class Sort(fields.String):
def __init__(self, valid_keys):
super(Sort, self).__init__()
self.valid_keys = valid_keys
def coerce(self, obj, attr, value):
for s in value.split(','):
s_key, _sep, s_dir = s.partition(':')
err = None
if not s_key:
err = _("Missing sort key for '%s'.") % attr
raise ValueError(err)
if s_key not in self.valid_keys:
err = _("Unsupported sort key '%(value)s' for '%(attr)s'."
) % {'attr': attr, 'value': s_key}
if s_dir and s_dir not in ('asc', 'desc'):
err = _("Unsupported sort dir '%(value)s' for '%(attr)s'."
) % {'attr': attr, 'value': s_dir}
if err:
raise ValueError(err)
return super(Sort, self).coerce(obj, attr, value)
def get_schema(self):
return {
'type': ['string'],
}
class IdentityList(fields.List):
def __init__(self, element_type, min_items=0, unique=True, nullable=False,
**kwargs):
super(IdentityList, self).__init__(element_type, **kwargs)
self.min_items = min_items
self.unique_items = unique
self.nullable = nullable
def coerce(self, obj, attr, value):
res = super(IdentityList, self).coerce(obj, attr, value)
if len(res) < self.min_items:
raise ValueError(_("Value for '%(attr)s' must have at least "
"%(num)s item(s).") %
{'attr': attr, 'num': self.min_items})
if len(set(res)) != len(res) and self.unique_items:
raise ValueError(_("Items for '%(attr)s' must be unique") %
{'attr': attr})
return res
def get_schema(self):
schema = super(IdentityList, self).get_schema()
if self.nullable:
schema['type'].append('null')
schema['minItems'] = self.min_items
schema['uniqueItems'] = self.unique_items
return schema
class BaseEnum(fields.FieldType):
def __init__(self, nullable=False):
valid_values = list(self.__class__.ALL)
if not valid_values:
raise ValueError(_("No list of valid values provided for enum."))
for value in valid_values:
if not isinstance(value, str):
raise ValueError(_("Enum field only support string values."))
self._valid_values = list(valid_values)
self._nullable = nullable
super(BaseEnum, self).__init__()
def coerce(self, obj, attr, value):
value = str(value)
if value not in self._valid_values:
raise ValueError(_("Value '%(value)s' is not acceptable for "
"field '%(attr)s'.") %
{'value': value, 'attr': attr})
return value
def stringify(self, value):
if value is None:
return None
return '\'%s\'' % value
class AdjustmentType(BaseEnum):
ALL = consts.ADJUSTMENT_TYPES
def get_schema(self):
return {'type': ['string'],
'enum': self._valid_values}
class ClusterActionName(BaseEnum):
ALL = consts.CLUSTER_ACTION_NAMES
def get_schema(self):
return {'type': ['string'],
'enum': self._valid_values}
class ClusterStatus(BaseEnum):
ALL = consts.CLUSTER_STATUSES
class NodeStatus(BaseEnum):
ALL = consts.NODE_STATUSES
class ActionStatus(BaseEnum):
ALL = consts.ACTION_STATUSES
class ReceiverType(BaseEnum):
ALL = consts.RECEIVER_TYPES
def get_schema(self):
return {'type': ['string'],
'enum': self._valid_values}
class UniqueDict(fields.Dict):
def coerce(self, obj, attr, value):
res = super(UniqueDict, self).coerce(obj, attr, value)
new_nodes = res.values()
if len(new_nodes) != len(set(new_nodes)):
raise ValueError(_("Map contains duplicated values"))
return res
# NOTE(review): the original line here read "NegativeIntegerField(..." --
# the leading "class Non" was truncated (apparently by a comment-stripping
# pass), leaving a syntactically broken statement.  Restored from the
# annotated copy of this file.
class NonNegativeIntegerField(fields.AutoTypedField):
    """Auto-typed field for non-negative integers."""

    AUTO_TYPE = NonNegativeInteger()
class BooleanField(fields.AutoTypedField):
AUTO_TYPE = Boolean()
class ObjectField(fields.AutoTypedField):
def __init__(self, objtype, subclasses=False, **kwargs):
self.AUTO_TYPE = Object(objtype, subclasses)
self.objname = objtype
super(ObjectField, self).__init__(**kwargs)
class JsonField(fields.AutoTypedField):
AUTO_TYPE = Json()
class ListField(fields.AutoTypedField):
AUTO_TYPE = fields.List(fields.FieldType())
class NotificationPriorityField(fields.BaseEnumField):
AUTO_TYPE = NotificationPriority()
class NotificationPhaseField(fields.BaseEnumField):
AUTO_TYPE = NotificationPhase()
class NameField(fields.AutoTypedField):
AUTO_TYPE = Name()
class UUIDField(fields.AutoTypedField):
AUTO_TYPE = UUID()
class CapacityField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, nullable=False, default=None, minimum=0, maximum=None):
self.AUTO_TYPE = Capacity(minimum=minimum, maximum=maximum)
super(CapacityField, self).__init__(nullable=nullable, default=default)
class SortField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, valid_keys, nullable=False, default=None):
self.AUTO_TYPE = Sort(valid_keys)
super(SortField, self).__init__(nullable=nullable, default=default)
class IdentityListField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, min_items=0, unique=True, nullable=False, default=None):
if default is None:
default = []
self.AUTO_TYPE = IdentityList(fields.String(), min_items=min_items,
unique=unique)
super(IdentityListField, self).__init__(nullable=nullable,
default=default)
class AdjustmentTypeField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, **kwargs):
nullable = kwargs.get('nullable', False)
self.AUTO_TYPE = AdjustmentType(nullable=nullable)
super(AdjustmentTypeField, self).__init__(**kwargs)
class ClusterActionNameField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, **kwargs):
nullable = kwargs.get('nullable', False)
self.AUTO_TYPE = ClusterActionName(nullable=nullable)
super(ClusterActionNameField, self).__init__(**kwargs)
class ClusterStatusField(fields.AutoTypedField):
AUTO_TYPE = ClusterStatus
class NodeStatusField(fields.AutoTypedField):
AUTO_TYPE = NodeStatus
class ActionStatusField(fields.AutoTypedField):
AUTO_TYPE = ActionStatus
class ReceiverTypeField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, **kwargs):
nullable = kwargs.get('nullable', False)
self.AUTO_TYPE = ReceiverType(nullable=nullable)
super(ReceiverTypeField, self).__init__(**kwargs)
class NodeReplaceMapField(fields.AutoTypedField):
AUTO_TYPE = UniqueDict(fields.String())
class CustomListField(ListField):
def __init__(self, attr_name, **kwargs):
self.attr_name = attr_name
super(CustomListField, self).__init__(**kwargs)
def coerce(self, obj, attr, value):
objs = super(CustomListField, self).coerce(obj, attr, value)
custom_list = []
for i in objs:
custom_list.append(getattr(i, self.attr_name))
return custom_list
| true | true |
1c477804be4c4bf6d36610dc17cf96819da6d6fc | 45,319 | py | Python | nessai/nestedsampler.py | Rodrigo-Tenorio/nessai | 2b4175da61b3a7250d1154a126ad93481836df0d | [
"MIT"
] | null | null | null | nessai/nestedsampler.py | Rodrigo-Tenorio/nessai | 2b4175da61b3a7250d1154a126ad93481836df0d | [
"MIT"
] | null | null | null | nessai/nestedsampler.py | Rodrigo-Tenorio/nessai | 2b4175da61b3a7250d1154a126ad93481836df0d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Functions and objects related to the main nested sampling algorithm.
"""
from collections import deque
import datetime
import logging
import os
import pickle
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import seaborn as sns
import torch
from tqdm import tqdm
from .livepoint import get_dtype, DEFAULT_FLOAT_DTYPE
from .plot import plot_indices, plot_trace
from .evidence import _NSIntegralState
from .proposal import FlowProposal
from .utils import (
safe_file_dump,
compute_indices_ks_test,
rolling_mean,
)
# Apply seaborn's default theme, then switch to the 'ticks' axes style
# for every plot produced by this module.
sns.set()
sns.set_style('ticks')

logger = logging.getLogger(__name__)
class NestedSampler:
"""
Nested Sampler class.
Initialisation arguments:
Parameters
----------
model: :obj:`nessai.model.Model`
User defined model
nlive : int, optional
Number of live points.
output : str
Path for output
stopping : float, optional
Stop when remaining samples wouldn't change logZ estimate by this much.
max_iteration : int, optional
Maximum number of iterations to run before force sampler to stop.
If stopping criteria is met before max. is reached sampler will stop.
checkpointing : bool, optional
Boolean to toggle checkpointing, must be enabled to resume the sampler.
If false the sampler is still saved at the end of sampling.
resume_file : str, optional
If specified sampler will be resumed from this file. Still requires
correct model.
seed : int, optional
seed for the initialisation of the pseudorandom chain
n_pool : int, optional
Number of threads to when for creating the multiprocessing pool.
pool : object
User defined multiprocessing pool that will be used when evaluating
the likelihood.
close_pool : bool
Boolean to indicated if the pool should be closed at the end of the
nested sampling loop. If False, the user must manually close the pool.
plot : bool (True)
Boolean to toggle plotting
proposal_plots : bool (True)
Boolean to enable additional plots for the population stage of the
sampler. Overwritten by plot.
prior_sampling : bool (False)
produce nlive samples from the prior.
analytic_priors : bool (False)
Boolean that indicates that the `new_point` method in the model
draws directly from the priors meaning rejection sampling is not
needed.
maximum_uninformed : int (1000)
Maximum number of iterations before forcing the sampler to switch to
using the proposal method with the flow.
uninformed_proposal : :obj:`nessai.proposal.Proposal`: (None)
Class to use for initial sampling before training the flow. If
None RejectionProposal or AnalyticProposal are used depending if
`analytic_priors` is False or True.
uninformed_acceptance_threshold : float (None)
Acceptance threshold for initialising sampling, if acceptance falls
below this value sampler switches to flow-based proposal. If None
then value is set to 10 times `acceptance_threshold`
uninformed_proposal_kwargs : dict, ({})
Dictionary of keyword argument to pass to the class use for
the initial sampling when it is initialised.
flow_class : :obj:`nessai.proposal.FlowProposal`
Class to use for flow-based proposal method
flow_config : dict ({})
Dictionary used to configure instance of `nessai.flowmodel.FlowModel`,
this includes configuring the normalising flow and the training.
training_frequency : int (None)
Number of iterations between re-training the flow. If None flow
is only re-trained based on other criteria.
train_on_empty : bool (True)
If true the flow is retrained every time the proposal pool is
empty. If false it is only training according to the other criteria.
cooldown : int (100)
Minimum number of iterations between training. Can be overridden if
`train_on_empty=True` and the pool is empty.
memory : int, False (False)
Number of old live points to use in training. If False only the current
live points are used.
reset_weights : bool, int, (False)
Boolean to toggle resetting the flow weights whenever re-training.
If an integer is specified the flow is reset every nth time it is
trained.
reset_permutations: bool, int, (False)
Boolean to toggle resetting the permutation layers in the flow whenever
re-training. If an integer is specified the flow is reset every nth
time it is trained.
reset_acceptance : bool, (True)
If true use mean acceptance of samples produced with current flow
as a criteria for retraining
retrain_acceptance : bool (False)
Force the flow to be reset if the acceptance falls below the acceptance
threshold. Requires `reset_acceptance=True`
acceptance_threshold : float (0.01)
Threshold to determine if the flow should be retrained, will not
retrain if cooldown is not satisfied.
kwargs :
Keyword arguments passed to the flow proposal class
"""
    def __init__(
        self,
        model,
        nlive=2000,
        output=None,
        stopping=0.1,
        max_iteration=None,
        checkpointing=True,
        checkpoint_on_training=False,
        resume_file=None,
        seed=None,
        pool=None,
        close_pool=True,
        n_pool=None,
        plot=True,
        proposal_plots=False,
        prior_sampling=False,
        analytic_priors=False,
        maximum_uninformed=None,
        uninformed_proposal=None,
        uninformed_acceptance_threshold=None,
        uninformed_proposal_kwargs=None,
        flow_class=None,
        flow_config=None,
        training_frequency=None,
        train_on_empty=True,
        cooldown=200,
        memory=False,
        reset_weights=False,
        reset_permutations=False,
        retrain_acceptance=True,
        reset_acceptance=False,
        acceptance_threshold=0.01,
        **kwargs
    ):
        # See the class docstring for a description of the parameters.
        logger.info('Initialising nested sampler')
        # Cache whether INFO logging is enabled so the per-iteration log
        # call in `consume_sample` can be skipped cheaply.
        self.info_enabled = logger.isEnabledFor(logging.INFO)
        model.verify_model()
        self.model = model
        self.model.configure_pool(pool=pool, n_pool=n_pool)
        self.close_pool = close_pool
        self.nlive = nlive
        self.live_points = None
        self.prior_sampling = prior_sampling
        self.setup_random_seed(seed)
        # Counters for accepted/rejected replacement draws. NOTE(review):
        # `rejected` starts at 1, presumably to avoid division by zero in a
        # ratio elsewhere — confirm before changing.
        self.accepted = 0
        self.rejected = 1
        self.initialised = False
        self.checkpointing = checkpointing
        self.checkpoint_on_training = checkpoint_on_training
        self.iteration = 0
        # Rolling window of per-point acceptances used by `mean_acceptance`.
        self.acceptance_history = deque(maxlen=(nlive // 10))
        self.mean_acceptance_history = []
        self.block_acceptance = 1.
        self.mean_block_acceptance = 1.
        self.block_iteration = 0
        self.retrain_acceptance = retrain_acceptance
        self.reset_acceptance = reset_acceptance
        self.insertion_indices = []
        self.rolling_p = []
        self.resumed = False
        self.tolerance = stopping
        # `condition` is the stopping criterion (dZ); sampling stops once it
        # drops below `tolerance`.
        self.condition = np.inf
        self.logLmin = -np.inf
        self.logLmax = -np.inf
        self.nested_samples = []
        self.logZ = None
        self.state = _NSIntegralState(self.nlive, track_gradients=plot)
        self.plot = plot
        self.resume_file = self.setup_output(output, resume_file)
        self.output = output
        # Timing
        self.training_time = datetime.timedelta()
        self.sampling_time = datetime.timedelta()
        self.sampling_start_time = datetime.datetime.now()
        # Resume flags
        self.completed_training = True
        self.finalised = False
        # History
        self.likelihood_evaluations = []
        self.training_iterations = []
        self.min_likelihood = []
        self.max_likelihood = []
        self.logZ_history = []
        self.dZ_history = []
        self.population_acceptance = []
        self.population_radii = []
        self.population_iterations = []
        self.checkpoint_iterations = []
        self.acceptance_threshold = acceptance_threshold
        self.train_on_empty = train_on_empty
        self.cooldown = cooldown
        self.memory = memory
        self.configure_max_iteration(max_iteration)
        self.configure_flow_reset(reset_weights, reset_permutations)
        self.configure_training_frequency(training_frequency)
        if uninformed_proposal_kwargs is None:
            uninformed_proposal_kwargs = {}
        self.configure_uninformed_proposal(uninformed_proposal,
                                           analytic_priors,
                                           maximum_uninformed,
                                           uninformed_acceptance_threshold,
                                           **uninformed_proposal_kwargs)
        self.configure_flow_proposal(flow_class, flow_config, proposal_plots,
                                     **kwargs)
        # Uninformed proposal is used for prior sampling.
        # If maximum uninformed is greater than 0, then it will be used for
        # another n iterations or until it becomes inefficient.
        # Debugging aid: when enabled, replacement live points are written to
        # disk as the run progresses.
        self.store_live_points = False
        if self.store_live_points:
            self.live_points_dir = f'{self.output}/live_points/'
            os.makedirs(self.live_points_dir, exist_ok=True)
            self.replacement_points = []
    @property
    def log_evidence(self):
        """Current log-evidence estimate from the integrator state."""
        return self.state.logZ

    @property
    def information(self):
        """Most recent estimate of the information (H)."""
        return self.state.info[-1]

    @property
    def likelihood_calls(self):
        """Total number of likelihood evaluations recorded by the model."""
        return self.model.likelihood_evaluations

    @property
    def likelihood_evaluation_time(self):
        """Total time the model has spent evaluating the likelihood."""
        return self.model.likelihood_evaluation_time

    @property
    def proposal_population_time(self):
        """Combined time both proposals have spent populating."""
        t = self._uninformed_proposal.population_time
        t += self._flow_proposal.population_time
        return t

    @property
    def acceptance(self):
        """Overall acceptance: iterations per likelihood call."""
        return self.iteration / self.likelihood_calls

    @property
    def current_sampling_time(self):
        """Sampling time, including the in-progress block if not finalised."""
        if self.finalised:
            return self.sampling_time
        else:
            return self.sampling_time \
                + (datetime.datetime.now() - self.sampling_start_time)

    @property
    def last_updated(self):
        """Last time the normalising flow was retrained"""
        if self.training_iterations:
            return self.training_iterations[-1]
        else:
            return 0

    @property
    def mean_acceptance(self):
        """
        Mean acceptance of the last nlive // 10 points
        """
        if self.acceptance_history:
            return np.mean(self.acceptance_history)
        else:
            return np.nan
def configure_max_iteration(self, max_iteration):
"""Configure the maximum iteration.
If None then no maximum is set.
Parameter
---------
max_iteration : int, None
Maximum iteration.
"""
if max_iteration is None:
self.max_iteration = np.inf
else:
self.max_iteration = max_iteration
def configure_training_frequency(self, training_frequency):
"""Configure the training frequency.
If None, 'inf' or 'None' flow will only train when empty.
"""
if training_frequency in [None, 'inf', 'None']:
logger.warning('Proposal will only train when empty')
self.training_frequency = np.inf
else:
self.training_frequency = training_frequency
    def configure_uninformed_proposal(self,
                                      uninformed_proposal,
                                      analytic_priors,
                                      maximum_uninformed,
                                      uninformed_acceptance_threshold,
                                      **kwargs):
        """
        Setup the uninformed proposal method (is NOT trained)

        Parameters
        ----------
        uninformed_proposal : None or obj
            Class to use for uninformed proposal
        analytic_priors : bool
            If True `AnalyticProposal` is used to directly sample from the
            priors rather than using rejection sampling.
        maximum_uninformed : {False, None, int, float}
            Maximum number of iterations before switching to FlowProposal.
            If None, two times nlive is used. If False uninformed sampling is
            not used.
        uninformed_acceptance_threshold : float or None:
            Threshold to use for uninformed proposal, once reached proposal
            method will switch. If None acceptance_threshold is used if
            greater than 0.1 else 10 x acceptance_threshold is used.
        kwargs
            Kwargs are passed to init method for uninformed proposal class
        """
        # Resolve the uninformed-iteration budget.
        if maximum_uninformed is None:
            self.uninformed_sampling = True
            self.maximum_uninformed = 2 * self.nlive
        elif not maximum_uninformed:
            # False (or 0) disables uninformed sampling entirely.
            self.uninformed_sampling = False
            self.maximum_uninformed = 0
        else:
            self.uninformed_sampling = True
            self.maximum_uninformed = float(maximum_uninformed)
        # Default switching threshold scales with the main threshold when
        # the latter is small.
        if uninformed_acceptance_threshold is None:
            if self.acceptance_threshold < 0.1:
                self.uninformed_acceptance_threshold = \
                    10 * self.acceptance_threshold
            else:
                self.uninformed_acceptance_threshold = \
                    self.acceptance_threshold
        else:
            self.uninformed_acceptance_threshold = \
                uninformed_acceptance_threshold
        # Default proposal class: direct sampling if the priors are
        # analytic, otherwise rejection sampling (which needs a pool size).
        if uninformed_proposal is None:
            if analytic_priors:
                from .proposal import AnalyticProposal as uninformed_proposal
            else:
                from .proposal import RejectionProposal as uninformed_proposal
                kwargs['poolsize'] = self.nlive
        logger.debug(f'Using uninformed proposal: {uninformed_proposal}')
        logger.debug(f'Parsing kwargs to uninformed proposal: {kwargs}')
        self._uninformed_proposal = uninformed_proposal(
            self.model, **kwargs
        )
    def configure_flow_proposal(self, flow_class, flow_config, proposal_plots,
                                **kwargs):
        """
        Set up the flow-based proposal method

        Parameters
        ----------
        flow_class : None or obj or str
            Class to use for proposal. If None FlowProposal is used.
        flow_config : dict
            Configuration dictionary passed to the class.
        proposal_plots : bool or str
            Configuration of plotting in proposal class.
        **kwargs :
            Kwargs passed to init function.
        """
        proposal_output = self.output + '/proposal/'
        # Proposal-level plotting is disabled when plotting is disabled
        # globally.
        if not self.plot:
            proposal_plots = False
        if flow_class is not None:
            if isinstance(flow_class, str):
                # Resolve a (case-insensitive) string name to a class.
                flow_class = flow_class.lower()
                if flow_class == 'gwflowproposal':
                    from .gw.proposal import GWFlowProposal as flow_class
                elif flow_class == 'augmentedgwflowproposal':
                    from .gw.proposal import (
                        AugmentedGWFlowProposal as flow_class)
                elif flow_class == 'legacygwflowproposal':
                    from .gw.legacy import LegacyGWFlowProposal as flow_class
                elif flow_class == 'flowproposal':
                    flow_class = FlowProposal
                elif flow_class == 'augmentedflowproposal':
                    from .proposal import AugmentedFlowProposal
                    flow_class = AugmentedFlowProposal
                else:
                    raise ValueError(f'Unknown flow class: {flow_class}')
            elif not issubclass(flow_class, FlowProposal):
                raise RuntimeError('Flow class must be string or class that '
                                   'inherits from FlowProposal')
        else:
            flow_class = FlowProposal
        # Default the proposal pool size to the number of live points.
        if kwargs.get('poolsize', None) is None:
            kwargs['poolsize'] = self.nlive
        logger.debug(f'Using flow class: {flow_class}')
        logger.info(f'Parsing kwargs to FlowProposal: {kwargs}')
        self._flow_proposal = flow_class(
            self.model,
            flow_config=flow_config,
            output=proposal_output,
            plot=proposal_plots,
            **kwargs
        )
def setup_output(self, output, resume_file=None):
"""
Set up the output folder
Parameters
----------
output : str
Directory where the results will be stored
resume_file : optional
Specific file to use for checkpointing. If not specified the
default is used (nested_sampler_resume.pkl)
Returns
-------
resume_file : str
File used for checkpointing
"""
if not os.path.exists(output):
os.makedirs(output, exist_ok=True)
if resume_file is None:
resume_file = os.path.join(output, "nested_sampler_resume.pkl")
else:
resume_file = os.path.join(output, resume_file)
if self.plot:
os.makedirs(output + '/diagnostics/', exist_ok=True)
return resume_file
def setup_random_seed(self, seed):
"""
initialise the random seed
"""
self.seed = seed
if self.seed is not None:
logger.debug(f'Setting random seed to {seed}')
np.random.seed(seed=self.seed)
torch.manual_seed(self.seed)
def configure_flow_reset(self, reset_weights, reset_permutations):
"""Configure how often the flow parameters are reset.
Values are converted to floats.
Parameters
----------
reset_weights : int, float or bool
Frequency with which the weights will be reset.
reset_permutations : int, float or bool
Frequency with which the permutations will be reset.
"""
if isinstance(reset_weights, (int, float)):
self.reset_weights = float(reset_weights)
else:
raise TypeError(
'`reset_weights` must be a bool, int or float')
if isinstance(reset_permutations, (int, float)):
self.reset_permutations = float(reset_permutations)
else:
raise TypeError(
'`reset_permutations` must be a bool, int or float')
    def check_insertion_indices(self, rolling=True, filename=None):
        """
        Checking the distribution of the insertion indices either during
        the nested sampling run (rolling=True) or for the whole run
        (rolling=False).

        Parameters
        ----------
        rolling : bool
            If True only the indices from the last `nlive` iterations are
            tested and the p-value is appended to `rolling_p`.
        filename : str, optional
            If given, all insertion indices are saved to this file in the
            output directory.
        """
        if rolling:
            indices = self.insertion_indices[-self.nlive:]
        else:
            indices = self.insertion_indices
        # KS test against the uniform distribution expected for correctly
        # sampled insertion indices.
        D, p = compute_indices_ks_test(indices, self.nlive)
        if p is not None:
            if rolling:
                logger.warning(f'Rolling KS test: D={D:.4}, p-value={p:.4}')
                self.rolling_p.append(p)
            else:
                logger.warning(f'Final KS test: D={D:.4}, p-value={p:.4}')
        if filename is not None:
            np.savetxt(os.path.join(self.output, filename),
                       self.insertion_indices, newline='\n', delimiter=' ')
    def log_likelihood(self, x):
        """
        Wrapper for the model likelihood so evaluations are counted

        Parameters
        ----------
        x :
            Point passed straight through to ``model.log_likelihood``.
        """
        return self.model.log_likelihood(x)
    def yield_sample(self, oldparam):
        """
        Generator that draws new points and applies rejection sampling.

        Yields tuples ``(counter, point)`` where ``counter`` is the number
        of draws used. If the proposal pool empties before a point above
        ``logLmin`` is found, the input point is yielded instead so the
        caller can trigger a training/repopulation check.
        """
        while True:
            counter = 0
            while True:
                counter += 1
                newparam = self.proposal.draw(oldparam.copy())
                # Prior is computed in the proposal
                if newparam['logP'] != -np.inf:
                    # NOTE(review): a stored logL of exactly 0 is treated as
                    # "not yet computed" by this truthiness check —
                    # presumably logL defaults to 0 in new points; confirm
                    # against the proposal/livepoint code.
                    if not newparam['logL']:
                        newparam['logL'] = \
                            self.model.evaluate_log_likelihood(newparam)
                    if newparam['logL'] > self.logLmin:
                        self.logLmax = max(self.logLmax, newparam['logL'])
                        oldparam = newparam.copy()
                        break
                # Only here if proposed and then empty
                # This returns the old point and allows for a training check
                if not self.proposal.populated:
                    break
            yield counter, oldparam
def insert_live_point(self, live_point):
"""
Insert a live point
"""
# This is the index including the current worst point, so final index
# is one less, otherwise index=0 would never be possible
index = np.searchsorted(self.live_points['logL'], live_point['logL'])
self.live_points[:index - 1] = self.live_points[1:index]
self.live_points[index - 1] = live_point
return index - 1
    def consume_sample(self):
        """
        Replace the worst live point with a new point above its likelihood.

        The worst point becomes a nested sample, the integrator state and
        the stopping criterion (`condition`) are updated, and a replacement
        is drawn via `yield_sample` (retraining the flow if the pool
        empties).
        """
        worst = self.live_points[0].copy()
        self.logLmin = worst['logL']
        self.state.increment(worst['logL'])
        self.nested_samples.append(worst)
        # Remaining-evidence stopping criterion: dZ estimated from the
        # maximum likelihood and the remaining prior volume.
        self.condition = np.logaddexp(self.state.logZ,
                                      self.logLmax
                                      - self.iteration / float(self.nlive)) \
            - self.state.logZ
        # Replace the points we just consumed with the next acceptable ones
        # Make sure we are mixing the chains
        self.iteration += 1
        self.block_iteration += 1
        count = 0
        while(True):
            c, proposed = next(self.yield_sample(worst))
            count += c
            if proposed['logL'] > self.logLmin:
                # Assuming point was proposed
                # replace worst point with new one
                index = self.insert_live_point(proposed)
                self.insertion_indices.append(index)
                self.accepted += 1
                self.block_acceptance += 1 / count
                self.acceptance_history.append(1 / count)
                break
            else:
                # Only get here if the yield sample returns worse point
                # which can only happen if the pool is empty
                self.rejected += 1
                self.check_state()
                # if retrained whilst proposing a sample then update the
                # iteration count since will be zero otherwise
                if not self.block_iteration:
                    self.block_iteration += 1
        self.mean_block_acceptance = self.block_acceptance \
            / self.block_iteration
        if self.info_enabled:
            logger.info(f"{self.iteration:5d}: n: {count:3d} "
                        f"b_acc: {self.mean_block_acceptance:.3f} "
                        f"H: {self.state.info[-1]:.2f} "
                        f"logL: {self.logLmin:.5f} --> {proposed['logL']:.5f} "
                        f"dZ: {self.condition:.3f} "
                        f"logZ: {self.state.logZ:.3f} "
                        f"+/- {np.sqrt(self.state.info[-1] / self.nlive):.3f} "
                        f"logLmax: {self.logLmax:.2f}")
    def populate_live_points(self):
        """
        Initialise the pool of live points.

        Draws `nlive` points with finite logP and logL; points whose
        likelihood is NaN are skipped with a warning and redrawn.
        """
        i = 0
        live_points = np.empty(self.nlive,
                               dtype=get_dtype(self.model.names,
                                               DEFAULT_FLOAT_DTYPE))
        with tqdm(total=self.nlive, desc='Drawing live points') as pbar:
            while i < self.nlive:
                while i < self.nlive:
                    count, live_point = next(
                        self.yield_sample(self.model.new_point()))
                    if np.isnan(live_point['logL']):
                        logger.warning(
                            'Likelihood function returned NaN for '
                            f'live_point {live_point}'
                        )
                        logger.warning(
                            'You may want to check your likelihood function'
                        )
                        # Restart the inner loop with a fresh point.
                        break
                    if (
                        np.isfinite(live_point['logP'])
                        and np.isfinite(live_point['logL'])
                    ):
                        live_points[i] = live_point
                        i += 1
                        pbar.update()
                        break
        # Live points are kept sorted by logL, worst (smallest) first.
        self.live_points = np.sort(live_points, order='logL')
        if self.store_live_points:
            np.savetxt(self.live_points_dir + '/initial_live_points.dat',
                       self.live_points,
                       header='\t'.join(self.live_points.dtype.names))
def initialise(self, live_points=True):
"""
Initialise the nested sampler
Parameters
----------
live_points : bool, optional (True)
If true and there are no live points, new live points are
drawn using `populate_live_points` else all other initialisation
steps are complete but live points remain empty.
"""
flags = [False] * 3
if not self._flow_proposal.initialised:
self._flow_proposal.initialise()
flags[0] = True
if not self._uninformed_proposal.initialised:
self._uninformed_proposal.initialise()
flags[1] = True
if (
self.iteration < self.maximum_uninformed
and self.uninformed_sampling
):
self.proposal = self._uninformed_proposal
else:
self.proposal = self._flow_proposal
if live_points and self.live_points is None:
self.populate_live_points()
flags[2] = True
if self.condition > self.tolerance:
self.finalised = False
if all(flags):
self.initialised = True
def check_proposal_switch(self, force=False):
"""
Check if the proposal should be switch from uninformed to
flowproposal given the current state.
If the flow proposal is already in use, no changes are made.
Parameters
----------
force : bool, optional
If True proposal is forced to switch.
Returns
-------
bool
Flag to indicated if proposal was switched
"""
if (
(self.mean_acceptance < self.uninformed_acceptance_threshold)
or (self.iteration >= self.maximum_uninformed)
or force
):
if self.proposal is self._flow_proposal:
logger.warning('Already using flowproposal')
return True
logger.warning('Switching to FlowProposal')
self.proposal = self._flow_proposal
self.proposal.ns_acceptance = self.mean_block_acceptance
self.uninformed_sampling = False
return True
# If using uninformed sampling, don't check training
else:
return False
def check_training(self):
"""
Check if the normalising flow should be trained
Checks that can force training:
- Training was previously stopped before completion
- The pool is empty and the proposal was not in the process
of populating when stopped.
Checks that cannot force training is still on cooldown:
- Acceptance falls below threshold and `retrain_acceptance` is
true
- The number of iterations since last training is equal to the
training frequency
Returns
-------
train : bool
Try to train if true
force : bool
Force the training irrespective of cooldown
"""
if not self.completed_training:
logger.debug('Training flow (resume)')
return True, True
elif (not self.proposal.populated and
self.train_on_empty and
not self.proposal.populating):
logger.debug('Training flow (proposal empty)')
return True, True
elif (self.mean_block_acceptance < self.acceptance_threshold and
self.retrain_acceptance):
logger.debug('Training flow (acceptance)')
return True, False
elif (self.iteration - self.last_updated) == self.training_frequency:
logger.debug('Training flow (iteration)')
return True, False
else:
return False, False
def check_flow_model_reset(self):
"""
Check if the normalising flow model should be reset.
Checks acceptance if `reset_acceptance` is True and always checks
how many times the flow has been trained.
Flow will not be reset if it has not been trained. To force a reset
manually call `proposal.reset_model_weights`.
"""
if not self.proposal.training_count:
return
if (self.reset_acceptance
and self.mean_block_acceptance < self.acceptance_threshold):
self.proposal.reset_model_weights(weights=True, permutations=True)
return
self.proposal.reset_model_weights(
weights=(
self.reset_weights and
not (self.proposal.training_count % self.reset_weights)
),
permutations=(
self.reset_permutations and
not (self.proposal.training_count % self.reset_permutations)
),
)
    def train_proposal(self, force=False):
        """
        Try to train the proposal. Proposal will not train if cooldown is not
        exceeded unless force is True.

        Parameters
        ----------
        force : bool
            Override training checks
        """
        if (self.iteration - self.last_updated < self.cooldown and not force):
            logger.debug('Not training, still cooling down!')
        else:
            self.completed_training = False
            # Possibly reset the flow weights/permutations before training.
            self.check_flow_model_reset()
            training_data = self.live_points.copy()
            # Optionally include the most recent nested samples in the
            # training data once enough have been collected.
            if self.memory and (len(self.nested_samples) >= self.memory):
                training_data = np.concatenate([
                    training_data, self.nested_samples[-self.memory:].copy()])
            st = datetime.datetime.now()
            self.proposal.train(training_data)
            self.training_time += (datetime.datetime.now() - st)
            self.training_iterations.append(self.iteration)
            # Training starts a new acceptance block.
            self.block_iteration = 0
            self.block_acceptance = 0.
            self.completed_training = True
            if self.checkpoint_on_training:
                self.checkpoint(periodic=True)
def check_state(self, force=False):
"""
Check if state should be updated prior to drawing a new sample
Force will override the cooldown mechanism.
"""
if self.uninformed_sampling:
if self.check_proposal_switch():
force = True
else:
return
# General override
train = False
if force:
train = True
logger.debug('Training flow (force)')
elif not train:
train, force = self.check_training()
if train or force:
self.train_proposal(force=force)
    def plot_state(self, filename=None):
        """
        Produce plots with the current state of the nested sampling run.
        Plots are saved to the output directory specified at initialisation.

        Parameters
        ----------
        filename : str, optional
            If specified the figure will be saved, otherwise the figure is
            returned.
        """
        fig, ax = plt.subplots(6, 1, sharex=True, figsize=(12, 12))
        ax = ax.ravel()
        # History lists are appended every nlive // 10 iterations; map them
        # back onto iteration numbers for the x-axis.
        it = (np.arange(len(self.min_likelihood))) * (self.nlive // 10)
        it[-1] = self.iteration
        colours = ['#4575b4', '#d73027', '#fad117']
        ls = ['-', '--', ':']
        # Vertical markers on every panel: training, population (only when
        # informative) and checkpoint iterations, plus the current iteration.
        for t in self.training_iterations:
            for a in ax:
                a.axvline(t, ls='-', color='lightgrey')
        if not self.train_on_empty:
            for p in self.population_iterations:
                for a in ax:
                    a.axvline(p, ls='-', color='tab:orange')
        for i in self.checkpoint_iterations:
            for a in ax:
                a.axvline(i, ls=':', color='#66ccff')
        for a in ax:
            a.axvline(self.iteration, c='#ff9900', ls='-.')
        # Panel 0: min/max log-likelihood history.
        ax[0].plot(it, self.min_likelihood, label='Min logL',
                   c=colours[0], ls=ls[0])
        ax[0].plot(it, self.max_likelihood, label='Max logL',
                   c=colours[1], ls=ls[1])
        ax[0].set_ylabel('logL')
        ax[0].legend(frameon=False)
        # Panel 1: log prior volume, with its gradient on a twin axis when
        # the integrator tracks gradients.
        logX_its = np.arange(len(self.state.log_vols))
        ax[1].plot(
            logX_its, self.state.log_vols, ls=ls[0], c=colours[0],
            label='log X'
        )
        ax[1].set_ylabel('Log X')
        ax[1].legend(frameon=False)
        if self.state.track_gradients:
            ax_logX_grad = plt.twinx(ax[1])
            # Use dotted linestyle (ls[2]) because dashed isn't clear
            ax_logX_grad.plot(
                logX_its,
                rolling_mean(np.abs(self.state.gradients), self.nlive // 10),
                c=colours[1],
                ls=ls[2],
                label='Gradient'
            )
            ax_logX_grad.set_ylabel(r'$|d\log L/d \log X|$')
            ax_logX_grad.set_yscale('log')
            handles, labels = ax[1].get_legend_handles_labels()
            handles_tw, labels_tw = ax_logX_grad.get_legend_handles_labels()
            ax[1].legend(
                handles + handles_tw, labels + labels_tw, frameon=False
            )
        # Panel 2: cumulative likelihood evaluations.
        ax[2].plot(it, self.likelihood_evaluations, c=colours[0], ls=ls[0],
                   label='Evaluations')
        ax[2].set_ylabel('logL evaluations')
        # Panel 3: log-evidence with dZ on a twin axis.
        ax[3].plot(it, self.logZ_history, label='logZ', c=colours[0], ls=ls[0])
        ax[3].set_ylabel('logZ')
        ax[3].legend(frameon=False)
        ax_dz = plt.twinx(ax[3])
        ax_dz.plot(it, self.dZ_history, label='dZ', c=colours[1], ls=ls[1])
        ax_dz.set_ylabel('dZ')
        handles, labels = ax[3].get_legend_handles_labels()
        handles_dz, labels_dz = ax_dz.get_legend_handles_labels()
        ax[3].legend(handles + handles_dz, labels + labels_dz, frameon=False)
        # Panel 4: proposal/population acceptance with the population
        # radius on a twin axis.
        ax[4].plot(it, self.mean_acceptance_history, c=colours[0],
                   label='Proposal')
        ax[4].plot(self.population_iterations, self.population_acceptance,
                   c=colours[1], ls=ls[1], label='Population')
        ax[4].set_ylabel('Acceptance')
        ax[4].set_ylim((-0.1, 1.1))
        handles, labels = ax[4].get_legend_handles_labels()
        ax_r = plt.twinx(ax[4])
        ax_r.plot(self.population_iterations, self.population_radii,
                  label='Radius', color=colours[2], ls=ls[2])
        ax_r.set_ylabel('Population radius')
        handles_r, labels_r = ax_r.get_legend_handles_labels()
        ax[4].legend(handles + handles_r, labels + labels_r, frameon=False)
        # Panel 5: rolling KS-test p-values (one per nlive iterations).
        if len(self.rolling_p):
            it = (np.arange(len(self.rolling_p)) + 1) * self.nlive
            ax[5].plot(it, self.rolling_p, 'o', c=colours[0], label='p-value')
        ax[5].set_ylabel('p-value')
        ax[5].set_ylim([-0.1, 1.1])
        ax[-1].set_xlabel('Iteration')
        fig.suptitle(f'Sampling time: {self.current_sampling_time}',
                     fontsize=16)
        handles = [
            Line2D([0], [0], color='#ff9900', linestyle='-.',
                   label='Current iteration'),
            Line2D([0], [0], color='lightgrey', linestyle='-',
                   markersize=10, markeredgewidth=1.5, label='Training'),
            Line2D([0], [0], color='#66ccff', linestyle=':',
                   label='Checkpoint'),
        ]
        fig.legend(
            handles=handles, frameon=False, ncol=3, loc=(0.6, 0.0)
        )
        fig.tight_layout()
        fig.subplots_adjust(top=0.95)
        if filename is not None:
            fig.savefig(filename)
            plt.close(fig)
        else:
            return fig
def plot_trace(self, filename=None):
"""
Make trace plots for the nested samples.
Parameters
----------
filename : str, optional
If filename is None, the figure is returned. Else the figure
is saved with that file name.
"""
if self.nested_samples:
fig = plot_trace(self.state.log_vols[1:], self.nested_samples,
filename=filename)
return fig
else:
logger.warning('Could not produce trace plot. No nested samples!')
def plot_insertion_indices(self, filename=None, **kwargs):
"""
Make a plot of all the insertion indices.
Parameters
----------
filename : str, optional
If filename is None, the figure is returned. Else the figure
is saved with that file name.
kwargs :
Keyword arguments passed to `nessai.plot.plot_indices`.
"""
return plot_indices(
self.insertion_indices,
self.nlive,
filename=filename,
**kwargs
)
    def update_state(self, force=False):
        """
        Update the sampler state after replacing a live point.

        Parameters
        ----------
        force : bool, optional
            If True the history, summary log and plots are updated
            regardless of the current iteration (used when finalising);
            the rolling insertion-index check is skipped when forced.
        """
        # Check if acceptance is not None, this indicates the proposal
        # was populated
        if not self.proposal._checked_population:
            self.population_acceptance.append(
                self.proposal.population_acceptance)
            self.population_radii.append(self.proposal.r)
            self.population_iterations.append(self.iteration)
            self.proposal._checked_population = True
        # Record history every nlive // 10 iterations.
        if not (self.iteration % (self.nlive // 10)) or force:
            self.likelihood_evaluations.append(
                self.model.likelihood_evaluations)
            self.min_likelihood.append(self.logLmin)
            self.max_likelihood.append(self.logLmax)
            self.logZ_history.append(self.state.logZ)
            self.dZ_history.append(self.condition)
            self.mean_acceptance_history.append(self.mean_acceptance)
        # Every nlive iterations: summary log, checkpoint, rolling
        # insertion-index check and diagnostic plots.
        if not (self.iteration % self.nlive) or force:
            logger.warning(
                f"it: {self.iteration:5d}: "
                f"n eval: {self.likelihood_calls} "
                f"H: {self.state.info[-1]:.2f} "
                f"dZ: {self.condition:.3f} logZ: {self.state.logZ:.3f} "
                f"+/- {np.sqrt(self.state.info[-1] / self.nlive):.3f} "
                f"logLmax: {self.logLmax:.2f}")
            if self.checkpointing:
                self.checkpoint(periodic=True)
            if not force:
                self.check_insertion_indices()
                if self.plot:
                    plot_indices(self.insertion_indices[-self.nlive:],
                                 self.nlive,
                                 plot_breakdown=False,
                                 filename=(f'{self.output}/diagnostics/'
                                           'insertion_indices_'
                                           f'{self.iteration}.png'))
            if self.plot:
                self.plot_state(filename=f'{self.output}/state.png')
                self.plot_trace(filename=f'{self.output}/trace.png')
            # Uninformed sampling restarts the acceptance block each
            # summary interval.
            if self.uninformed_sampling:
                self.block_acceptance = 0.
                self.block_iteration = 0
        self.proposal.ns_acceptance = self.mean_block_acceptance
    def checkpoint(self, periodic=False):
        """
        Checkpoint the classes internal state

        Parameters
        ----------
        periodic : bool
            Indicates if the checkpoint is regular periodic checkpointing
            or forced by a signal. If forced by a signal, it will show up on
            the state plot.
        """
        if not periodic:
            # Record signal-driven checkpoints so they appear on the state
            # plot.
            self.checkpoint_iterations += [self.iteration]
        # Fold the elapsed time into the total before pickling so the saved
        # state carries an up-to-date sampling time.
        self.sampling_time += \
            (datetime.datetime.now() - self.sampling_start_time)
        logger.critical('Checkpointing nested sampling')
        safe_file_dump(self, self.resume_file, pickle, save_existing=True)
        # Restart the timer for the next block of sampling.
        self.sampling_start_time = datetime.datetime.now()
def check_resume(self):
"""
Check the normalising flow is correctly configured is the sampler
was resumed.
"""
if self.resumed:
if self.uninformed_sampling is False:
self.check_proposal_switch(force=True)
# If pool is populated reset the flag since it is set to
# false during initialisation
if hasattr(self._flow_proposal, 'resume_populated'):
if (self._flow_proposal.resume_populated and
self._flow_proposal.indices):
self._flow_proposal.populated = True
logger.info('Resumed with populated pool')
self.resumed = False
def finalise(self):
"""
Finalise things after sampling
"""
logger.info('Finalising')
for i, p in enumerate(self.live_points):
self.state.increment(p['logL'], nlive=self.nlive-i)
self.nested_samples.append(p)
# Refine evidence estimate
self.update_state(force=True)
self.state.finalise()
# output the chain and evidence
self.finalised = True
    def nested_sampling_loop(self):
        """
        Run the main nested sampling loop.

        Returns
        -------
        tuple
            ``(log_evidence, nested_samples)`` where the samples are a
            numpy array. If `prior_sampling` is enabled, only the live
            points (drawn from the prior) are returned instead.
        """
        self.sampling_start_time = datetime.datetime.now()
        if not self.initialised:
            self.initialise(live_points=True)
        if self.prior_sampling:
            # Only draw samples from the prior; skip the sampling loop.
            self.nested_samples = self.live_points.copy()
            if self.close_pool:
                self.model.close_pool()
            return self.nested_samples
        self.check_resume()
        if self.iteration:
            self.update_state()
        logger.critical('Starting nested sampling loop')
        while self.condition > self.tolerance:
            self.check_state()
            self.consume_sample()
            self.update_state()
            if self.iteration >= self.max_iteration:
                break
        # final adjustments
        # avoid repeating final adjustments if resuming a completed run.
        if not self.finalised and (self.condition <= self.tolerance):
            self.finalise()
        logger.critical(f'Final evidence: {self.state.logZ:.3f} +/- '
                        f'{np.sqrt(self.state.info[-1] / self.nlive):.3f}')
        logger.critical('Information: {0:.2f}'.format(self.state.info[-1]))
        self.check_insertion_indices(rolling=False)
        # This includes updating the total sampling time
        self.checkpoint(periodic=True)
        if self.close_pool:
            self.model.close_pool()
        logger.info(f'Total sampling time: {self.sampling_time}')
        logger.info(f'Total training time: {self.training_time}')
        logger.info(f'Total population time: {self.proposal_population_time}')
        logger.info(
            f'Total likelihood evaluations: {self.likelihood_calls:3d}')
        logger.info(
            'Time spent evaluating likelihood: '
            f'{self.likelihood_evaluation_time}'
        )
        return self.state.logZ, np.array(self.nested_samples)
@classmethod
def resume(cls, filename, model, flow_config={}, weights_file=None):
"""
Resumes the interrupted state from a checkpoint pickle file.
Parameters
----------
filename : str
Pickle pickle to resume from
model : :obj:`nessai.model.Model`
User-defined model
flow_config : dict, optional
Dictionary for configuring the flow
weights_file : str, optional
Weights files to use in place of the weights file stored in the
pickle file.
Returns
-------
obj
Instance of NestedSampler
"""
logger.critical('Resuming NestedSampler from ' + filename)
with open(filename, 'rb') as f:
obj = pickle.load(f)
model.likelihood_evaluations += obj.likelihood_evaluations[-1]
obj.model = model
obj._uninformed_proposal.resume(model)
obj._flow_proposal.resume(model, flow_config, weights_file)
obj.resumed = True
return obj
def __getstate__(self):
state = self.__dict__.copy()
del state['model']
return state
    def __setstate__(self, state):
        # Restore the pickled attributes; the model is reattached
        # separately in `resume`.
        self.__dict__ = state
| 36.313301 | 79 | 0.583045 |
from collections import deque
import datetime
import logging
import os
import pickle
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import seaborn as sns
import torch
from tqdm import tqdm
from .livepoint import get_dtype, DEFAULT_FLOAT_DTYPE
from .plot import plot_indices, plot_trace
from .evidence import _NSIntegralState
from .proposal import FlowProposal
from .utils import (
safe_file_dump,
compute_indices_ks_test,
rolling_mean,
)
sns.set()
sns.set_style('ticks')
logger = logging.getLogger(__name__)
class NestedSampler:
    def __init__(
        self,
        model,
        nlive=2000,
        output=None,
        stopping=0.1,
        max_iteration=None,
        checkpointing=True,
        checkpoint_on_training=False,
        resume_file=None,
        seed=None,
        pool=None,
        close_pool=True,
        n_pool=None,
        plot=True,
        proposal_plots=False,
        prior_sampling=False,
        analytic_priors=False,
        maximum_uninformed=None,
        uninformed_proposal=None,
        uninformed_acceptance_threshold=None,
        uninformed_proposal_kwargs=None,
        flow_class=None,
        flow_config=None,
        training_frequency=None,
        train_on_empty=True,
        cooldown=200,
        memory=False,
        reset_weights=False,
        reset_permutations=False,
        retrain_acceptance=True,
        reset_acceptance=False,
        acceptance_threshold=0.01,
        **kwargs
    ):
        """Initialise the nested sampler for a user-defined model.

        `model` is verified and given the (optional) multiprocessing pool.
        Remaining keyword arguments configure live points, stopping
        criterion, checkpointing, plotting, the uninformed (prior-based)
        proposal and the flow-based proposal; any extra ``**kwargs`` are
        forwarded to the flow proposal class.
        """
        logger.info('Initialising nested sampler')
        self.info_enabled = logger.isEnabledFor(logging.INFO)
        model.verify_model()
        self.model = model
        self.model.configure_pool(pool=pool, n_pool=n_pool)
        self.close_pool = close_pool
        self.nlive = nlive
        self.live_points = None
        self.prior_sampling = prior_sampling
        self.setup_random_seed(seed)
        # Acceptance bookkeeping (rolling window of the last nlive // 10).
        self.accepted = 0
        self.rejected = 1
        self.initialised = False
        self.checkpointing = checkpointing
        self.checkpoint_on_training = checkpoint_on_training
        self.iteration = 0
        self.acceptance_history = deque(maxlen=(nlive // 10))
        self.mean_acceptance_history = []
        self.block_acceptance = 1.
        self.mean_block_acceptance = 1.
        self.block_iteration = 0
        self.retrain_acceptance = retrain_acceptance
        self.reset_acceptance = reset_acceptance
        self.insertion_indices = []
        self.rolling_p = []
        self.resumed = False
        # Evidence integration state and stopping condition (dZ).
        self.tolerance = stopping
        self.condition = np.inf
        self.logLmin = -np.inf
        self.logLmax = -np.inf
        self.nested_samples = []
        self.logZ = None
        self.state = _NSIntegralState(self.nlive, track_gradients=plot)
        self.plot = plot
        self.resume_file = self.setup_output(output, resume_file)
        self.output = output
        self.training_time = datetime.timedelta()
        self.sampling_time = datetime.timedelta()
        self.sampling_start_time = datetime.datetime.now()
        self.completed_training = True
        self.finalised = False
        # Diagnostic histories recorded every nlive // 10 iterations.
        self.likelihood_evaluations = []
        self.training_iterations = []
        self.min_likelihood = []
        self.max_likelihood = []
        self.logZ_history = []
        self.dZ_history = []
        self.population_acceptance = []
        self.population_radii = []
        self.population_iterations = []
        self.checkpoint_iterations = []
        self.acceptance_threshold = acceptance_threshold
        self.train_on_empty = train_on_empty
        self.cooldown = cooldown
        self.memory = memory
        self.configure_max_iteration(max_iteration)
        self.configure_flow_reset(reset_weights, reset_permutations)
        self.configure_training_frequency(training_frequency)
        if uninformed_proposal_kwargs is None:
            uninformed_proposal_kwargs = {}
        self.configure_uninformed_proposal(uninformed_proposal,
                                           analytic_priors,
                                           maximum_uninformed,
                                           uninformed_acceptance_threshold,
                                           **uninformed_proposal_kwargs)
        self.configure_flow_proposal(flow_class, flow_config, proposal_plots,
                                     **kwargs)
        # Debug switch (hard-coded off): dump live points to disk.
        self.store_live_points = False
        if self.store_live_points:
            self.live_points_dir = f'{self.output}/live_points/'
            os.makedirs(self.live_points_dir, exist_ok=True)
            self.replacement_points = []
    @property
    def log_evidence(self):
        # Current log-evidence estimate from the integrator state.
        return self.state.logZ
    @property
    def information(self):
        # Latest information (H) estimate from the integrator state.
        return self.state.info[-1]
    @property
    def likelihood_calls(self):
        # Total number of likelihood evaluations performed by the model.
        return self.model.likelihood_evaluations
    @property
    def likelihood_evaluation_time(self):
        # Wall-time the model has spent evaluating the likelihood.
        return self.model.likelihood_evaluation_time
    @property
    def proposal_population_time(self):
        # Combined wall-time spent populating both proposal pools.
        t = self._uninformed_proposal.population_time
        t += self._flow_proposal.population_time
        return t
    @property
    def acceptance(self):
        # Overall acceptance: iterations per likelihood call.
        return self.iteration / self.likelihood_calls
    @property
    def current_sampling_time(self):
        # Total sampling time; includes the in-progress interval until the
        # run has been finalised.
        if self.finalised:
            return self.sampling_time
        else:
            return self.sampling_time \
                + (datetime.datetime.now() - self.sampling_start_time)
    @property
    def last_updated(self):
        # Iteration at which the flow was last trained (0 if never).
        if self.training_iterations:
            return self.training_iterations[-1]
        else:
            return 0
    @property
    def mean_acceptance(self):
        # Mean of the rolling acceptance window; NaN before any samples.
        if self.acceptance_history:
            return np.mean(self.acceptance_history)
        else:
            return np.nan
def configure_max_iteration(self, max_iteration):
if max_iteration is None:
self.max_iteration = np.inf
else:
self.max_iteration = max_iteration
def configure_training_frequency(self, training_frequency):
if training_frequency in [None, 'inf', 'None']:
logger.warning('Proposal will only train when empty')
self.training_frequency = np.inf
else:
self.training_frequency = training_frequency
    def configure_uninformed_proposal(self,
                                      uninformed_proposal,
                                      analytic_priors,
                                      maximum_uninformed,
                                      uninformed_acceptance_threshold,
                                      **kwargs):
        """Configure the initial prior-based ("uninformed") proposal.

        ``maximum_uninformed`` caps how many iterations use this proposal
        (``None`` -> 2 * nlive, falsy -> disabled). The acceptance threshold
        below which the sampler switches away defaults to a value derived
        from ``acceptance_threshold``. Remaining kwargs are passed to the
        proposal class.
        """
        if maximum_uninformed is None:
            self.uninformed_sampling = True
            self.maximum_uninformed = 2 * self.nlive
        elif not maximum_uninformed:
            self.uninformed_sampling = False
            self.maximum_uninformed = 0
        else:
            self.uninformed_sampling = True
            self.maximum_uninformed = float(maximum_uninformed)
        if uninformed_acceptance_threshold is None:
            # Switch away earlier when the main threshold is strict.
            if self.acceptance_threshold < 0.1:
                self.uninformed_acceptance_threshold = \
                    10 * self.acceptance_threshold
            else:
                self.uninformed_acceptance_threshold = \
                    self.acceptance_threshold
        else:
            self.uninformed_acceptance_threshold = \
                uninformed_acceptance_threshold
        if uninformed_proposal is None:
            # Analytic priors can be sampled directly; otherwise fall back
            # to rejection sampling with a pool of nlive points.
            if analytic_priors:
                from .proposal import AnalyticProposal as uninformed_proposal
            else:
                from .proposal import RejectionProposal as uninformed_proposal
                kwargs['poolsize'] = self.nlive
        logger.debug(f'Using uninformed proposal: {uninformed_proposal}')
        logger.debug(f'Parsing kwargs to uninformed proposal: {kwargs}')
        self._uninformed_proposal = uninformed_proposal(
            self.model, **kwargs
        )
    def configure_flow_proposal(self, flow_class, flow_config, proposal_plots,
                                **kwargs):
        """Resolve and instantiate the flow-based proposal.

        ``flow_class`` may be a class inheriting from ``FlowProposal`` or a
        (case-insensitive) string naming one of the known proposal classes;
        ``None`` selects the default ``FlowProposal``. Extra kwargs are
        forwarded to the proposal constructor.
        """
        proposal_output = self.output + '/proposal/'
        if not self.plot:
            proposal_plots = False
        if flow_class is not None:
            if isinstance(flow_class, str):
                flow_class = flow_class.lower()
                if flow_class == 'gwflowproposal':
                    from .gw.proposal import GWFlowProposal as flow_class
                elif flow_class == 'augmentedgwflowproposal':
                    from .gw.proposal import (
                        AugmentedGWFlowProposal as flow_class)
                elif flow_class == 'legacygwflowproposal':
                    from .gw.legacy import LegacyGWFlowProposal as flow_class
                elif flow_class == 'flowproposal':
                    flow_class = FlowProposal
                elif flow_class == 'augmentedflowproposal':
                    from .proposal import AugmentedFlowProposal
                    flow_class = AugmentedFlowProposal
                else:
                    raise ValueError(f'Unknown flow class: {flow_class}')
            elif not issubclass(flow_class, FlowProposal):
                raise RuntimeError('Flow class must be string or class that '
                                   'inherits from FlowProposal')
        else:
            flow_class = FlowProposal
        # Default pool size is one live-point set.
        if kwargs.get('poolsize', None) is None:
            kwargs['poolsize'] = self.nlive
        logger.debug(f'Using flow class: {flow_class}')
        logger.info(f'Parsing kwargs to FlowProposal: {kwargs}')
        self._flow_proposal = flow_class(
            self.model,
            flow_config=flow_config,
            output=proposal_output,
            plot=proposal_plots,
            **kwargs
        )
def setup_output(self, output, resume_file=None):
if not os.path.exists(output):
os.makedirs(output, exist_ok=True)
if resume_file is None:
resume_file = os.path.join(output, "nested_sampler_resume.pkl")
else:
resume_file = os.path.join(output, resume_file)
if self.plot:
os.makedirs(output + '/diagnostics/', exist_ok=True)
return resume_file
    def setup_random_seed(self, seed):
        """Seed both the numpy and torch RNGs; ``None`` leaves them unseeded."""
        self.seed = seed
        if self.seed is not None:
            logger.debug(f'Setting random seed to {seed}')
            np.random.seed(seed=self.seed)
            torch.manual_seed(self.seed)
    def configure_flow_reset(self, reset_weights, reset_permutations):
        """Validate and store the flow reset cadences.

        Both values are coerced to float; a bool works because it is an
        int subclass (False -> 0.0 disables the corresponding reset).
        """
        if isinstance(reset_weights, (int, float)):
            self.reset_weights = float(reset_weights)
        else:
            raise TypeError(
                '`reset_weights` must be a bool, int or float')
        if isinstance(reset_permutations, (int, float)):
            self.reset_permutations = float(reset_permutations)
        else:
            raise TypeError(
                '`reset_permutations` must be a bool, int or float')
    def check_insertion_indices(self, rolling=True, filename=None):
        """KS-test the insertion indices against the expected uniform law.

        With ``rolling=True`` only the last nlive indices are tested and the
        p-value is recorded; otherwise the full history is used. Optionally
        saves all indices to ``filename`` inside the output directory.
        """
        if rolling:
            indices = self.insertion_indices[-self.nlive:]
        else:
            indices = self.insertion_indices
        D, p = compute_indices_ks_test(indices, self.nlive)
        if p is not None:
            if rolling:
                logger.warning(f'Rolling KS test: D={D:.4}, p-value={p:.4}')
                self.rolling_p.append(p)
            else:
                logger.warning(f'Final KS test: D={D:.4}, p-value={p:.4}')
        if filename is not None:
            np.savetxt(os.path.join(self.output, filename),
                       self.insertion_indices, newline='\n', delimiter=' ')
    def log_likelihood(self, x):
        # Thin delegation to the user model.
        return self.model.log_likelihood(x)
    def yield_sample(self, oldparam):
        """Infinite generator yielding ``(n_tries, point)`` pairs.

        Repeatedly draws from the current proposal until a point with
        finite prior and logL above ``self.logLmin`` is found, or the
        proposal pool empties (in which case the last point is re-yielded
        so the caller can trigger retraining via ``check_state``).
        """
        while True:
            counter = 0
            while True:
                counter += 1
                newparam = self.proposal.draw(oldparam.copy())
                if newparam['logP'] != -np.inf:
                    # Falsy logL (0 / unset) means it was never evaluated.
                    if not newparam['logL']:
                        newparam['logL'] = \
                            self.model.evaluate_log_likelihood(newparam)
                    if newparam['logL'] > self.logLmin:
                        self.logLmax = max(self.logLmax, newparam['logL'])
                        oldparam = newparam.copy()
                        break
                if not self.proposal.populated:
                    break
            yield counter, oldparam
    def insert_live_point(self, live_point):
        """Insert a point into the sorted live points, dropping the worst.

        The array is shifted down by one (discarding index 0, the current
        worst point) and the new point placed at ``index - 1`` so the array
        stays sorted by logL. Returns the insertion index used for the
        uniformity diagnostics.
        """
        # searchsorted uses side='left' by default.
        index = np.searchsorted(self.live_points['logL'], live_point['logL'])
        self.live_points[:index - 1] = self.live_points[1:index]
        self.live_points[index - 1] = live_point
        return index - 1
    def consume_sample(self):
        """Perform one nested-sampling step.

        Removes the worst live point, adds it to the nested samples,
        updates the evidence integrator and stopping condition, then draws
        replacements until one exceeds the new likelihood threshold.
        """
        worst = self.live_points[0].copy()
        self.logLmin = worst['logL']
        self.state.increment(worst['logL'])
        self.nested_samples.append(worst)
        # Remaining-evidence stopping criterion:
        # dZ = logaddexp(logZ, logLmax - i / nlive) - logZ.
        self.condition = np.logaddexp(self.state.logZ,
                                      self.logLmax
                                      - self.iteration / float(self.nlive)) \
            - self.state.logZ
        self.iteration += 1
        self.block_iteration += 1
        count = 0
        while(True):
            c, proposed = next(self.yield_sample(worst))
            count += c
            if proposed['logL'] > self.logLmin:
                index = self.insert_live_point(proposed)
                self.insertion_indices.append(index)
                self.accepted += 1
                self.block_acceptance += 1 / count
                self.acceptance_history.append(1 / count)
                break
            else:
                # Rejected draw: give the sampler a chance to retrain.
                self.rejected += 1
                self.check_state()
        if not self.block_iteration:
            self.block_iteration += 1
        self.mean_block_acceptance = self.block_acceptance \
            / self.block_iteration
        if self.info_enabled:
            logger.info(f"{self.iteration:5d}: n: {count:3d} "
                        f"b_acc: {self.mean_block_acceptance:.3f} "
                        f"H: {self.state.info[-1]:.2f} "
                        f"logL: {self.logLmin:.5f} --> {proposed['logL']:.5f} "
                        f"dZ: {self.condition:.3f} "
                        f"logZ: {self.state.logZ:.3f} "
                        f"+/- {np.sqrt(self.state.info[-1] / self.nlive):.3f} "
                        f"logLmax: {self.logLmax:.2f}")
    def populate_live_points(self):
        """Draw the initial set of nlive live points from the prior.

        Points with NaN logL are reported and skipped; only points with
        finite logP and logL are kept. The final array is sorted by logL.
        """
        i = 0
        live_points = np.empty(self.nlive,
                               dtype=get_dtype(self.model.names,
                                               DEFAULT_FLOAT_DTYPE))
        with tqdm(total=self.nlive, desc='Drawing live points') as pbar:
            while i < self.nlive:
                while i < self.nlive:
                    count, live_point = next(
                        self.yield_sample(self.model.new_point()))
                    if np.isnan(live_point['logL']):
                        logger.warning(
                            'Likelihood function returned NaN for '
                            f'live_point {live_point}'
                        )
                        logger.warning(
                            'You may want to check your likelihood function'
                        )
                        # Restart the inner loop with a fresh prior draw.
                        break
                    if (
                        np.isfinite(live_point['logP'])
                        and np.isfinite(live_point['logL'])
                    ):
                        live_points[i] = live_point
                        i += 1
                        pbar.update()
                        break
        self.live_points = np.sort(live_points, order='logL')
        if self.store_live_points:
            np.savetxt(self.live_points_dir + '/initial_live_points.dat',
                       self.live_points,
                       header='\t'.join(self.live_points.dtype.names))
    def initialise(self, live_points=True):
        """Initialise both proposals and (optionally) the live points.

        Selects the active proposal based on the uninformed-sampling phase;
        `initialised` is only set when everything was set up in this call.
        """
        flags = [False] * 3
        if not self._flow_proposal.initialised:
            self._flow_proposal.initialise()
            flags[0] = True
        if not self._uninformed_proposal.initialised:
            self._uninformed_proposal.initialise()
            flags[1] = True
        if (
            self.iteration < self.maximum_uninformed
            and self.uninformed_sampling
        ):
            self.proposal = self._uninformed_proposal
        else:
            self.proposal = self._flow_proposal
        if live_points and self.live_points is None:
            self.populate_live_points()
            flags[2] = True
        if self.condition > self.tolerance:
            self.finalised = False
        if all(flags):
            self.initialised = True
    def check_proposal_switch(self, force=False):
        """Switch to the flow proposal when the uninformed phase is over.

        Triggered by low acceptance, reaching the uninformed iteration cap,
        or ``force``. Returns True if the flow proposal is (now) active.
        """
        if (
            (self.mean_acceptance < self.uninformed_acceptance_threshold)
            or (self.iteration >= self.maximum_uninformed)
            or force
        ):
            if self.proposal is self._flow_proposal:
                logger.warning('Already using flowproposal')
                return True
            logger.warning('Switching to FlowProposal')
            self.proposal = self._flow_proposal
            self.proposal.ns_acceptance = self.mean_block_acceptance
            self.uninformed_sampling = False
            return True
        else:
            return False
    def check_training(self):
        """Decide whether to (re)train the flow.

        Returns ``(train, force)``; ``force=True`` bypasses the cooldown
        in ``train_proposal``. Reasons, in priority order: interrupted
        training on resume, an empty proposal pool, low block acceptance,
        or the fixed training schedule.
        """
        if not self.completed_training:
            logger.debug('Training flow (resume)')
            return True, True
        elif (not self.proposal.populated and
                self.train_on_empty and
                not self.proposal.populating):
            logger.debug('Training flow (proposal empty)')
            return True, True
        elif (self.mean_block_acceptance < self.acceptance_threshold and
                self.retrain_acceptance):
            logger.debug('Training flow (acceptance)')
            return True, False
        elif (self.iteration - self.last_updated) == self.training_frequency:
            logger.debug('Training flow (iteration)')
            return True, False
        else:
            return False, False
    def check_flow_model_reset(self):
        """Reset flow weights/permutations according to the configured cadence.

        No-op until the flow has been trained at least once. When
        ``reset_acceptance`` is enabled and acceptance is poor, everything
        is reset; otherwise weights/permutations reset every
        ``reset_weights`` / ``reset_permutations`` training counts
        (0.0 disables the corresponding reset).
        """
        if not self.proposal.training_count:
            return
        if (self.reset_acceptance
                and self.mean_block_acceptance < self.acceptance_threshold):
            self.proposal.reset_model_weights(weights=True, permutations=True)
            return
        self.proposal.reset_model_weights(
            weights=(
                self.reset_weights and
                not (self.proposal.training_count % self.reset_weights)
            ),
            permutations=(
                self.reset_permutations and
                not (self.proposal.training_count % self.reset_permutations)
            ),
        )
    def train_proposal(self, force=False):
        """Train the flow on the current live points (plus optional memory).

        Skipped while still within the post-training cooldown unless
        ``force``. Resets the per-block acceptance statistics afterwards.
        """
        if (self.iteration - self.last_updated < self.cooldown and not force):
            logger.debug('Not training, still cooling down!')
        else:
            self.completed_training = False
            self.check_flow_model_reset()
            training_data = self.live_points.copy()
            # Optionally include the most recent nested samples as memory.
            if self.memory and (len(self.nested_samples) >= self.memory):
                training_data = np.concatenate([
                    training_data, self.nested_samples[-self.memory:].copy()])
            st = datetime.datetime.now()
            self.proposal.train(training_data)
            self.training_time += (datetime.datetime.now() - st)
            self.training_iterations.append(self.iteration)
            self.block_iteration = 0
            self.block_acceptance = 0.
            self.completed_training = True
            if self.checkpoint_on_training:
                self.checkpoint(periodic=True)
def check_state(self, force=False):
if self.uninformed_sampling:
if self.check_proposal_switch():
force = True
else:
return
# General override
train = False
if force:
train = True
logger.debug('Training flow (force)')
elif not train:
train, force = self.check_training()
if train or force:
self.train_proposal(force=force)
def plot_state(self, filename=None):
fig, ax = plt.subplots(6, 1, sharex=True, figsize=(12, 12))
ax = ax.ravel()
it = (np.arange(len(self.min_likelihood))) * (self.nlive // 10)
it[-1] = self.iteration
colours = ['']
for t in self.training_iterations:
for a in ax:
a.axvline(t, ls='-', color='lightgrey')
if not self.train_on_empty:
for p in self.population_iterations:
for a in ax:
a.axvline(p, ls='-', color='tab:orange')
for i in self.checkpoint_iterations:
for a in ax:
a.axvline(i, ls=':', color='
for a in ax:
a.axvline(self.iteration, c='
ax[0].plot(it, self.min_likelihood, label='Min logL',
c=colours[0], ls=ls[0])
ax[0].plot(it, self.max_likelihood, label='Max logL',
c=colours[1], ls=ls[1])
ax[0].set_ylabel('logL')
ax[0].legend(frameon=False)
logX_its = np.arange(len(self.state.log_vols))
ax[1].plot(
logX_its, self.state.log_vols, ls=ls[0], c=colours[0],
label='log X'
)
ax[1].set_ylabel('Log X')
ax[1].legend(frameon=False)
if self.state.track_gradients:
ax_logX_grad = plt.twinx(ax[1])
# Use dotted linestyle (ls[2]) because dashed isn't clear
ax_logX_grad.plot(
logX_its,
rolling_mean(np.abs(self.state.gradients), self.nlive // 10),
c=colours[1],
ls=ls[2],
label='Gradient'
)
ax_logX_grad.set_ylabel(r'$|d\log L/d \log X|$')
ax_logX_grad.set_yscale('log')
handles, labels = ax[1].get_legend_handles_labels()
handles_tw, labels_tw = ax_logX_grad.get_legend_handles_labels()
ax[1].legend(
handles + handles_tw, labels + labels_tw, frameon=False
)
ax[2].plot(it, self.likelihood_evaluations, c=colours[0], ls=ls[0],
label='Evaluations')
ax[2].set_ylabel('logL evaluations')
ax[3].plot(it, self.logZ_history, label='logZ', c=colours[0], ls=ls[0])
ax[3].set_ylabel('logZ')
ax[3].legend(frameon=False)
ax_dz = plt.twinx(ax[3])
ax_dz.plot(it, self.dZ_history, label='dZ', c=colours[1], ls=ls[1])
ax_dz.set_ylabel('dZ')
handles, labels = ax[3].get_legend_handles_labels()
handles_dz, labels_dz = ax_dz.get_legend_handles_labels()
ax[3].legend(handles + handles_dz, labels + labels_dz, frameon=False)
ax[4].plot(it, self.mean_acceptance_history, c=colours[0],
label='Proposal')
ax[4].plot(self.population_iterations, self.population_acceptance,
c=colours[1], ls=ls[1], label='Population')
ax[4].set_ylabel('Acceptance')
ax[4].set_ylim((-0.1, 1.1))
handles, labels = ax[4].get_legend_handles_labels()
ax_r = plt.twinx(ax[4])
ax_r.plot(self.population_iterations, self.population_radii,
label='Radius', color=colours[2], ls=ls[2])
ax_r.set_ylabel('Population radius')
handles_r, labels_r = ax_r.get_legend_handles_labels()
ax[4].legend(handles + handles_r, labels + labels_r, frameon=False)
if len(self.rolling_p):
it = (np.arange(len(self.rolling_p)) + 1) * self.nlive
ax[5].plot(it, self.rolling_p, 'o', c=colours[0], label='p-value')
ax[5].set_ylabel('p-value')
ax[5].set_ylim([-0.1, 1.1])
ax[-1].set_xlabel('Iteration')
fig.suptitle(f'Sampling time: {self.current_sampling_time}',
fontsize=16)
handles = [
Line2D([0], [0], color='#ff9900', linestyle='-.',
label='Current iteration'),
Line2D([0], [0], color='lightgrey', linestyle='-',
markersize=10, markeredgewidth=1.5, label='Training'),
Line2D([0], [0], color='#66ccff', linestyle=':',
label='Checkpoint'),
]
fig.legend(
handles=handles, frameon=False, ncol=3, loc=(0.6, 0.0)
)
fig.tight_layout()
fig.subplots_adjust(top=0.95)
if filename is not None:
fig.savefig(filename)
plt.close(fig)
else:
return fig
    def plot_trace(self, filename=None):
        """Plot the trace of the nested samples versus log X.

        Returns the figure, or None (with a warning) if no nested samples
        have been produced yet.
        """
        if self.nested_samples:
            # Skip the first log-volume entry (the initial value).
            fig = plot_trace(self.state.log_vols[1:], self.nested_samples,
                             filename=filename)
            return fig
        else:
            logger.warning('Could not produce trace plot. No nested samples!')
    def plot_insertion_indices(self, filename=None, **kwargs):
        """Plot the distribution of all recorded insertion indices."""
        return plot_indices(
            self.insertion_indices,
            self.nlive,
            filename=filename,
            **kwargs
        )
    def update_state(self, force=False):
        """Update diagnostics, periodic logging, plots and checkpoints.

        Histories are recorded every nlive // 10 iterations; logging,
        KS checks, plotting and checkpointing happen every nlive
        iterations. ``force=True`` triggers everything except the rolling
        KS check and its per-block plot.
        """
        if not self.proposal._checked_population:
            self.population_acceptance.append(
                self.proposal.population_acceptance)
            self.population_radii.append(self.proposal.r)
            self.population_iterations.append(self.iteration)
            self.proposal._checked_population = True
        if not (self.iteration % (self.nlive // 10)) or force:
            self.likelihood_evaluations.append(
                self.model.likelihood_evaluations)
            self.min_likelihood.append(self.logLmin)
            self.max_likelihood.append(self.logLmax)
            self.logZ_history.append(self.state.logZ)
            self.dZ_history.append(self.condition)
            self.mean_acceptance_history.append(self.mean_acceptance)
        if not (self.iteration % self.nlive) or force:
            logger.warning(
                f"it: {self.iteration:5d}: "
                f"n eval: {self.likelihood_calls} "
                f"H: {self.state.info[-1]:.2f} "
                f"dZ: {self.condition:.3f} logZ: {self.state.logZ:.3f} "
                f"+/- {np.sqrt(self.state.info[-1] / self.nlive):.3f} "
                f"logLmax: {self.logLmax:.2f}")
            if self.checkpointing:
                self.checkpoint(periodic=True)
            if not force:
                self.check_insertion_indices()
                if self.plot:
                    plot_indices(self.insertion_indices[-self.nlive:],
                                 self.nlive,
                                 plot_breakdown=False,
                                 filename=(f'{self.output}/diagnostics/'
                                           'insertion_indices_'
                                           f'{self.iteration}.png'))
            if self.plot:
                self.plot_state(filename=f'{self.output}/state.png')
                self.plot_trace(filename=f'{self.output}/trace.png')
            if self.uninformed_sampling:
                self.block_acceptance = 0.
                self.block_iteration = 0
        self.proposal.ns_acceptance = self.mean_block_acceptance
    def checkpoint(self, periodic=False):
        """Pickle the sampler to the resume file.

        Non-periodic (user-requested) checkpoints are recorded so they can
        be marked on the state plot. Sampling time is banked across the
        dump and the clock restarted afterwards.
        """
        if not periodic:
            self.checkpoint_iterations += [self.iteration]
        self.sampling_time += \
            (datetime.datetime.now() - self.sampling_start_time)
        logger.critical('Checkpointing nested sampling')
        safe_file_dump(self, self.resume_file, pickle, save_existing=True)
        self.sampling_start_time = datetime.datetime.now()
    def check_resume(self):
        """Fix up state after resuming from a checkpoint.

        Re-forces the proposal switch if the uninformed phase had already
        ended, and restores the populated flag on the flow proposal when
        its pool survived the pickle round-trip.
        """
        if self.resumed:
            if self.uninformed_sampling is False:
                self.check_proposal_switch(force=True)
            if hasattr(self._flow_proposal, 'resume_populated'):
                if (self._flow_proposal.resume_populated and
                        self._flow_proposal.indices):
                    self._flow_proposal.populated = True
                    logger.info('Resumed with populated pool')
            self.resumed = False
    def finalise(self):
        """Consume the remaining live points and finalise the evidence."""
        logger.info('Finalising')
        for i, p in enumerate(self.live_points):
            # Shrinking live-point count as each remaining point is used.
            self.state.increment(p['logL'], nlive=self.nlive-i)
            self.nested_samples.append(p)
        self.update_state(force=True)
        self.state.finalise()
        self.finalised = True
    def nested_sampling_loop(self):
        """Run the main nested-sampling loop.

        Returns ``(logZ, nested_samples)``; in prior-sampling mode the
        live points themselves are returned instead. Stops when the
        remaining-evidence condition drops below the tolerance or the
        iteration budget is exhausted.
        """
        self.sampling_start_time = datetime.datetime.now()
        if not self.initialised:
            self.initialise(live_points=True)
        if self.prior_sampling:
            self.nested_samples = self.live_points.copy()
            if self.close_pool:
                self.model.close_pool()
            return self.nested_samples
        self.check_resume()
        if self.iteration:
            self.update_state()
        logger.critical('Starting nested sampling loop')
        while self.condition > self.tolerance:
            self.check_state()
            self.consume_sample()
            self.update_state()
            if self.iteration >= self.max_iteration:
                break
        # Only finalise when the run converged (not when it hit the budget).
        if not self.finalised and (self.condition <= self.tolerance):
            self.finalise()
        logger.critical(f'Final evidence: {self.state.logZ:.3f} +/- '
                        f'{np.sqrt(self.state.info[-1] / self.nlive):.3f}')
        logger.critical('Information: {0:.2f}'.format(self.state.info[-1]))
        self.check_insertion_indices(rolling=False)
        self.checkpoint(periodic=True)
        if self.close_pool:
            self.model.close_pool()
        logger.info(f'Total sampling time: {self.sampling_time}')
        logger.info(f'Total training time: {self.training_time}')
        logger.info(f'Total population time: {self.proposal_population_time}')
        logger.info(
            f'Total likelihood evaluations: {self.likelihood_calls:3d}')
        logger.info(
            'Time spent evaluating likelihood: '
            f'{self.likelihood_evaluation_time}'
        )
        return self.state.logZ, np.array(self.nested_samples)
@classmethod
def resume(cls, filename, model, flow_config={}, weights_file=None):
logger.critical('Resuming NestedSampler from ' + filename)
with open(filename, 'rb') as f:
obj = pickle.load(f)
model.likelihood_evaluations += obj.likelihood_evaluations[-1]
obj.model = model
obj._uninformed_proposal.resume(model)
obj._flow_proposal.resume(model, flow_config, weights_file)
obj.resumed = True
return obj
def __getstate__(self):
state = self.__dict__.copy()
del state['model']
return state
def __setstate__(self, state):
self.__dict__ = state
| true | true |
1c47785da9d34f0b1c8a9845b5a3002f171b51df | 8,815 | py | Python | src/sensor_placement.py | tolgadur/Sensor-Placement | ad33477d1fb14052e1a9e58d149d0b8e767ea318 | [
"MIT"
] | 3 | 2020-05-10T20:37:50.000Z | 2022-03-31T08:25:23.000Z | src/sensor_placement.py | tolgadur/Sensor-Placement | ad33477d1fb14052e1a9e58d149d0b8e767ea318 | [
"MIT"
] | null | null | null | src/sensor_placement.py | tolgadur/Sensor-Placement | ad33477d1fb14052e1a9e58d149d0b8e767ea318 | [
"MIT"
] | 2 | 2021-02-26T10:15:24.000Z | 2021-06-07T11:11:08.000Z | #!/usr/bin/python
import numpy as np
import heapq
import pandas as pd
""" FILE NAME: 'sensor_placement.py'
DESCRIPTION: This file is implementing the class that will be used for sensor
positioning according to solution proposed by Krause, Singh and Guestrin (2008).
"""
class SensorPlacement:
@staticmethod
def isMonotonic(cov, k, V, S, U):
""" This method checks if values in the dataset are monotonic or not. For
datasets > 2000 observations, non-monotonicity might lead to suboptimal
results.
Input:
- cov: covariance matrix
- k: number of Sensors to be placed
- V: indices of all position
- S: indices of all possible sensor positions
- U: indices of all impossible sensor positions
"""
A = np.array([])
for j in range(k):
S_A = np.setdiff1d(S, A).astype(int)
for y in S_A:
AHat = np.setdiff1d(V, np.append(A, [y]))
condition = SensorPlacement.__conditionalEntropy(cov, y, A) - SensorPlacement.__conditionalEntropy(cov, y, AHat)
if condition < 0:
print(condition)
return False
return True
@staticmethod
def __conditionalVariance(cov, y, A):
""" This method calculates the conditional variance of y given A. """
var = cov[y, y] - (cov[np.ix_([y], A)] @ np.linalg.inv(cov[np.ix_(A, A)]) @ cov[np.ix_(A, [y])])
# var = np.absolute(cov[y, y] - (cov[np.ix_([y], A)] @ np.linalg.inv(cov[np.ix_(A, A)]) @ cov[np.ix_(A, [y])]))
return var[0][0]
@staticmethod
def __conditionalEntropy(cov, y, A):
""" This method calculates the conditional entropy of y given A. """
conditionalVariance = SensorPlacement.__conditionalVariance(cov, y, A)
return 0.5 * np.log(2*np.pi*conditionalVariance)
@staticmethod
def __localConditionalEntropy(cov, y, A, epsilon):
""" This method calculates the conditional entropy of y given A for
all values where cov[y, A] > epsilon. """
A_ = SensorPlacement.__localSet(cov, y, A, epsilon)
return SensorPlacement.__conditionalEntropy(cov, y, A_)
@staticmethod
def __localConditionalVariance(cov, y, A, epsilon):
""" This method calculates the conditional variance of y given A for
all values where cov[y, A] > epsilon. """
A_ = SensorPlacement.__localSet(cov, y, A, epsilon)
return SensorPlacement.__conditionalVariance(cov, y, A_)
@staticmethod
def __localSet(cov, y, A, epsilon):
""" This method returns the set of points X in S for which K(y*, x) > epsilon.
Input:
- cov: covariance matrix
- S_i: array with all indices of i
- epsilon: hyperparameter
"""
return [x for x in A if cov[y, x] > epsilon]
@staticmethod
def naiveSensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
""" This is an implementation of the first approximation method suggested in
the 'Near-Optimal Sensor Placement' paper.
Input:
- cov: covariance matrix
- k: number of Sensors to be placed
- V: indices of all position
- S: indices of all possible sensor positions
- U: indices of all impossible sensor positions
"""
print('Algorithm is starting for subdomain', subdomain, flush=True)
A = A
for j in range(k):
S_A = np.setdiff1d(S, A).astype(int)
delta = np.array([])
for y in S_A:
AHat = np.setdiff1d(V, np.append(A, [y]))
delta = np.append(delta, SensorPlacement.__conditionalVariance(cov, y, A) / \
SensorPlacement.__conditionalVariance(cov, y, AHat))
y_star = S_A[np.argmax(delta)]
A = np.append(A, y_star).astype(int)
print('subdomain ', subdomain, ': ', A, flush=True)
if subdomain != None:
output.put((subdomain, 2*A))
return 2*A
@staticmethod
def lazySensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
""" This is an implementation of the second approximation method suggested in
the 'Near-Optimal Sensor Placement' paper. It uses a priority queue in order
to reduce the time complexity from O(k*n^4) to O(k*n^3).
Input:
- cov: covariance matrix
- k: number of Sensors to be placed
- V: indices of all position
- S: indices of all possible sensor positions
- U: indices of all impossible sensor positions
"""
print('Algorithm is starting for subdomain', subdomain, flush=True)
A = A
delta = -1 * np.inf * np.ones((len(S), 1))
heap = [(delta[i], S[i], -1) for i in range(len(delta))]
heapq.heapify(heap)
for j in range(k):
while True:
delta_star, y_star, current = heapq.heappop(heap)
if current == j:
break
AHat = np.setdiff1d(V, np.append(A, [y_star]))
criterion = SensorPlacement.__conditionalVariance(cov, y_star, A) / \
SensorPlacement.__conditionalVariance(cov, y_star, AHat)
heapq.heappush(heap, (-1 * criterion, y_star, j))
A = np.append(A, y_star).astype(int)
print('subdomain ', subdomain, ': ', 2*A, flush=True)
if subdomain != None:
output.put((subdomain, 2*A))
return 2*A
@staticmethod
def localKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
""" This is an implementation of the third approximation method suggested in
the 'Near-Optimal Sensor Placement' paper. It only considers local kernels
in order to reduce the time complexity O(k*n).
Input:
- cov: covariance matrix
- k: number of Sensors to be placed
- V: indices of all position
- S: indices of all possible sensor positions
- U: indices of all impossible sensor positions
"""
print('Algorithm is starting for subdomain', subdomain, flush=True)
A = A
epsilon = 1e-10
delta = np.array([]); N = S
for y in S:
V_y = np.setdiff1d(V, y).astype(int)
delta = np.append(delta, cov[y, y] / SensorPlacement.__localConditionalVariance(cov, y, V_y, epsilon))
for j in range(k):
y_star = N[np.argmax(delta)]
A = np.append(A, y_star).astype(int)
print('subdomain ', subdomain, ': ', A, flush=True)
N = SensorPlacement.__localSet(cov, y_star, S, epsilon)
N = np.setdiff1d(S, A).astype(int)
delta = np.array([])
for y in N:
AHat = np.setdiff1d(V, np.append(A, [y]))
delta = np.append(delta, SensorPlacement.__localConditionalVariance(cov, y, A, epsilon) / \
SensorPlacement.__localConditionalVariance(cov, y, AHat, epsilon))
if subdomain != None:
output.put((subdomain, 2*A))
return 2*A
@staticmethod
def lazyLocalKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
""" This is a mix between the lazySensorPlacement method and the localKernelPlacement
method.
Input:
- cov: covariance matrix
- k: number of Sensors to be placed
- V: indices of all position
- S: indices of all possible sensor positions
- U: indices of all impossible sensor positions
"""
print('Algorithm is starting for subdomain', subdomain, flush=True)
A = A
epsilon = 1e-10
delta = -1 * np.inf * np.ones((len(S), 1))
heap = [(delta[i], S[i], -1) for i in range(len(delta))]
heapq.heapify(heap)
for j in range(k):
while True:
delta_star, y_star, current = heapq.heappop(heap)
if current == j:
break
AHat = np.setdiff1d(V, np.append(A, [y_star]))
criterion = SensorPlacement.__localConditionalVariance(cov, y_star, A, epsilon) / \
SensorPlacement.__localConditionalVariance(cov, y_star, AHat, epsilon)
heapq.heappush(heap, (-1 * criterion, y_star, j))
A = np.append(A, y_star).astype(int)
print('subdomain ', subdomain, ': ', A, flush=True)
if subdomain != None:
output.put((subdomain, 2*A))
return 2*A
| 42.584541 | 128 | 0.569484 |
import numpy as np
import heapq
import pandas as pd
class SensorPlacement:
    """Greedy sensor-placement algorithms following Krause, Singh and
    Guestrin (2008), 'Near-Optimal Sensor Placements in Gaussian
    Processes'."""

    @staticmethod
    def isMonotonic(cov, k, V, S, U):
        # Sanity check: conditioning on more points should never increase
        # the entropy of a candidate (required by the greedy guarantee).
        A = np.array([])
        for j in range(k):
            S_A = np.setdiff1d(S, A).astype(int)
            for y in S_A:
                AHat = np.setdiff1d(V, np.append(A, [y]))
                condition = SensorPlacement.__conditionalEntropy(cov, y, A) - SensorPlacement.__conditionalEntropy(cov, y, AHat)
                if condition < 0:
                    # Debug output: report the violating entropy gap.
                    print(condition)
                    return False
        return True

    @staticmethod
    def __conditionalVariance(cov, y, A):
        # Gaussian conditional variance of y given observations at A.
        var = cov[y, y] - (cov[np.ix_([y], A)] @ np.linalg.inv(cov[np.ix_(A, A)]) @ cov[np.ix_(A, [y])])
        return var[0][0]

    @staticmethod
    def __conditionalEntropy(cov, y, A):
        # Differential entropy of y | A for a Gaussian.
        conditionalVariance = SensorPlacement.__conditionalVariance(cov, y, A)
        return 0.5 * np.log(2*np.pi*conditionalVariance)

    @staticmethod
    def __localConditionalEntropy(cov, y, A, epsilon):
        # Entropy of y conditioned only on points correlated above epsilon.
        A_ = SensorPlacement.__localSet(cov, y, A, epsilon)
        return SensorPlacement.__conditionalEntropy(cov, y, A_)

    @staticmethod
    def __localConditionalVariance(cov, y, A, epsilon):
        # Variance of y conditioned only on points correlated above epsilon.
        A_ = SensorPlacement.__localSet(cov, y, A, epsilon)
        return SensorPlacement.__conditionalVariance(cov, y, A_)

    @staticmethod
    def __localSet(cov, y, A, epsilon):
        # Points in A whose covariance with y exceeds epsilon.
        return [x for x in A if cov[y, x] > epsilon]

    @staticmethod
    def naiveSensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
        # Greedy mutual-information placement; the result is the chosen
        # indices doubled (presumably a grid-resolution mapping — confirm).
        print('Algorithm is starting for subdomain', subdomain, flush=True)
        A = A
        for j in range(k):
            S_A = np.setdiff1d(S, A).astype(int)
            delta = np.array([])
            for y in S_A:
                AHat = np.setdiff1d(V, np.append(A, [y]))
                # Gain surrogate: var(y|A) / var(y|AHat).
                delta = np.append(delta, SensorPlacement.__conditionalVariance(cov, y, A) / \
                                  SensorPlacement.__conditionalVariance(cov, y, AHat))
            y_star = S_A[np.argmax(delta)]
            A = np.append(A, y_star).astype(int)
            print('subdomain ', subdomain, ': ', A, flush=True)
        if subdomain != None:
            output.put((subdomain, 2*A))
        return 2*A

    @staticmethod
    def lazySensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
        # Lazy-greedy variant: a priority queue avoids re-scoring every
        # candidate each round (O(k*n^4) -> O(k*n^3)).
        print('Algorithm is starting for subdomain', subdomain, flush=True)
        A = A
        delta = -1 * np.inf * np.ones((len(S), 1))
        heap = [(delta[i], S[i], -1) for i in range(len(delta))]
        heapq.heapify(heap)
        for j in range(k):
            while True:
                delta_star, y_star, current = heapq.heappop(heap)
                if current == j:
                    # Score is up to date for this round: greedy pick.
                    break
                AHat = np.setdiff1d(V, np.append(A, [y_star]))
                criterion = SensorPlacement.__conditionalVariance(cov, y_star, A) / \
                    SensorPlacement.__conditionalVariance(cov, y_star, AHat)
                heapq.heappush(heap, (-1 * criterion, y_star, j))
            A = np.append(A, y_star).astype(int)
            print('subdomain ', subdomain, ': ', 2*A, flush=True)
        if subdomain != None:
            output.put((subdomain, 2*A))
        return 2*A

    @staticmethod
    def localKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
        # Local-kernel variant: covariances below epsilon are treated as 0.
        print('Algorithm is starting for subdomain', subdomain, flush=True)
        A = A
        epsilon = 1e-10
        delta = np.array([]); N = S
        for y in S:
            V_y = np.setdiff1d(V, y).astype(int)
            delta = np.append(delta, cov[y, y] / SensorPlacement.__localConditionalVariance(cov, y, V_y, epsilon))
        for j in range(k):
            y_star = N[np.argmax(delta)]
            A = np.append(A, y_star).astype(int)
            print('subdomain ', subdomain, ': ', A, flush=True)
            # NOTE(review): this __localSet result is immediately
            # overwritten by the setdiff1d below — dead code; confirm
            # whether only the local neighbourhood should be re-scored.
            N = SensorPlacement.__localSet(cov, y_star, S, epsilon)
            N = np.setdiff1d(S, A).astype(int)
            delta = np.array([])
            for y in N:
                AHat = np.setdiff1d(V, np.append(A, [y]))
                delta = np.append(delta, SensorPlacement.__localConditionalVariance(cov, y, A, epsilon) / \
                                  SensorPlacement.__localConditionalVariance(cov, y, AHat, epsilon))
        if subdomain != None:
            output.put((subdomain, 2*A))
        return 2*A

    @staticmethod
    def lazyLocalKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
        # Combination of the lazy (priority queue) and local-kernel variants.
        print('Algorithm is starting for subdomain', subdomain, flush=True)
        A = A
        epsilon = 1e-10
        delta = -1 * np.inf * np.ones((len(S), 1))
        heap = [(delta[i], S[i], -1) for i in range(len(delta))]
        heapq.heapify(heap)
        for j in range(k):
            while True:
                delta_star, y_star, current = heapq.heappop(heap)
                if current == j:
                    break
                AHat = np.setdiff1d(V, np.append(A, [y_star]))
                criterion = SensorPlacement.__localConditionalVariance(cov, y_star, A, epsilon) / \
                    SensorPlacement.__localConditionalVariance(cov, y_star, AHat, epsilon)
                heapq.heappush(heap, (-1 * criterion, y_star, j))
            A = np.append(A, y_star).astype(int)
            print('subdomain ', subdomain, ': ', A, flush=True)
        if subdomain != None:
            output.put((subdomain, 2*A))
        return 2*A
| true | true |
1c4778cd6ee4e3e7a884ff4789b58f8fe5d8053a | 1,178 | py | Python | prime_numbers_test.py | mkiterian/prime-numbers | be8b3b1250ec8351964c2ef93f8d5e6463efcc7b | [
"MIT"
] | null | null | null | prime_numbers_test.py | mkiterian/prime-numbers | be8b3b1250ec8351964c2ef93f8d5e6463efcc7b | [
"MIT"
] | null | null | null | prime_numbers_test.py | mkiterian/prime-numbers | be8b3b1250ec8351964c2ef93f8d5e6463efcc7b | [
"MIT"
] | null | null | null | import unittest
from prime_numbers import generate_prime_numbers
class PrimeNumberTest(unittest.TestCase):
    """Unit tests for generate_prime_numbers."""

    def test_n_is_an_integer(self):
        """A non-integer argument must raise TypeError."""
        with self.assertRaises(TypeError, msg='n is not an integer'):
            generate_prime_numbers('number')

    def test_if_number_is_a_positive_integer(self):
        """A negative bound yields an explanatory message instead of primes."""
        result = generate_prime_numbers(-10)
        self.assertEqual(result, 'N should be a positive integer', msg='Number Should be a positive integer')

    def test_if_returned_value_is_a_list(self):
        """The function returns a plain list for a valid bound."""
        self.assertIsInstance(generate_prime_numbers(10), list)

    def test_if_number_of_returned_numbers_is_correct(self):
        """There are exactly five primes up to and including 11."""
        count = len(generate_prime_numbers(11))
        self.assertEqual(count, 5, msg='Number of returned items is not as expected')

    def test_generates_correct_prime_numbers(self):
        """The primes below 10 are exactly [2, 3, 5, 7]."""
        primes = generate_prime_numbers(10)
        self.assertEqual(primes, [2,3,5,7], msg='Expected [2,3,5,7] when n is 10')
# Allow running this test module directly: python prime_numbers_test.py
if __name__ == '__main__':
    unittest.main()
| 33.657143 | 124 | 0.77674 | import unittest
from prime_numbers import generate_prime_numbers
class PrimeNumberTest(unittest.TestCase):
    # Unit tests for generate_prime_numbers (comment-stripped copy of the suite above).
    def test_n_is_an_integer(self):
        # A non-integer argument is expected to raise TypeError.
        with self.assertRaises(TypeError, msg='n is not an integer'):
            generate_prime_numbers('number')
    def test_if_number_is_a_positive_integer(self):
        # A negative bound returns an explanatory message rather than primes.
        self.assertEqual(generate_prime_numbers(-10), 'N should be a positive integer', msg='Number Should be a positive integer')
    def test_if_returned_value_is_a_list(self):
        # The result must be a plain list.
        self.assertIsInstance(generate_prime_numbers(10), list)
    def test_if_number_of_returned_numbers_is_correct(self):
        # Exactly five primes are expected up to and including 11 (2, 3, 5, 7, 11).
        actual = len(generate_prime_numbers(11))
        expected = 5
        self.assertEqual(actual, expected, msg='Number of returned items is not as expected')
    def test_generates_correct_prime_numbers(self):
        # The primes below 10 are exactly [2, 3, 5, 7].
        actual = generate_prime_numbers(10)
        expected = [2,3,5,7]
        self.assertEqual(actual, expected, msg='Expected [2,3,5,7] when n is 10')
# Allow running this test module directly: python prime_numbers_test.py
if __name__ == '__main__':
    unittest.main()
| true | true |
1c4779a4e3f7663805d73bbf5c2232d96cc76f28 | 1,621 | py | Python | axley/cogs/misc.py | 1olipop/Axley | 9ace6706be58c2a8e066a0dbcdcc337b34cc5da7 | [
"Apache-2.0"
] | 18 | 2021-05-08T10:28:34.000Z | 2021-12-30T16:44:19.000Z | axley/cogs/misc.py | vedrecide/Axley | 9ace6706be58c2a8e066a0dbcdcc337b34cc5da7 | [
"Apache-2.0"
] | 1 | 2021-07-05T13:07:20.000Z | 2021-07-05T13:07:20.000Z | axley/cogs/misc.py | 1olipop/Axley | 9ace6706be58c2a8e066a0dbcdcc337b34cc5da7 | [
"Apache-2.0"
] | 6 | 2021-06-01T15:31:10.000Z | 2021-07-21T17:17:36.000Z | import discord
import psutil
import os
from discord.ext import commands
class Misc(commands.Cog):
    """Miscellaneous commands: latency check, source link and bot statistics."""

    def __init__(self, bot):
        self.bot = bot
        # Handle on our own process so `stats` can report memory usage.
        self.process = psutil.Process(os.getpid())

    @commands.command(name="Ping", description="Ping of the bot")
    @commands.guild_only()
    async def ping(self, ctx: commands.Context):
        """Reply with the websocket latency in milliseconds."""
        latency_ms = round(self.bot.latency * 1000)
        await ctx.message.reply(
            "**Pong!** `{}ms`".format(latency_ms),
            mention_author=False,
        )

    @commands.command(name="Source", description="Source code of Axley <3")
    @commands.guild_only()
    async def source(self, ctx: commands.Context):
        """Embed a link to the public GitHub repository."""
        repo_embed = discord.Embed(
            color=0xD9E6D1, description=f"[Click Me!]({self.bot.github_repo})"
        )
        repo_embed.set_footer(text="Kindly go through the LICENSE file in the repository before blindy checking and copying the codes")
        await ctx.message.reply(embed=repo_embed, mention_author=False)

    @commands.command(
        name="Stats",
        aliases=["Botstats", "Botinfo"],
        description="You can check bot statistics using this command",
    )
    @commands.guild_only()
    async def stats(self, ctx: commands.Context):
        """Report resident memory (MB) and the number of registered commands."""
        ram_usage = self.process.memory_full_info().rss / 1024 ** 2
        command_count = len([a.name for a in self.bot.commands])
        stats_embed = discord.Embed(
            color=0xD9E6D1,
            description="> **RAM:** {:.2f} MB\n> **Commands:** {}\n".format(
                ram_usage, command_count
            ),
        )
        await ctx.message.reply(embed=stats_embed, mention_author=False)
def setup(bot):
    """Extension entry point called by discord.py's load_extension."""
    cog = Misc(bot)
    bot.add_cog(cog)
| 31.173077 | 130 | 0.623689 | import discord
import psutil
import os
from discord.ext import commands
class Misc(commands.Cog):
    # Miscellaneous commands: latency check, source link and bot statistics.
    def __init__(self, bot):
        self.bot = bot
        # Handle on this process so `stats` can report resident memory.
        self.process = psutil.Process(os.getpid())
    @commands.command(name="Ping", description="Ping of the bot")
    @commands.guild_only()
    async def ping(self, ctx: commands.Context):
        # Reply with the websocket latency in milliseconds.
        await ctx.message.reply(
            "**Pong!** `{}ms`".format(round(self.bot.latency * 1000)),
            mention_author=False,
        )
    @commands.command(name="Source", description="Source code of Axley <3")
    @commands.guild_only()
    async def source(self, ctx: commands.Context):
        # Embed a link to the public GitHub repository.
        embed = discord.Embed(
            color=0xD9E6D1, description=f"[Click Me!]({self.bot.github_repo})"
        )
        embed.set_footer(text="Kindly go through the LICENSE file in the repository before blindy checking and copying the codes")
        await ctx.message.reply(embed=embed, mention_author=False)
    @commands.command(
        name="Stats",
        aliases=["Botstats", "Botinfo"],
        description="You can check bot statistics using this command",
    )
    @commands.guild_only()
    async def stats(self, ctx: commands.Context):
        # Report resident memory (MB) and the number of registered commands.
        ram_usage = self.process.memory_full_info().rss / 1024 ** 2
        embed = discord.Embed(
            color=0xD9E6D1,
            description="> **RAM:** {:.2f} MB\n> **Commands:** {}\n".format(
                ram_usage, len([a.name for a in self.bot.commands])
            ),
        )
        await ctx.message.reply(embed=embed, mention_author=False)
def setup(bot):
    # Extension entry point: discord.py calls this on load_extension.
    bot.add_cog(Misc(bot))
| true | true |
1c477b36fa5f2df5e41a132388aaaf828bdc48a6 | 5,661 | py | Python | resource/o2o1/examples.py | chenrushan/chenrushan.github.io | e97745d006b002e1f91c25a74859d313e5c197ed | [
"MIT"
] | null | null | null | resource/o2o1/examples.py | chenrushan/chenrushan.github.io | e97745d006b002e1f91c25a74859d313e5c197ed | [
"MIT"
] | null | null | null | resource/o2o1/examples.py | chenrushan/chenrushan.github.io | e97745d006b002e1f91c25a74859d313e5c197ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Author: juscodit@gmail.com
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import math
import sys
from math import pow
from math import exp
delta = 0.025
### ==============================
### Ellipse function (f(x) = x1^2 + 50 x2^2)
### ==============================
def func(p):
    """Objective f(x1, x2) = x1^2 + 50*x2^2 -- a strongly elongated ellipse."""
    x1, x2 = p
    return x1 ** 2 + 50 * x2 ** 2
def gr(p):
    """Gradient of func at p: (2*x1, 100*x2)."""
    return np.array([2 * p[0], 100 * p[1]])
def he(p):
    """Hessian of func; constant because func is quadratic."""
    hessian = np.matrix([[2, 0], [0, 100]])
    return hessian
# descent direction
def nddir(p):
    """Newton descent direction -H(p)^-1 * grad f(p), squeezed to 1-D."""
    inv_hessian = np.linalg.inv(he(p))
    step = np.dot(inv_hessian, gr(p))
    return np.squeeze(np.asarray(np.negative(step)))
# step length at point @p with respect to descent direction @dd
def alpha(p, dd):
    """Backtracking line search: shrink the step until the Armijo
    sufficient-decrease condition holds along direction dd."""
    step, shrink, c1 = 1, 0.3, 1e-4
    g = gr(p)
    candidate = p + step * dd
    while func(candidate) > func(p) + c1 * step * np.dot(g, dd):
        step *= shrink
        candidate = p + step * dd
    return step
def steepest_descent(p0):
    """Gradient descent with Armijo backtracking from p0.

    Returns the list of iterates (including p0) for plotting.
    """
    result = []
    result.append(p0)
    pk = p0
    i = 0
    while np.linalg.norm(gr(pk)) > 0.001:  # stop once the gradient is small
        print "step %d [%f, %f]" % (i, pk[0], pk[1])
        i += 1
        dd = -gr(pk)  # steepest-descent direction
        a = alpha(pk, dd)  # backtracking step length
        pk = pk + a * dd
        result.append(pk)
    print "step %d [%f, %f]" % (i, pk[0], pk[1])
    return result
def classical_newton(p0):
    """Classical Newton's method from p0 with a full step (no line search).

    Returns the list of iterates (including p0) for plotting.
    """
    result = []
    result.append(p0)
    pk = p0
    i = 0
    while np.linalg.norm(gr(pk)) > 0.001:  # stop once the gradient is small
        print "step %d [%f, %f]" % (i, pk[0], pk[1])
        i += 1
        dd = nddir(pk)  # Newton direction -H^-1 g
        # a = alpha(pk, dd)
        pk = pk + dd  # full step: line search above deliberately disabled
        result.append(pk)
    print "step %d [%f, %f]" % (i, pk[0], pk[1])
    return result
# initial point shared by both methods
p0 = (-2.5, 0.05)
# get result of classical newton
nres = classical_newton(p0)
# get result of steepest descent
sres = steepest_descent(p0)
# contour grid of the ellipse f(x) = x1^2 + 50 x2^2
x = np.arange(-3, 3, delta)
y = np.arange(-0.7, 0.7, delta)
X, Y = np.meshgrid(x, y)
Z = X ** 2 + 50 * Y ** 2
# plot figure for classical newton (converges in a single step)
fig = plt.figure()
plt.contour(X, Y, Z, [13, 9, 6, 4, 2, 1, 0.1])
plt.annotate("", xytext=nres[0], xy=nres[1], arrowprops=dict(arrowstyle='->', linewidth=2, color="red"))
for r in nres:
    plt.plot(*r, marker='o', color='g')
plt.text(p0[0] - 0.3, p0[1] + 0.02, "(%.1f, %.1f)" % (p0[0], p0[1]), color="purple", fontsize=10)
plt.text(-1.5, 0.1, "1 step", fontsize=20)
plt.savefig("newton.png")
# plot figure for steepest descent (zig-zags along the narrow valley)
fig = plt.figure()
plt.contour(X, Y, Z, [13, 9, 6, 4, 2, 1, 0.1])
for i in xrange(len(sres) - 1):
    plt.annotate("", xytext=sres[i], xy=sres[i+1], arrowprops=dict(arrowstyle="-"))
for r in sres:
    plt.plot(*r, marker='o', color='g')
plt.text(p0[0] - 0.3, p0[1] + 0.02, "(%.1f, %.1f)" % (p0[0], p0[1]), color="purple", fontsize=10)
plt.text(-1.5, 0.15, "%d step" % len(sres), fontsize=20)
plt.savefig("steepest.png")
### ==============================
### Function x^2 and 10x^2
### ==============================
def steepest_gradient_x2(x0):
    """1-D gradient descent on f(x) = x^2 from x0.

    Returns the list of (x, f(x)) iterates for plotting.
    """
    def func(x):
        return x ** 2
    def gr(x):
        return 2 * x
    def alpha(x, d):
        # Backtracking (Armijo) line search along direction d.
        a, r, c1 = 1, 0.3, 1e-4
        while func(x + a * d) > func(x) + c1 * a * gr(x) * d:
            a *= r
        return a
    def ddir(x):
        # Steepest-descent direction.
        return -gr(x)
    result = []
    xk = x0
    i = 1
    while np.linalg.norm(gr(xk)) > 0.001:  # stop once the gradient is small
        print "step %d [%f] loss [%f]" % (i, xk, func(xk))
        i += 1
        result.append((xk, func(xk)))
        dd = ddir(xk)
        a = alpha(xk, dd)
        xk = xk + a * dd
    result.append((xk, func(xk)))
    return result
def steepest_gradient_10x2(x0):
    """1-D gradient descent on f(x) = 10*x^2 from x0.

    Identical to steepest_gradient_x2 except for the objective's curvature;
    returns the list of (x, f(x)) iterates for plotting.
    """
    def func(x):
        return 10 * x ** 2
    def gr(x):
        return 20 * x
    def alpha(x, d):
        # Backtracking (Armijo) line search along direction d.
        a, r, c1 = 1, 0.3, 1e-4
        while func(x + a * d) > func(x) + c1 * a * gr(x) * d:
            a *= r
        return a
    def ddir(x):
        # Steepest-descent direction.
        return -gr(x)
    result = []
    xk = x0
    i = 1
    while np.linalg.norm(gr(xk)) > 0.001:  # stop once the gradient is small
        print "step %d [%f] loss [%f]" % (i, xk, func(xk))
        i += 1
        result.append((xk, func(xk)))
        dd = ddir(xk)
        a = alpha(xk, dd)
        xk = xk + a * dd
    result.append((xk, func(xk)))
    return result
# 1-D demos: same start point, two curvatures
x = np.arange(-1.6, 1.6, delta)
y1 = x ** 2
y2 = 10 * x ** 2
# plot for steepest descent on x^2
fig = plt.figure()
plt.plot(x, y1, linewidth=2)
plt.ylim(0, 2.8)
sg_x2_res = steepest_gradient_x2(-1.5)
for i in xrange(len(sg_x2_res) - 1):
    plt.annotate("", xytext=sg_x2_res[i], xy=sg_x2_res[i+1], arrowprops=dict(arrowstyle="->"))
for r in sg_x2_res:
    plt.plot(*r, marker='o', color='g')
plt.text(-1.0, 2.5, "$f(x) = x^2$", fontsize=25)
plt.text(-1.0, 2.15, "%d step" % len(sg_x2_res), fontsize=20)
plt.savefig('steepest_x2.png')
# plot for steepest descent on 10x^2
fig = plt.figure()
plt.plot(x, y2, linewidth=2)
plt.ylim(0, 28)
sg_10x2_res = steepest_gradient_10x2(-1.5)
for i in xrange(len(sg_10x2_res) - 1):
    plt.annotate("", xytext=sg_10x2_res[i], xy=sg_10x2_res[i+1], arrowprops=dict(arrowstyle="->"))
for r in sg_10x2_res:
    plt.plot(*r, marker='o', color='g')
plt.text(-1.0, 25, "$f(x) = 10 x^2$", fontsize=25)
plt.text(-1.0, 21.5, "%d step" % len(sg_10x2_res), fontsize=20)
plt.savefig('steepest_10x2.png')
# plot for curvature axis: eigenvectors of the Hessian of 4x1^2 + 2x2^2 - 4x1x2
x = np.arange(-3, 3, delta)
y = np.arange(-3, 3, delta)
X, Y = np.meshgrid(x, y)
Z = 4 * X ** 2 + 2 * Y ** 2 - 4 * X * Y
w, v = np.linalg.eig(np.array([[8, -4], [-4, 4]]))
print w, v[0], v[1]
plt.figure(figsize=(4, 4))
plt.contour(X, Y, Z, [13, 9, 6, 4, 2, 1, 0.1])
# draw both eigenvector directions from the origin
plt.annotate("", xytext=(0, 0), xy=(v[0, 1], v[1, 1]), arrowprops=dict(arrowstyle="->", color="red", linewidth=1))
plt.annotate("", xytext=(0, 0), xy=(2 * v[0, 0], 2 * v[1, 0]), arrowprops=dict(arrowstyle="->", color="red", linewidth=1))
plt.plot(0, 0, marker='o', color='r')
plt.text(-2.5, 2.2, "$f(x) = 4x_1^2 + 2x_2^2 - 4x_1 x_2$", fontsize=13)
plt.text(0.7, 0.9, "curvature axis", color="red", fontsize=10)
plt.savefig('axis.png')
plt.show()
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import math
import sys
from math import pow
from math import exp
delta = 0.025
)
heinv = np.linalg.inv(hessian)
return np.squeeze(np.asarray(np.negative(np.dot(heinv, gr(p)))))
def alpha(p, dd):
a, r, c1 = 1, 0.3, 1e-4
g = gr(p)
p1 = p + a * dd
while func(p1) > func(p) + c1 * a * np.dot(g, dd):
a *= r
p1 = p + a * dd
return a
def steepest_descent(p0):
result = []
result.append(p0)
pk = p0
i = 0
while np.linalg.norm(gr(pk)) > 0.001:
print "step %d [%f, %f]" % (i, pk[0], pk[1])
i += 1
dd = -gr(pk)
a = alpha(pk, dd)
pk = pk + a * dd
result.append(pk)
print "step %d [%f, %f]" % (i, pk[0], pk[1])
return result
def classical_newton(p0):
result = []
result.append(p0)
pk = p0
i = 0
while np.linalg.norm(gr(pk)) > 0.001:
print "step %d [%f, %f]" % (i, pk[0], pk[1])
i += 1
dd = nddir(pk)
pk = pk + dd
result.append(pk)
print "step %d [%f, %f]" % (i, pk[0], pk[1])
return result
p0 = (-2.5, 0.05)
nres = classical_newton(p0)
sres = steepest_descent(p0)
x = np.arange(-3, 3, delta)
y = np.arange(-0.7, 0.7, delta)
X, Y = np.meshgrid(x, y)
Z = X ** 2 + 50 * Y ** 2
fig = plt.figure()
plt.contour(X, Y, Z, [13, 9, 6, 4, 2, 1, 0.1])
plt.annotate("", xytext=nres[0], xy=nres[1], arrowprops=dict(arrowstyle='->', linewidth=2, color="red"))
for r in nres:
plt.plot(*r, marker='o', color='g')
plt.text(p0[0] - 0.3, p0[1] + 0.02, "(%.1f, %.1f)" % (p0[0], p0[1]), color="purple", fontsize=10)
plt.text(-1.5, 0.1, "1 step", fontsize=20)
plt.savefig("newton.png")
fig = plt.figure()
plt.contour(X, Y, Z, [13, 9, 6, 4, 2, 1, 0.1])
for i in xrange(len(sres) - 1):
plt.annotate("", xytext=sres[i], xy=sres[i+1], arrowprops=dict(arrowstyle="-"))
for r in sres:
plt.plot(*r, marker='o', color='g')
plt.text(p0[0] - 0.3, p0[1] + 0.02, "(%.1f, %.1f)" % (p0[0], p0[1]), color="purple", fontsize=10)
plt.text(-1.5, 0.15, "%d step" % len(sres), fontsize=20)
plt.savefig("steepest.png")
x) + c1 * a * gr(x) * d:
a *= r
return a
def ddir(x):
return -gr(x)
result = []
xk = x0
i = 1
while np.linalg.norm(gr(xk)) > 0.001:
print "step %d [%f] loss [%f]" % (i, xk, func(xk))
i += 1
result.append((xk, func(xk)))
dd = ddir(xk)
a = alpha(xk, dd)
xk = xk + a * dd
result.append((xk, func(xk)))
return result
def steepest_gradient_10x2(x0):
def func(x):
return 10 * x ** 2
def gr(x):
return 20 * x
def alpha(x, d):
a, r, c1 = 1, 0.3, 1e-4
while func(x + a * d) > func(x) + c1 * a * gr(x) * d:
a *= r
return a
def ddir(x):
return -gr(x)
result = []
xk = x0
i = 1
while np.linalg.norm(gr(xk)) > 0.001:
print "step %d [%f] loss [%f]" % (i, xk, func(xk))
i += 1
result.append((xk, func(xk)))
dd = ddir(xk)
a = alpha(xk, dd)
xk = xk + a * dd
result.append((xk, func(xk)))
return result
x = np.arange(-1.6, 1.6, delta)
y1 = x ** 2
y2 = 10 * x ** 2
fig = plt.figure()
plt.plot(x, y1, linewidth=2)
plt.ylim(0, 2.8)
sg_x2_res = steepest_gradient_x2(-1.5)
for i in xrange(len(sg_x2_res) - 1):
plt.annotate("", xytext=sg_x2_res[i], xy=sg_x2_res[i+1], arrowprops=dict(arrowstyle="->"))
for r in sg_x2_res:
plt.plot(*r, marker='o', color='g')
plt.text(-1.0, 2.5, "$f(x) = x^2$", fontsize=25)
plt.text(-1.0, 2.15, "%d step" % len(sg_x2_res), fontsize=20)
plt.savefig('steepest_x2.png')
fig = plt.figure()
plt.plot(x, y2, linewidth=2)
plt.ylim(0, 28)
sg_10x2_res = steepest_gradient_10x2(-1.5)
for i in xrange(len(sg_10x2_res) - 1):
plt.annotate("", xytext=sg_10x2_res[i], xy=sg_10x2_res[i+1], arrowprops=dict(arrowstyle="->"))
for r in sg_10x2_res:
plt.plot(*r, marker='o', color='g')
plt.text(-1.0, 25, "$f(x) = 10 x^2$", fontsize=25)
plt.text(-1.0, 21.5, "%d step" % len(sg_10x2_res), fontsize=20)
plt.savefig('steepest_10x2.png')
x = np.arange(-3, 3, delta)
y = np.arange(-3, 3, delta)
X, Y = np.meshgrid(x, y)
Z = 4 * X ** 2 + 2 * Y ** 2 - 4 * X * Y
w, v = np.linalg.eig(np.array([[8, -4], [-4, 4]]))
print w, v[0], v[1]
plt.figure(figsize=(4, 4))
plt.contour(X, Y, Z, [13, 9, 6, 4, 2, 1, 0.1])
plt.annotate("", xytext=(0, 0), xy=(v[0, 1], v[1, 1]), arrowprops=dict(arrowstyle="->", color="red", linewidth=1))
plt.annotate("", xytext=(0, 0), xy=(2 * v[0, 0], 2 * v[1, 0]), arrowprops=dict(arrowstyle="->", color="red", linewidth=1))
plt.plot(0, 0, marker='o', color='r')
plt.text(-2.5, 2.2, "$f(x) = 4x_1^2 + 2x_2^2 - 4x_1 x_2$", fontsize=13)
plt.text(0.7, 0.9, "curvature axis", color="red", fontsize=10)
plt.savefig('axis.png')
plt.show()
| false | true |
1c477bb7d2693680a90d4f6220d45872d11fc4b0 | 1,663 | py | Python | vm.py | Ccode-lang/CHex | f8138da241a8b96fae5691de7a9d789a9dbcbeb2 | [
"MIT"
] | 1 | 2022-01-31T18:36:36.000Z | 2022-01-31T18:36:36.000Z | vm.py | Ccode-lang/CHex | f8138da241a8b96fae5691de7a9d789a9dbcbeb2 | [
"MIT"
] | null | null | null | vm.py | Ccode-lang/CHex | f8138da241a8b96fae5691de7a9d789a9dbcbeb2 | [
"MIT"
] | null | null | null | import os
import sys
import codecs
try:
file = open(sys.argv[1], "rb")
except:
print("File does not exist or is not given.")
sys.exit()
bytecode = file.read()
file.close()
bytecode = list(bytecode)
hexcode = []
for dec in bytecode:
hexcode += [hex(dec)]
# print(hexcode)
# magic number check
if hexcode[0] == "0x68" and hexcode[1] == "0x69":
pass
else:
print("Not a CHex bianary file.")
# set offset to 2 because of magic number
offset = 2
# init mem
memory = {}
while True:
try:
hex = hexcode[offset]
except:
sys.exit()
# blank hex
if hex == "0x0":
offset += 1
# print ascii from memory
elif hex == "0x1":
# print(memory[int(hexcode[offset + 1][2:], 16)][2:])
hexval = memory[int(hexcode[offset + 1][2:], 16)][2:]
if not len(hexval) == 2:
hexval = "0" + hexval
print(str(codecs.decode(hexval, "hex"), "utf-8"), end="")
offset += 2
# same as asm jmp
elif hex == "0x2":
offset = int(hexcode[offset + 1], 16)
# store value in mem
elif hex == "0x3":
memory[int(hexcode[offset + 1], 16)] = hexcode[offset + 2]
offset += 3
# jump to hex stored in memory
elif hex == "0x4":
offset = int(memory[int(hexcode[offset + 1], 16)], 16)
# check if values in memory are equal and jump if so
elif hex == "0x5":
if int(memory[int(hexcode[offset + 1], 16)], 16) == int(memory[int(hexcode[offset + 2], 16)], 16):
offset = int(hexcode[offset + 3], 16)
else:
offset += 4
else:
print("Unknown hex at offset: " + str(offset))
sys.exit()
| 27.262295 | 106 | 0.556825 | import os
import sys
import codecs
try:
file = open(sys.argv[1], "rb")
except:
print("File does not exist or is not given.")
sys.exit()
bytecode = file.read()
file.close()
bytecode = list(bytecode)
hexcode = []
for dec in bytecode:
hexcode += [hex(dec)]
if hexcode[0] == "0x68" and hexcode[1] == "0x69":
pass
else:
print("Not a CHex bianary file.")
offset = 2
memory = {}
while True:
try:
hex = hexcode[offset]
except:
sys.exit()
if hex == "0x0":
offset += 1
elif hex == "0x1":
hexval = memory[int(hexcode[offset + 1][2:], 16)][2:]
if not len(hexval) == 2:
hexval = "0" + hexval
print(str(codecs.decode(hexval, "hex"), "utf-8"), end="")
offset += 2
elif hex == "0x2":
offset = int(hexcode[offset + 1], 16)
elif hex == "0x3":
memory[int(hexcode[offset + 1], 16)] = hexcode[offset + 2]
offset += 3
elif hex == "0x4":
offset = int(memory[int(hexcode[offset + 1], 16)], 16)
elif hex == "0x5":
if int(memory[int(hexcode[offset + 1], 16)], 16) == int(memory[int(hexcode[offset + 2], 16)], 16):
offset = int(hexcode[offset + 3], 16)
else:
offset += 4
else:
print("Unknown hex at offset: " + str(offset))
sys.exit()
| true | true |
1c477bc4296ae17f76dbbd9dad1779671e3a34ae | 9,426 | py | Python | _backend_api/migrations/0022_initial.py | Amechi101/indieapp | 606c1346f65c343eb2cc8f7fba9d555b8c30a7fa | [
"MIT"
] | null | null | null | _backend_api/migrations/0022_initial.py | Amechi101/indieapp | 606c1346f65c343eb2cc8f7fba9d555b8c30a7fa | [
"MIT"
] | null | null | null | _backend_api/migrations/0022_initial.py | Amechi101/indieapp | 606c1346f65c343eb2cc8f7fba9d555b8c30a7fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Brand'
db.create_table(u'_backend_api_brand', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('brand_name', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True, null=True, blank=True)),
('brand_founded', self.gf('django.db.models.fields.IntegerField')(max_length=4, null=True)),
('brand_origin_city', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('brand_origin_state', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
('brand_about_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('brand_collection_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, unique=True, null=True, blank=True)),
('brand_logo', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
('brand_feature_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
('brand_about_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
('brand_collection_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
('brand_connect_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
('brand_website_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('brand_email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('brand_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
('brand_location_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
('brand_email_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
('brand_website_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
('menswear', self.gf('django.db.models.fields.BooleanField')(default=False)),
('womenswear', self.gf('django.db.models.fields.BooleanField')(default=False)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal(u'_backend_api', ['Brand'])
# Adding model 'Product'
db.create_table(u'_backend_api_product', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('product_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('product_price', self.gf('django.db.models.fields.DecimalField')(default='0.0', max_digits=30, decimal_places=2)),
('product_image', self.gf('cloudinary.models.CloudinaryField')(max_length=255, null=True, blank=True)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('brand', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['_backend_api.Brand'], null=True)),
))
db.send_create_signal(u'_backend_api', ['Product'])
# Adding model 'Location'
db.create_table(u'_backend_api_location', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('brand_address', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('brand_city', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('brand_state', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
('brand', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['_backend_api.Brand'], null=True)),
))
db.send_create_signal(u'_backend_api', ['Location'])
    def backwards(self, orm):
        """Drop the tables created by forwards (reverse migration)."""
        # Deleting model 'Brand'
        db.delete_table(u'_backend_api_brand')
        # Deleting model 'Product'
        db.delete_table(u'_backend_api_product')
        # Deleting model 'Location'
        db.delete_table(u'_backend_api_location')
models = {
u'_backend_api.brand': {
'Meta': {'object_name': 'Brand'},
'brand_about_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'brand_about_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'brand_collection_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'brand_collection_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'brand_connect_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'brand_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'brand_email_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'brand_feature_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'brand_founded': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True'}),
'brand_location_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'brand_logo': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'brand_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'brand_origin_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'brand_origin_state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'brand_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'brand_website_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'brand_website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'menswear': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'womenswear': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'_backend_api.location': {
'Meta': {'object_name': 'Location'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['_backend_api.Brand']", 'null': 'True'}),
'brand_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'brand_city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'brand_state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'_backend_api.product': {
'Meta': {'object_name': 'Product'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['_backend_api.Brand']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'product_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'product_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'product_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '30', 'decimal_places': '2'})
}
}
complete_apps = ['_backend_api'] | 78.55 | 142 | 0.619032 |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the Brand, Product and Location tables (South forward migration)."""
        # Adding model 'Brand'
        db.create_table(u'_backend_api_brand', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('brand_name', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True, null=True, blank=True)),
            ('brand_founded', self.gf('django.db.models.fields.IntegerField')(max_length=4, null=True)),
            ('brand_origin_city', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('brand_origin_state', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
            ('brand_about_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('brand_collection_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, unique=True, null=True, blank=True)),
            ('brand_logo', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_feature_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_about_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_collection_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_connect_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_website_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('brand_email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
            ('brand_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('brand_location_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('brand_email_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('brand_website_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('menswear', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('womenswear', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
        ))
        db.send_create_signal(u'_backend_api', ['Brand'])
        # Adding model 'Product'
        db.create_table(u'_backend_api_product', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('product_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('product_price', self.gf('django.db.models.fields.DecimalField')(default='0.0', max_digits=30, decimal_places=2)),
            ('product_image', self.gf('cloudinary.models.CloudinaryField')(max_length=255, null=True, blank=True)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
            ('brand', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['_backend_api.Brand'], null=True)),
        ))
        db.send_create_signal(u'_backend_api', ['Product'])
        # Adding model 'Location'
        db.create_table(u'_backend_api_location', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('brand_address', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('brand_city', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
            ('brand_state', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
            ('brand', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['_backend_api.Brand'], null=True)),
        ))
        db.send_create_signal(u'_backend_api', ['Location'])
    def backwards(self, orm):
        # Reverse of forwards(): drop the three tables this migration
        # created (South runs this when migrating backwards).
        db.delete_table(u'_backend_api_brand')
        db.delete_table(u'_backend_api_product')
        db.delete_table(u'_backend_api_location')
    # South's frozen snapshot of the app's models at the time this
    # migration was written; used by later migrations' ORM, not at runtime.
    models = {
        u'_backend_api.brand': {
            'Meta': {'object_name': 'Brand'},
            'brand_about_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'brand_about_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_collection_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'brand_collection_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_connect_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'brand_email_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_feature_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_founded': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True'}),
            'brand_location_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_logo': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'brand_origin_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'brand_origin_state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'brand_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_website_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'menswear': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'womenswear': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'_backend_api.location': {
            'Meta': {'object_name': 'Location'},
            'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['_backend_api.Brand']", 'null': 'True'}),
            'brand_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'brand_city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'brand_state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'_backend_api.product': {
            'Meta': {'object_name': 'Product'},
            'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['_backend_api.Brand']", 'null': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'product_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'product_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'product_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '30', 'decimal_places': '2'})
        }
    }
    # Restrict this migration to the '_backend_api' app.
    complete_apps = ['_backend_api']
1c477c54727b29435a21a6019d3960076fc447e1 | 4,706 | py | Python | nibabel/minc2.py | tobon/nibabel | ff2b5457207bb5fd6097b08f7f11123dc660fda7 | [
"BSD-3-Clause"
] | null | null | null | nibabel/minc2.py | tobon/nibabel | ff2b5457207bb5fd6097b08f7f11123dc660fda7 | [
"BSD-3-Clause"
] | null | null | null | nibabel/minc2.py | tobon/nibabel | ff2b5457207bb5fd6097b08f7f11123dc660fda7 | [
"BSD-3-Clause"
] | null | null | null | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Preliminary MINC2 support
Use with care; I haven't tested this against a wide range of MINC files.
If you have a file that isn't read correctly, please send an example.
Test reading with something like::
import nibabel as nib
img = nib.load('my_funny.mnc')
data = img.get_data()
print(data.mean())
print(data.max())
print(data.min())
and compare against command line output of::
mincstats my_funny.mnc
"""
import numpy as np
from .optpkg import optional_package
h5py, have_h5py, setup_module = optional_package('h5py')
from .minc1 import Minc1File, Minc1Image, MincError
class Hdf5Bunch(object):
    """Expose the HDF5 attributes of a variable as plain Python attributes."""

    def __init__(self, var):
        # Copy every (name, value) pair from the variable's attribute
        # mapping onto this instance so ``bunch.name`` works directly.
        for attr_name, attr_value in var.attrs.items():
            setattr(self, attr_name, attr_value)
class Minc2File(Minc1File):
    ''' Class to wrap MINC2 format file

    Although it has some of the same methods as a ``Header``, we use
    this only when reading a MINC2 file, to pull out useful header
    information, and for the method of reading the data out
    '''
    def __init__(self, mincfile):
        """Wrap an already-open ``h5py.File`` *mincfile* with MINC2 content."""
        self._mincfile = mincfile
        minc_part = mincfile['minc-2.0']
        # The whole image is the first of the entries in 'image'
        image = minc_part['image']['0']
        self._image = image['image']
        self._dim_names = self._get_dimensions(self._image)
        dimensions = minc_part['dimensions']
        self._dims = [Hdf5Bunch(dimensions[s]) for s in self._dim_names]
        # We don't currently support irregular spacing
        # http://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Dimension_variable_attributes
        for dim in self._dims:
            if dim.spacing != b'regular__':
                raise ValueError('Irregular spacing not supported')
        self._spatial_dims = [name for name in self._dim_names
                              if name.endswith('space')]
        self._image_max = image['image-max']
        self._image_min = image['image-min']

    def _get_dimensions(self, var):
        # Dimensions for a particular variable
        # Differs for MINC1 and MINC2 - see:
        # http://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Associating_HDF5_dataspaces_with_MINC_dimensions
        return var.attrs['dimorder'].split(',')

    def get_data_dtype(self):
        """Return the numpy dtype of the image dataset."""
        return self._image.dtype

    def get_data_shape(self):
        """Return the shape of the image dataset."""
        return self._image.shape

    def _get_valid_range(self):
        ''' Return valid range for image data

        The valid range can come from the image 'valid_range' or
        failing that, from the data type range
        '''
        ddt = self.get_data_dtype()
        # NOTE(review): assumes an integer dtype -- ``np.iinfo`` raises for
        # floating-point images; confirm against the expected inputs.
        info = np.iinfo(ddt.type)
        try:
            valid_range = self._image.attrs['valid_range']
        except (AttributeError, KeyError):
            # h5py raises KeyError for a missing attribute; AttributeError
            # is kept as well for older/alternative attrs interfaces.
            valid_range = [info.min, info.max]
        else:
            if valid_range[0] < info.min or valid_range[1] > info.max:
                raise ValueError('Valid range outside input '
                                 'data type range')
        # ``np.float`` was deprecated in NumPy 1.20 and later removed; the
        # builtin ``float`` is the equivalent (float64) dtype specifier.
        return np.asarray(valid_range, dtype=float)

    def get_scaled_data(self):
        """Return the full image array, scaled via ``Minc1File._normalize``."""
        data = np.asarray(self._image)
        return self._normalize(data)
class Minc2Image(Minc1Image):
    ''' Class for MINC2 images

    The MINC2 image class uses the default header type, rather than a
    specific MINC header type - and reads the relevant information from
    the MINC file on load.
    '''
    # MINC2 does not do compressed whole files
    _compressed_exts = ()

    @classmethod
    def from_file_map(klass, file_map):
        """Build a Minc2Image from *file_map* (needs an on-disk filename)."""
        holder = file_map['image']
        if holder.filename is None:
            # h5py can only open a named file, not an in-memory fileobj
            raise MincError('MINC2 needs filename for load')
        minc_file = Minc2File(h5py.File(holder.filename, 'r'))
        affine = minc_file.get_affine()
        if affine.shape != (4, 4):
            raise MincError('Image does not have 3 spatial dimensions')
        data_dtype = minc_file.get_data_dtype()
        shape = minc_file.get_data_shape()
        zooms = minc_file.get_zooms()
        header = klass.header_class(data_dtype, shape, zooms)
        # Data are read lazily through the array proxy on first access
        data = klass.ImageArrayProxy(minc_file)
        return klass(data, affine, header, extra=None, file_map=file_map)

# Module-level convenience alias, matching nibabel's per-format modules.
load = Minc2Image.load
| 35.383459 | 132 | 0.632172 | true | true | |
1c477c95b5afb69f25ef37ab384ae3c2d5026cb5 | 4,980 | py | Python | tests/integrationtest/api/test_guards.py | RasmusGodske/eo-platform-utils | 4d7c5bdc102d1eb7a5edff096f2783dbdbaa283d | [
"Apache-2.0"
] | null | null | null | tests/integrationtest/api/test_guards.py | RasmusGodske/eo-platform-utils | 4d7c5bdc102d1eb7a5edff096f2783dbdbaa283d | [
"Apache-2.0"
] | null | null | null | tests/integrationtest/api/test_guards.py | RasmusGodske/eo-platform-utils | 4d7c5bdc102d1eb7a5edff096f2783dbdbaa283d | [
"Apache-2.0"
] | null | null | null | from typing import List
from uuid import uuid4
import pytest
from flask.testing import FlaskClient
from origin.api import (
Application,
EndpointGuard,
TokenGuard,
ScopedGuard,
)
from .endpoints import EmptyEndpoint
class TestGuards:
    """
    Integration tests for endpoint guards.

    TokenGuard requires a valid bearer token; ScopedGuard additionally
    requires the token to carry specific scope(s).
    """
    @pytest.mark.parametrize('guard', [
        TokenGuard(),
        ScopedGuard('scope1'),
    ])
    def test__provide_no_token__should_return_status_401(
            self,
            guard: EndpointGuard,
            app: Application,
            client: FlaskClient,
    ):
        """
        A request with no Authorization header is rejected with 401.
        """
        # -- Arrange ---------------------------------------------------------
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[guard],
        )
        # -- Act -------------------------------------------------------------
        r = client.post('/something')
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 401
    @pytest.mark.parametrize('guard', [
        TokenGuard(),
        ScopedGuard('scope1'),
    ])
    def test__provide_invalid_token__should_return_status_401(
            self,
            guard: EndpointGuard,
            app: Application,
            client: FlaskClient,
    ):
        """
        A request with a malformed/undecodable bearer token is rejected
        with 401.
        """
        # -- Arrange ---------------------------------------------------------
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[guard],
        )
        # -- Act -------------------------------------------------------------
        r = client.post(
            path='/something',
            headers={'Authorization': 'Bearer: NOT-A-VALID-TOKEN'},
        )
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 401
    @pytest.mark.parametrize('guard', [
        TokenGuard(),
        ScopedGuard('scope1'),
    ])
    def test__provide_valid_token__should_return_status_200(
            self,
            guard: EndpointGuard,
            app: Application,
            client: FlaskClient,
            valid_token_encoded: str,
    ):
        """
        A valid token passes both guard kinds (the fixture token is
        expected to carry 'scope1').
        """
        # -- Arrange ---------------------------------------------------------
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[guard],
        )
        # -- Act -------------------------------------------------------------
        r = client.post(
            path='/something',
            headers={'Authorization': f'Bearer: {valid_token_encoded}'},
        )
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 200
    def test__token_missing_required_scope__should_return_status_401(
            self,
            app: Application,
            client: FlaskClient,
            valid_token_encoded: str,
    ):
        """
        An otherwise valid token lacking the endpoint's required scope
        (a random scope the token cannot have) is rejected with 401.
        """
        # -- Arrange ---------------------------------------------------------
        required_scope = str(uuid4())  # Something random
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[ScopedGuard(required_scope)],
        )
        # -- Act -------------------------------------------------------------
        r = client.post(
            path='/something',
            headers={'Authorization': f'Bearer: {valid_token_encoded}'},
        )
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 401
    @pytest.mark.parametrize('guards', [
        [ScopedGuard('scope1')],
        [ScopedGuard('scope2')],
        [ScopedGuard('scope1', 'scope2')],
        [TokenGuard(), ScopedGuard('scope1')],
        [TokenGuard(), ScopedGuard('scope1', 'scope2')],
        [TokenGuard(), ScopedGuard('scope1'), ScopedGuard('scope2')],
    ])
    def test__token_has_required_scope__should_return_status_200(
            self,
            guards: List[EndpointGuard],
            app: Application,
            client: FlaskClient,
            valid_token_encoded: str,
    ):
        """
        A token carrying all required scopes passes any combination of
        guards (the fixture token is expected to carry scope1+scope2).
        """
        # -- Arrange ---------------------------------------------------------
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=guards,
        )
        # -- Act -------------------------------------------------------------
        r = client.post(
            path='/something',
            headers={'Authorization': f'Bearer: {valid_token_encoded}'},
        )
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 200
| 25.9375 | 78 | 0.41245 | from typing import List
from uuid import uuid4
import pytest
from flask.testing import FlaskClient
from origin.api import (
Application,
EndpointGuard,
TokenGuard,
ScopedGuard,
)
from .endpoints import EmptyEndpoint
class TestGuards:
@pytest.mark.parametrize('guard', [
TokenGuard(),
ScopedGuard('scope1'),
])
def test__provide_no_token__should_return_status_401(
self,
guard: EndpointGuard,
app: Application,
client: FlaskClient,
):
app.add_endpoint(
method='POST',
path='/something',
endpoint=EmptyEndpoint(),
guards=[guard],
)
r = client.post('/something')
assert r.status_code == 401
@pytest.mark.parametrize('guard', [
TokenGuard(),
ScopedGuard('scope1'),
])
def test__provide_invalid_token__should_return_status_401(
self,
guard: EndpointGuard,
app: Application,
client: FlaskClient,
):
app.add_endpoint(
method='POST',
path='/something',
endpoint=EmptyEndpoint(),
guards=[guard],
)
r = client.post(
path='/something',
headers={'Authorization': 'Bearer: NOT-A-VALID-TOKEN'},
)
assert r.status_code == 401
@pytest.mark.parametrize('guard', [
TokenGuard(),
ScopedGuard('scope1'),
])
def test__provide_valid_token__should_return_status_200(
self,
guard: EndpointGuard,
app: Application,
client: FlaskClient,
valid_token_encoded: str,
):
app.add_endpoint(
method='POST',
path='/something',
endpoint=EmptyEndpoint(),
guards=[guard],
)
r = client.post(
path='/something',
headers={'Authorization': f'Bearer: {valid_token_encoded}'},
)
assert r.status_code == 200
def test__token_missing_required_scope__should_return_status_401(
self,
app: Application,
client: FlaskClient,
valid_token_encoded: str,
):
required_scope = str(uuid4())
app.add_endpoint(
method='POST',
path='/something',
endpoint=EmptyEndpoint(),
guards=[ScopedGuard(required_scope)],
)
r = client.post(
path='/something',
headers={'Authorization': f'Bearer: {valid_token_encoded}'},
)
assert r.status_code == 401
@pytest.mark.parametrize('guards', [
[ScopedGuard('scope1')],
[ScopedGuard('scope2')],
[ScopedGuard('scope1', 'scope2')],
[TokenGuard(), ScopedGuard('scope1')],
[TokenGuard(), ScopedGuard('scope1', 'scope2')],
[TokenGuard(), ScopedGuard('scope1'), ScopedGuard('scope2')],
])
def test__token_has_required_scope__should_return_status_200(
self,
guards: List[EndpointGuard],
app: Application,
client: FlaskClient,
valid_token_encoded: str,
):
app.add_endpoint(
method='POST',
path='/something',
endpoint=EmptyEndpoint(),
guards=guards,
)
r = client.post(
path='/something',
headers={'Authorization': f'Bearer: {valid_token_encoded}'},
)
assert r.status_code == 200
| true | true |
1c477d2e6f7e2a1431cc5681d3d4bbd7036d06ed | 191 | py | Python | alphapept/__init__.py | enryH/alphapept | a4a1155b820f3567e21a872e0883e653661efe2b | [
"Apache-2.0"
] | null | null | null | alphapept/__init__.py | enryH/alphapept | a4a1155b820f3567e21a872e0883e653661efe2b | [
"Apache-2.0"
] | null | null | null | alphapept/__init__.py | enryH/alphapept | a4a1155b820f3567e21a872e0883e653661efe2b | [
"Apache-2.0"
] | null | null | null | __version__ = "0.4.0"
__requirements__ = {
"": "requirements/requirements.txt",
"develop": "requirements/requirements_develop.txt",
"gui": "requirements/requirements_gui.txt",
}
| 23.875 | 55 | 0.696335 | __version__ = "0.4.0"
__requirements__ = {
"": "requirements/requirements.txt",
"develop": "requirements/requirements_develop.txt",
"gui": "requirements/requirements_gui.txt",
}
| true | true |
1c477dc103178022d9d4cec538afb84e72df6950 | 167 | py | Python | django_chatserver/chat/routing.py | zhiqiyu/Random-Web | 10b89776fbcdaa012e1f42a49a050d1b397b73a2 | [
"MIT"
] | null | null | null | django_chatserver/chat/routing.py | zhiqiyu/Random-Web | 10b89776fbcdaa012e1f42a49a050d1b397b73a2 | [
"MIT"
] | null | null | null | django_chatserver/chat/routing.py | zhiqiyu/Random-Web | 10b89776fbcdaa012e1f42a49a050d1b397b73a2 | [
"MIT"
] | null | null | null | from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.ChatConsumer.as_asgi()),
]
| 18.555556 | 79 | 0.718563 | from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.ChatConsumer.as_asgi()),
]
| true | true |
1c477e1719e1177e3264aa192d9f0d5a10a984e2 | 9,469 | py | Python | osx/qcdict.py | WeAreAVP/mdqc | 3130a713c70017bc54d8e5da6bb5766ba9d97423 | [
"Apache-2.0"
] | 8 | 2019-01-18T08:58:02.000Z | 2021-05-20T16:51:14.000Z | osx/qcdict.py | WeAreAVP/mdqc | 3130a713c70017bc54d8e5da6bb5766ba9d97423 | [
"Apache-2.0"
] | 7 | 2016-02-25T21:50:03.000Z | 2017-12-13T14:27:29.000Z | osx/qcdict.py | WeAreAVP/mdqc | 3130a713c70017bc54d8e5da6bb5766ba9d97423 | [
"Apache-2.0"
] | 2 | 2020-02-13T16:00:07.000Z | 2020-08-12T16:31:49.000Z | # MDQC Core (OSX)
# Version 0.1, 2013-10-28
# Copyright (c) 2013 AudioVisual Preservation Solutions
# All rights reserved.
# Released under the Apache license, v. 2.0
import subprocess
from collections import defaultdict
from os import path
import time
from re import compile, findall, UNICODE
import sys
# template (text file)
# generates a set of rules from a text file
# returns: list of (tag, comp, value) tuples
def template(tpl):
    """Read a tab-separated rule template file.

    Each line of *tpl* has the form ``tag<TAB>comparator<TAB>value``.
    Lines whose comparator field is the placeholder ``XX`` are skipped.

    Returns a list of ``(tag, comparator, value)`` string tuples.
    """
    rules = []
    with open(tpl, 'r') as t:
        for line in t.readlines():
            q = line.split('\t')
            # ``!=`` (value comparison): the original ``is not 'XX'``
            # compared object identity, so 'XX' rows were never filtered.
            if q[1].strip() != 'XX':
                rules.append((q[0].strip(), q[1].strip(), q[2].strip()))
    return rules
def exifMeta(file_path):
    """Collect ExifTool metadata for *file_path*.

    Returns a defaultdict mapping tag name -> string value (empty string
    when the tag has no value), or None if exiftool could not be run.
    """
    meta = defaultdict(list)
    # Probe /usr/bin/exiftool; fall back to /usr/local/bin on OSError.
    # NOTE(review): the probe process ``n`` is never waited on or
    # terminated, so it briefly leaves a child process behind.
    try:
        n = subprocess.Popen(['/usr/bin/exiftool'])
        fp = '/usr/bin/exiftool'
    except OSError:
        fp = '/usr/local/bin/exiftool'
    # Python 2 bytes -> unicode; on str input .decode is absent/fails and
    # the path is used as-is.
    try:
        file_path = file_path.decode('utf-8')
    except:
        pass
    try:
        # executes exiftool against the media file with group output
        # exiftool is run twice: once for all but filesize,
        # and one specifically for filesize
        # this is due to filesize requiring precise numerical output
        p = subprocess.Popen([fp, '-t', '-G', '--filesize', file_path], stdout=subprocess.PIPE)
        out = p.communicate()[0].splitlines()
        p.stdout.close()
        try:
            p.terminate()
        except:
            pass
        q = subprocess.Popen([fp, '-t', '-G', '-filesize#', file_path], stdout=subprocess.PIPE)
        out += q.communicate()[0].splitlines()
        q.stdout.close()
        try:
            q.terminate()
        except:
            pass
    except:
        # Any failure running exiftool -> implicit None return.
        return
        # NOTE(review): unreachable (follows the return above).
        pass
    # formats the list into a dictionary; skip header/identity rows
    for x in out:
        if 'ExifTool Version' in x or 'File Name' in x:
            continue
        y = x.split('\t')
        if y[2].strip():
            meta[y[1].strip()] = y[2].strip()
        else:
            meta[y[1].strip()] = ""
    return meta
# mnfoMeta (media file)
# generates metadata for the supplied media file
# returns: defaultdict of {tag: value} pairs
def mnfoMeta(file, useMediaInfoFile):
    """Collect MediaInfo metadata for *file* (Python 2 code: note ``print e``).

    When *useMediaInfoFile* is true, *file* is read as a pre-generated
    MediaInfo text dump instead of invoking the mediainfo binary.
    Returns a defaultdict mapping (track-prefixed) tag name -> value.
    """
    meta = defaultdict(list)
    # Probe /usr/bin/mediainfo; fall back to /usr/local/bin on OSError.
    # NOTE(review): the probe process ``n`` is never reaped.
    try:
        n = subprocess.Popen(['/usr/bin/mediainfo'])
        fp = '/usr/bin/mediainfo'
    except OSError:
        fp = '/usr/local/bin/mediainfo'
    if useMediaInfoFile == False:
        p = subprocess.Popen([fp, file],
                             stdout=subprocess.PIPE)
        out = p.communicate()[0].splitlines()
    else:
        # Read the saved MediaInfo dump; on any error treat as no output.
        try:
            import io
            f = io.open(file, mode="r", encoding="utf-8").read()
            out = f.splitlines()
        except:
            out = []
    # formats the list into a dictionary; an 'ID' row starts a new track,
    # whose number prefixes all following tag names.
    prefix = ""
    for x in out:
        try:
            if not ":" in x or 'File Name' in x:
                continue
            y = x.split(' :')
            if y[0].strip() == 'ID':
                prefix = "Track #" + str(y[1].strip()) + " "
            if y[1].strip():
                meta[prefix + y[0].strip()] = y[1].strip().decode('utf8')
            else:
                meta[prefix + y[0].strip()] = ""
        except Exception as e:
            print e
            pass
    return meta
# verify ( (tag, comparator, value), metadata dictionary )
# takes a rule tuple and returns if the metadata dictionary meets it
# for example: ( (X Resolution, 3, 400), metadata dictionary)
def verify(rule, dict):
    """Check one ``(tag, comparator, value)`` rule against metadata *dict*.

    Comparator codes 1..10 map to existence/equality/containment/ordering
    tests.  Return codes: 2 = rule satisfied, 0 = rule failed,
    1 = tag missing, 3 = malformed comparison (operator not applicable).
    Note: the parameter name ``dict`` shadows the builtin.
    """
    # unicode regex to split strings into lists
    word = compile("[\w\d'-\.\,]+", UNICODE)
    try:
        value = findall(word, dict[rule[0]])
    except:
        # if we're here, there was no value - check for non-existence
        if rule[1] == 2:
            return 2
        else:
            return 1
    # if it's a date, it should be reformatted into a date format
    if 'Date' in rule[0]:
        try:
            comp = time.mktime(time.strptime(rule[2].split(' ')[0],"%Y:%m:%d"))
            # NOTE(review): ``value`` is a list here (findall result), so
            # ``value.split`` raises AttributeError and this branch always
            # falls through to the except clause -- the date comparison
            # path likely never completes; confirm intended behavior.
            value = time.mktime(time.strptime(value.split(' ')[0],"%Y:%m:%d"))
        except:
            comp = findall(word, rule[2])
    else:
        # split the unicode string into a list of words/numbers
        comp = findall(word, rule[2])
    try:
        # value exists
        if rule[1] == 1:
            return 2
        # value does not exist
        if rule[1] == 2 and not value:
            return 2
        # value equals reference value
        if rule[1] == 3 and value == comp:
            return 2
        # value does not equal reference value
        if rule[1] == 4 and value != comp:
            return 2
        # value contains reference value
        if rule[1] == 5 and str(rule[2]).lower() in str(dict[rule[0]]).lower(): #any(k in s for s in value for k in rule[2]):
            return 2
        # value does not contain reference value
        if rule[1] == 6 and str(rule[2]).lower() not in str(dict[rule[0]]).lower(): #all(k in s for s in value for k in rule[2]):
            return 2
        # value is greater than
        if (rule[1] == 7 and
            tuple(float(f) for f in destring(value)) > tuple(float(f) for f in destring(comp))):
            return 2
        # value is at least
        if (rule[1] == 8 and
            tuple(float(f) for f in destring(value)) >= tuple(float(f) for f in destring(comp))):
            return 2
        # value is less than
        if (rule[1] == 9 and
            tuple(float(f) for f in destring(value)) < tuple(float(f) for f in destring(comp))):
            return 2
        # value is at most
        if (rule[1] == 10 and
            tuple(float(f) for f in destring(value)) <= tuple(float(f) for f in destring(comp))):
            return 2
        # nothing was true, so it must be false
        else:
            return 0
    except ValueError:
        # we get here if we tried an illegal operation
        # (e.g. unicode > unicode)
        # so we'll throw 'malformed operator'
        return 3
def lineno():
    """Return the source line number of the caller (debugging aid)."""
    import inspect
    caller_frame = inspect.currentframe().f_back
    return caller_frame.f_lineno
# validate (media asset, list of rule tuples)
# generates the metadata dictionary for the asset
# and compares each rule tuple against it
# in addition, it generates the output string for the asset,
# providing a natural-language description of what happened
def validate(file, rules, type, useMediaInfoFile=False):
    """Validate a single media asset against a list of rule tuples.

    Generates the metadata dictionary for the asset, checks each rule via
    ``verify``, and builds a natural-language result plus a tab-separated
    report line.

    Parameters: *file* is the asset path (or MediaInfo .txt dump);
    *rules* is a list of ``(tag, comparator, value)`` tuples with numeric
    comparator codes 1..10; truthy *type* selects the ExifTool backend,
    falsy selects MediaInfo; *useMediaInfoFile* reads a pre-generated
    dump (only honoured for ``.txt`` files).

    Returns a ``(result, report)`` pair of strings.
    """
    verified_files = {}
    result, report = "", ""
    valid = True
    if type:
        meta = exifMeta(file)
    else:
        # A pre-generated MediaInfo dump is only honoured for .txt files.
        if file[-4:] != ".txt":
            useMediaInfoFile = False
        meta = mnfoMeta(file, useMediaInfoFile)
    for r in rules:
        # Map the numeric comparator code onto its natural-language name
        # for the human-readable report.
        if r[1] == 1:
            op = 'existent:'
        if r[1] == 2:
            op = "nonexistent:"
        if r[1] == 3:
            op = ''
        if r[1] == 4:
            op = "not"
        if r[1] == 5:
            op = "containing"
        if r[1] == 6:
            op = "not containing"
        if r[1] == 7:
            op = "greater than"
        if r[1] == 8:
            op = "at least"
        if r[1] == 9:
            op = "less than"
        if r[1] == 10:
            op = "at most"
        x = verify(r, meta)
        # ``==`` instead of the original ``is``: identity comparison of
        # small ints only worked by accident of CPython's int caching.
        if x == 0:
            result += path.abspath(file) + ": FAILED at " + r[0] + " (" + meta[r[0]] + " is not " + op + " " + r[2].rstrip() + ")\n"
            # <File Path> FAILED <tag> not <operation> <value> ...
            if file in verified_files:
                verified_files[file] = str(verified_files[file]) + "\t" + str(r[0]) + " not " + op + " " + r[2].strip() + "\t"
            else:
                verified_files[file] = path.abspath(file) + "\tFAILED\t" + str(r[0]) + " not " + op + " " + r[2].strip() + "\t"
            valid = False
        elif x == 1:
            result += path.abspath(file) + ": FAILED at " + r[0] + " (tag not found)\n"
            # <File Path> FAILED tag not found <tag> ...
            if file in verified_files:
                verified_files[file] = str(verified_files[file]) + "\t" + " tag not found " + str(r[0]) + "\t"
            else:
                verified_files[file] = path.abspath(file) + "\tFAILED\t" + " tag not found " + str(r[0]) + "\t"
            valid = False
        elif x == 3:
            result += path.abspath(file) + \
                ": Malformed comparison operator (" + \
                op + " does not operate on " + meta[r[0]] + \
                " and " + r[2] + ")\n"
            # <File Path> FAILED <operator> does not operate on <tag> ...
            if file in verified_files:
                verified_files[file] = str(verified_files[file]) + "\t" + str(op) + " does not operate on " + r[0] + "\t"
            else:
                verified_files[file] = path.abspath(file) + "\tFAILED\t" + str(op) + " does not operate on " + r[0] + "\t"
            valid = False
    for single_verified_files in verified_files:
        report += verified_files[single_verified_files] + "\n"
    if valid:
        return path.abspath(file) + ": PASSED", path.abspath(file) + "\tPASSED\n"
    return result.rstrip(), report
def destring(t):
    """Convert the items of *t* to floats, dropping anything unconvertible.

    Returns ``['bad string']`` when nothing converts -- the sentinel the
    comparison code relies on to force a ValueError in tuple compares.
    """
    numbers = []
    for item in t:
        try:
            numbers.append(float(item))
        except Exception:
            continue
    return numbers if numbers else ['bad string']
| 32.539519 | 132 | 0.524659 |
import subprocess
from collections import defaultdict
from os import path
import time
from re import compile, findall, UNICODE
import sys
def template(tpl):
rules = []
with open(tpl, 'r') as t:
s = t.readlines()
for r in range(len(s)):
q = s[r].split('\t')
if q[1].strip() is not 'XX':
rules.append((q[0].strip(), q[1].strip(), q[2].strip()))
return rules
def exifMeta(file_path):
meta = defaultdict(list)
try:
n = subprocess.Popen(['/usr/bin/exiftool'])
fp = '/usr/bin/exiftool'
except OSError:
fp = '/usr/local/bin/exiftool'
try:
file_path = file_path.decode('utf-8')
except:
pass
try:
p = subprocess.Popen([fp, '-t', '-G', '--filesize', file_path], stdout=subprocess.PIPE)
out = p.communicate()[0].splitlines()
p.stdout.close()
try:
p.terminate()
except:
pass
q = subprocess.Popen([fp, '-t', '-G', '-filesize#', file_path], stdout=subprocess.PIPE)
out += q.communicate()[0].splitlines()
q.stdout.close()
try:
q.terminate()
except:
pass
except:
return
pass
for x in out:
if 'ExifTool Version' in x or 'File Name' in x:
continue
y = x.split('\t')
if y[2].strip():
meta[y[1].strip()] = y[2].strip()
else:
meta[y[1].strip()] = ""
return meta
def mnfoMeta(file, useMediaInfoFile):
meta = defaultdict(list)
try:
n = subprocess.Popen(['/usr/bin/mediainfo'])
fp = '/usr/bin/mediainfo'
except OSError:
fp = '/usr/local/bin/mediainfo'
if useMediaInfoFile == False:
p = subprocess.Popen([fp, file],
stdout=subprocess.PIPE)
out = p.communicate()[0].splitlines()
else:
try:
import io
f = io.open(file, mode="r", encoding="utf-8").read()
out = f.splitlines()
except:
out = []
prefix = ""
for x in out:
try:
if not ":" in x or 'File Name' in x:
continue
y = x.split(' :')
if y[0].strip() == 'ID':
prefix = "Track #" + str(y[1].strip()) + " "
if y[1].strip():
meta[prefix + y[0].strip()] = y[1].strip().decode('utf8')
else:
meta[prefix + y[0].strip()] = ""
except Exception as e:
print e
pass
return meta
def verify(rule, dict):
word = compile("[\w\d'-\.\,]+", UNICODE)
try:
value = findall(word, dict[rule[0]])
except:
# if we're here, there was no value - check for non-existence
if rule[1] == 2:
return 2
else:
return 1
if 'Date' in rule[0]:
try:
comp = time.mktime(time.strptime(rule[2].split(' ')[0],"%Y:%m:%d"))
value = time.mktime(time.strptime(value.split(' ')[0],"%Y:%m:%d"))
except:
comp = findall(word, rule[2])
else:
# split the unicode string into a list of words/numbers
comp = findall(word, rule[2])
try:
# value exists
if rule[1] == 1:
return 2
# value does not exist
if rule[1] == 2 and not value:
return 2
# value equals reference value
if rule[1] == 3 and value == comp:
return 2
# value does not equal reference value
if rule[1] == 4 and value != comp:
return 2
# value contains reference value
if rule[1] == 5 and str(rule[2]).lower() in str(dict[rule[0]]).lower(): #any(k in s for s in value for k in rule[2]):
return 2
# value does not contain reference value
if rule[1] == 6 and str(rule[2]).lower() not in str(dict[rule[0]]).lower(): #all(k in s for s in value for k in rule[2]):
return 2
# value is greater than
if (rule[1] == 7 and
tuple(float(f) for f in destring(value)) > tuple(float(f) for f in destring(comp))):
return 2
# value is at least
if (rule[1] == 8 and
tuple(float(f) for f in destring(value)) >= tuple(float(f) for f in destring(comp))):
return 2
# value is less than
if (rule[1] == 9 and
tuple(float(f) for f in destring(value)) < tuple(float(f) for f in destring(comp))):
return 2
# value is at most
if (rule[1] == 10 and
tuple(float(f) for f in destring(value)) <= tuple(float(f) for f in destring(comp))):
return 2
# nothing was true, so it must be false
else:
return 0
except ValueError:
# we get here if we tried an illegal operation
# (e.g. unicode > unicode)
# so we'll throw 'malformed operator'
return 3
def lineno():
import inspect
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
def validate(file, rules, type, useMediaInfoFile = False):
verified_files = {}
result, report = "", ""
valid = True
if type:
meta = exifMeta(file)
else:
if file[-4:] != ".txt":
useMediaInfoFile = False
meta = mnfoMeta(file, useMediaInfoFile)
for r in rules:
if r[1] == 1:
op = 'existent:'
if r[1] == 2:
op = "nonexistent:"
if r[1] == 3:
op = ''
if r[1] == 4:
op = "not"
if r[1] == 5:
op = "containing"
if r[1] == 6:
op = "not containing"
if r[1] == 7:
op = "greater than"
if r[1] == 8:
op = "at least"
if r[1] == 9:
op = "less than"
if r[1] == 10:
op = "at most"
x = verify(r, meta)
if x is 0:
result += path.abspath(file) + ": FAILED at " + r[0] + " (" + meta[r[0]] + " is not " + op + " " + r[2].rstrip() + ")\n"
if file in verified_files:
verified_files[file] = str(verified_files[file]) + "\t" + str(r[0]) + " not " + op + " " + r[2].strip() + "\t"
else:
verified_files[file] = path.abspath(file) + "\tFAILED\t" + str(r[0]) + " not " + op + " " + r[2].strip() + "\t"
valid = False
elif x is 1:
result += path.abspath(file) + ": FAILED at " + r[0] + " (tag not found)\n"
if file in verified_files:
verified_files[file] = str(verified_files[file]) + "\t" + " tag not found " + str(r[0]) + "\t"
else:
verified_files[file] = path.abspath(file) + "\tFAILED\t" + " tag not found " + str(r[0]) + "\t"
valid = False
elif x is 3:
result += path.abspath(file) + \
": Malformed comparison operator (" + \
op + " does not operate on " + meta[r[0]] + \
" and " + r[2] + ")\n"
if file in verified_files:
verified_files[file] = str(verified_files[file]) + "\t"+ str(op) + " does not operate on " + r[0] + "\t"
else:
verified_files[file] = path.abspath(file) + "\tFAILED\t" + str(op) + " does not operate on " + r[0] + "\t"
valid = False
for single_verified_files in verified_files:
report += verified_files[single_verified_files] + "\n"
if valid:
return path.abspath(file) + ": PASSED", path.abspath(file) + "\tPASSED\n"
return result.rstrip(), report
def destring(t):
l = []
for x in t:
try:
l.append(float(x))
except:
continue
if len(l) == 0:
l.append('bad string')
return l
| false | true |
1c477e610890926de828f933fc42e26ec8d369e3 | 83 | py | Python | MachineLearningToolkitCore/Loss/__init__.py | showintime/MachineLearningToolkit | cb265f8b0d3ca5aa16ad92cdbe74e138b5f56023 | [
"Apache-2.0"
] | null | null | null | MachineLearningToolkitCore/Loss/__init__.py | showintime/MachineLearningToolkit | cb265f8b0d3ca5aa16ad92cdbe74e138b5f56023 | [
"Apache-2.0"
] | null | null | null | MachineLearningToolkitCore/Loss/__init__.py | showintime/MachineLearningToolkit | cb265f8b0d3ca5aa16ad92cdbe74e138b5f56023 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 22:43:31 2019
@author: ZWH
"""
| 10.375 | 35 | 0.542169 | true | true | |
1c477f84e4323ce0c780a036d579746b5abba31d | 522 | py | Python | dynabuffers-python/tests/usecase/Schema03Test.py | leftshiftone/dynabuffers | c3e94c56989be3df87b50b8d9e17d1ea86199ede | [
"Apache-2.0"
] | 2 | 2019-10-28T12:28:01.000Z | 2020-07-07T12:25:40.000Z | dynabuffers-python/tests/usecase/Schema03Test.py | leftshiftone/dynabuffers | c3e94c56989be3df87b50b8d9e17d1ea86199ede | [
"Apache-2.0"
] | 1 | 2021-12-21T07:35:22.000Z | 2021-12-21T07:35:22.000Z | dynabuffers-python/tests/usecase/Schema03Test.py | leftshiftone/dynabuffers | c3e94c56989be3df87b50b8d9e17d1ea86199ede | [
"Apache-2.0"
] | 1 | 2020-03-19T09:19:43.000Z | 2020-03-19T09:19:43.000Z | import os
import unittest
from antlr4 import FileStream
from dynabuffers.Dynabuffers import Dynabuffers
class Schema03Test(unittest.TestCase):
    """Use-case test: round-trip serialization against ``schema03.dbs``."""

    # Directory containing this test module; the schema file lives next to it.
    root_dir = os.path.dirname(os.path.realpath(__file__))

    def test_parse(self):
        """Serialize then deserialize a payload and expect it unchanged."""
        engine = Dynabuffers.parse(FileStream(self.root_dir + "/schema03.dbs"))
        payload = {"results": [{"text": "hello world"}]}
        # ``decoded`` replaces the original local name ``map``, which
        # shadowed the builtin.
        decoded = engine.deserialize(engine.serialize(payload))
        self.assertEqual(decoded, payload)


if __name__ == "__main__":
    unittest.main()
| 24.857143 | 89 | 0.697318 | import os
import unittest
from antlr4 import FileStream
from dynabuffers.Dynabuffers import Dynabuffers
class Schema03Test(unittest.TestCase):
root_dir = os.path.dirname(os.path.realpath(__file__))
def test_parse(self):
engine = Dynabuffers.parse(FileStream(self.root_dir + "/schema03.dbs"))
map = engine.deserialize(engine.serialize({"results": [{"text":"hello world"}]}))
self.assertEqual(map, {"results": [{"text":"hello world"}]})
if __name__ == "__main__":
unittest.main()
| true | true |
1c47802de2045227fcff56755ff71d4d4d7c6eba | 150 | py | Python | hubspot/cms/performance/api/__init__.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 1 | 2020-11-12T08:46:32.000Z | 2020-11-12T08:46:32.000Z | hubspot/cms/performance/api/__init__.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | hubspot/cms/performance/api/__init__.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.cms.performance.api.default_api import DefaultApi
| 21.428571 | 62 | 0.826667 | from __future__ import absolute_import
from hubspot.cms.performance.api.default_api import DefaultApi
| true | true |
1c47816a5c703047ace0c887f2f265050774570e | 1,174 | py | Python | dir_test/test_me.py | splbio/pytestdoc | 08a8ee1a4014bb78169ee4fc41cc6b722032826e | [
"BSD-2-Clause"
] | 9 | 2015-07-08T16:25:32.000Z | 2021-04-15T10:50:12.000Z | dir_test/test_me.py | splbio/pytestdoc | 08a8ee1a4014bb78169ee4fc41cc6b722032826e | [
"BSD-2-Clause"
] | 1 | 2015-08-18T06:53:50.000Z | 2015-10-11T04:55:41.000Z | dir_test/test_me.py | splbio/pytestdoc | 08a8ee1a4014bb78169ee4fc41cc6b722032826e | [
"BSD-2-Clause"
] | 2 | 2019-04-04T08:44:13.000Z | 2021-02-22T08:12:03.000Z | import json
import pytestdoc
WHAT_IS_THIS = True
def times(x, y):
return x * y
TEST_CATEGORY="derp"
@pytestdoc.tattr_redmine_feature(7474)
@pytestdoc.tattr_redmine_bug(7475, 1776)
@pytestdoc.tattr_incomplete
@pytestdoc.tattr_category("herp")
@pytestdoc.tattr_doc("""
This is the *documentation* for my function
It tests the following things:
- if derps are herps
- all fives are half of 10
""")
def test_positive():
assert times(5,5) == 25
@pytestdoc.tattr_doc("""Test that this works when first item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_firstnegative():
assert times(-2,5) == -10
@pytestdoc.tattr_doc("""Test that this works when second item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_secondnegative():
assert times(3,-12) == -36
@pytestdoc.tattr_doc("""Test that this works when both items are negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_bothnegative():
assert times(-12,-12) == 144
@pytestdoc.tattr_doc("""Test that this works when first item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_firstnegative():
assert times(-2,5) == -10
| 24.978723 | 77 | 0.736797 | import json
import pytestdoc
WHAT_IS_THIS = True
def times(x, y):
return x * y
TEST_CATEGORY="derp"
@pytestdoc.tattr_redmine_feature(7474)
@pytestdoc.tattr_redmine_bug(7475, 1776)
@pytestdoc.tattr_incomplete
@pytestdoc.tattr_category("herp")
@pytestdoc.tattr_doc("""
This is the *documentation* for my function
It tests the following things:
- if derps are herps
- all fives are half of 10
""")
def test_positive():
assert times(5,5) == 25
@pytestdoc.tattr_doc("""Test that this works when first item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_firstnegative():
assert times(-2,5) == -10
@pytestdoc.tattr_doc("""Test that this works when second item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_secondnegative():
assert times(3,-12) == -36
@pytestdoc.tattr_doc("""Test that this works when both items are negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_bothnegative():
assert times(-12,-12) == 144
@pytestdoc.tattr_doc("""Test that this works when first item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_firstnegative():
assert times(-2,5) == -10
| true | true |
1c4781b885c055266febe549972d98ad995a452c | 2,740 | py | Python | aiida_defect/calculations.py | unkcpz/aiida-defect | 592c1d8dd8130b06d06b543d5e5d35286afa63a3 | [
"MIT"
] | 1 | 2021-02-18T07:20:02.000Z | 2021-02-18T07:20:02.000Z | aiida_defect/calculations.py | unkcpz/aiida-defect | 592c1d8dd8130b06d06b543d5e5d35286afa63a3 | [
"MIT"
] | null | null | null | aiida_defect/calculations.py | unkcpz/aiida-defect | 592c1d8dd8130b06d06b543d5e5d35286afa63a3 | [
"MIT"
] | null | null | null | """
Calculations provided by aiida_defect.
Register calculations via the "aiida.calculations" entry point in setup.json.
"""
from __future__ import absolute_import
import six
from aiida.common import datastructures
from aiida.engine import CalcJob
from aiida.orm import SinglefileData
from aiida.plugins import DataFactory
DiffParameters = DataFactory('defect')
class DiffCalculation(CalcJob):
"""
AiiDA calculation plugin wrapping the diff executable.
Simple AiiDA plugin wrapper for 'diffing' two files.
"""
@classmethod
def define(cls, spec):
"""Define inputs and outputs of the calculation."""
# yapf: disable
super(DiffCalculation, cls).define(spec)
spec.input('metadata.options.resources', valid_type=dict, default={'num_machines': 1, 'num_mpiprocs_per_machine': 1})
spec.input('metadata.options.parser_name', valid_type=six.string_types, default='defect')
spec.input('metadata.options.output_filename', valid_type=six.string_types, default='patch.diff')
spec.input('parameters', valid_type=DiffParameters, help='Command line parameters for diff')
spec.input('file1', valid_type=SinglefileData, help='First file to be compared.')
spec.input('file2', valid_type=SinglefileData, help='Second file to be compared.')
spec.output('defect', valid_type=SinglefileData, help='diff between file1 and file2.')
spec.exit_code(100, 'ERROR_MISSING_OUTPUT_FILES', message='Calculation did not produce all expected output files.')
def prepare_for_submission(self, folder):
"""
Create input files.
:param folder: an `aiida.common.folders.Folder` where the plugin should temporarily place all files needed by
the calculation.
:return: `aiida.common.datastructures.CalcInfo` instance
"""
codeinfo = datastructures.CodeInfo()
codeinfo.cmdline_params = self.inputs.parameters.cmdline_params(
file1_name=self.inputs.file1.filename,
file2_name=self.inputs.file2.filename)
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.stdout_name = self.metadata.options.output_filename
codeinfo.withmpi = self.inputs.metadata.options.withmpi
# Prepare a `CalcInfo` to be returned to the engine
calcinfo = datastructures.CalcInfo()
calcinfo.codes_info = [codeinfo]
calcinfo.local_copy_list = [
(self.inputs.file1.uuid, self.inputs.file1.filename, self.inputs.file1.filename),
(self.inputs.file2.uuid, self.inputs.file2.filename, self.inputs.file2.filename),
]
calcinfo.retrieve_list = [self.metadata.options.output_filename]
return calcinfo
| 40.895522 | 125 | 0.708394 | from __future__ import absolute_import
import six
from aiida.common import datastructures
from aiida.engine import CalcJob
from aiida.orm import SinglefileData
from aiida.plugins import DataFactory
DiffParameters = DataFactory('defect')
class DiffCalculation(CalcJob):
@classmethod
def define(cls, spec):
super(DiffCalculation, cls).define(spec)
spec.input('metadata.options.resources', valid_type=dict, default={'num_machines': 1, 'num_mpiprocs_per_machine': 1})
spec.input('metadata.options.parser_name', valid_type=six.string_types, default='defect')
spec.input('metadata.options.output_filename', valid_type=six.string_types, default='patch.diff')
spec.input('parameters', valid_type=DiffParameters, help='Command line parameters for diff')
spec.input('file1', valid_type=SinglefileData, help='First file to be compared.')
spec.input('file2', valid_type=SinglefileData, help='Second file to be compared.')
spec.output('defect', valid_type=SinglefileData, help='diff between file1 and file2.')
spec.exit_code(100, 'ERROR_MISSING_OUTPUT_FILES', message='Calculation did not produce all expected output files.')
def prepare_for_submission(self, folder):
codeinfo = datastructures.CodeInfo()
codeinfo.cmdline_params = self.inputs.parameters.cmdline_params(
file1_name=self.inputs.file1.filename,
file2_name=self.inputs.file2.filename)
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.stdout_name = self.metadata.options.output_filename
codeinfo.withmpi = self.inputs.metadata.options.withmpi
calcinfo = datastructures.CalcInfo()
calcinfo.codes_info = [codeinfo]
calcinfo.local_copy_list = [
(self.inputs.file1.uuid, self.inputs.file1.filename, self.inputs.file1.filename),
(self.inputs.file2.uuid, self.inputs.file2.filename, self.inputs.file2.filename),
]
calcinfo.retrieve_list = [self.metadata.options.output_filename]
return calcinfo
| true | true |
1c4782033a601ea0f3de81c2b2d2f03f95b1884b | 2,006 | py | Python | examples/gui/__main__.py | vcokltfre/aionasa | 8cd1d496d7373c806e38eb75e0103e4377da0875 | [
"MIT"
] | 4 | 2020-11-26T10:49:53.000Z | 2021-05-18T17:56:08.000Z | examples/gui/__main__.py | vcokltfre/aionasa | 8cd1d496d7373c806e38eb75e0103e4377da0875 | [
"MIT"
] | 1 | 2021-01-07T01:41:27.000Z | 2021-01-07T01:41:27.000Z | examples/gui/__main__.py | vcokltfre/aionasa | 8cd1d496d7373c806e38eb75e0103e4377da0875 | [
"MIT"
] | 1 | 2021-08-19T18:49:53.000Z | 2021-08-19T18:49:53.000Z | import argparse
import asyncio
import os
from aionasa.epic.api import EPIC
from aionasa.utils import date_strptime
from gui import open_gui
__doc__ = "Download some images from NASA's EPIC archive and open them in a gui browser."
usage = "python -m aionasa.epic [-h] [--date DATE] [--collection COLLECTION] img_folder"
def argument_parser():
    """Build the command-line parser used by the ``aionasa.epic`` script.

    Flags: ``--date/-d`` (parsed via ``date_strptime``), ``--collection/-c``
    (defaults to ``'natural'``), and the positional ``img_folder``.
    """
    p = argparse.ArgumentParser(description=__doc__, usage=usage)
    p.add_argument(
        '--date', '-d',
        type=date_strptime,
        default=None,
        help="Format: YYYY-MM-DD",
    )
    p.add_argument(
        '--collection', '-c',
        default='natural',
        help="Collection to get images from. Should be 'natural', 'enhanced', or 'natural,enhanced'",
    )
    p.add_argument(
        'img_folder',
        help='Directory to download the images to.',
    )
    return p
async def _task(coro, arg):
    """Await ``coro(arg)``, suppressing any error so one failed download
    does not abort the whole ``asyncio.gather`` batch.

    Only ``Exception`` is swallowed: the original bare ``except`` would also
    eat ``asyncio.CancelledError`` (a ``BaseException`` since Python 3.8) and
    ``KeyboardInterrupt``, making the batch effectively uncancellable.
    """
    try:
        await coro(arg)
    except Exception:
        pass
async def setup(date, path, collection):
    """Download every EPIC image for *date* in *collection* into *path*."""
    # Ensure the target directory exists before any download starts.
    if not os.path.exists(path):
        os.mkdir(path)

    async with EPIC() as epic:
        # Query the API for image metadata (URLs, filenames, ...).
        images = []
        if 'natural' in collection:
            images.extend(await epic.natural_images(date))
        if 'enhanced' in collection:
            images.extend(await epic.enhanced_images(date))

        # Fetch everything concurrently; _task shields individual failures.
        print('downloading', len(images), 'images.')
        downloads = [_task(img.save, path + '/' + img.filename) for img in images]
        await asyncio.gather(*downloads)
async def main():
    """Entry point: download the requested images, then open the gui browser."""
    # NOTE(review): reads the module-level ``args`` bound in the __main__
    # guard below; main() is only callable after that guard has run.
    await setup(args.date, args.img_folder, args.collection.split(','))
    open_gui(args.img_folder)


if __name__ == '__main__':
    # Parse CLI arguments into a module-level name so main() can read them.
    args = argument_parser().parse_args()
    asyncio.run(main())
| 28.657143 | 100 | 0.653539 | import argparse
import asyncio
import os
from aionasa.epic.api import EPIC
from aionasa.utils import date_strptime
from gui import open_gui
__doc__ = "Download some images from NASA's EPIC archive and open them in a gui browser."
usage = "python -m aionasa.epic [-h] [--date DATE] [--collection COLLECTION] img_folder"
def argument_parser():
parser = argparse.ArgumentParser(description=__doc__, usage=usage)
parser.add_argument(
'--date', '-d', type=date_strptime, default=None,
help="Format: YYYY-MM-DD"
)
parser.add_argument(
'--collection', '-c', default='natural',
help="Collection to get images from. Should be 'natural', 'enhanced', or 'natural,enhanced'"
)
parser.add_argument(
'img_folder',
help='Directory to download the images to.'
)
return parser
async def _task(coro, arg):
try:
await coro(arg)
except:
pass
async def setup(date, path, collection):
# make image directory if necessary
if not os.path.exists(path):
os.mkdir(path)
async with EPIC() as epic:
# API request, gets images (urls etc)
images = []
if 'natural' in collection:
images += await epic.natural_images(date)
if 'enhanced' in collection:
images += await epic.enhanced_images(date)
# download the images asynchronously
print('downloading', len(images), 'images.')
tasks = [_task(image.save, path + '/' + image.filename) for image in images]
await asyncio.gather(*tasks)
async def main():
await setup(args.date, args.img_folder, args.collection.split(','))
open_gui(args.img_folder)
if __name__ == '__main__':
args = argument_parser().parse_args()
asyncio.run(main())
| true | true |
1c4782238324e2454e74dfd129755995c5656e98 | 11,993 | py | Python | aesara/graph/utils.py | danhphan/aesara | 5a0fb0e731358d54648823170acd911cc1534d6a | [
"BSD-3-Clause"
] | null | null | null | aesara/graph/utils.py | danhphan/aesara | 5a0fb0e731358d54648823170acd911cc1534d6a | [
"BSD-3-Clause"
] | null | null | null | aesara/graph/utils.py | danhphan/aesara | 5a0fb0e731358d54648823170acd911cc1534d6a | [
"BSD-3-Clause"
] | null | null | null | import linecache
import sys
import traceback
from abc import ABCMeta
from io import StringIO
from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, TypeVar, Union
if TYPE_CHECKING:
from aesara.graph.basic import Apply, Variable
T = TypeVar("T", bound=Union["Apply", "Variable"])
def simple_extract_stack(
    f=None, limit: Optional[int] = None, skips: Optional[Sequence[str]] = None
) -> List[Tuple[Optional[str], int, str, Optional[str]]]:
    """This is traceback.extract_stack from python 2.7 with this change:

    - Comment the update of the cache.
    - Skip internal stack trace level.

    The update of the cache call os.stat to verify is the cache is up
    to date. This take too much time on cluster.

    limit - The number of stack level we want to return. If None, mean
    all what we can.

    skips - partial path of stack level we don't want to keep and count.
        When we find one level that isn't skipped, we stop skipping.

    Returns a list of ``(filename, lineno, name, line)`` tuples, ordered
    outermost frame first (like ``traceback.extract_stack``).
    """
    if skips is None:
        skips = []

    if f is None:
        # Start from the caller's frame, not this function's own.
        f = sys._getframe().f_back
    if limit is None:
        if hasattr(sys, "tracebacklimit"):
            limit = sys.tracebacklimit
    trace: List[Tuple[Optional[str], int, str, Optional[str]]] = []
    n = 0
    # Walk outward (callee -> caller) until we run out of frames or hit limit.
    while f is not None and (limit is None or n < limit):
        lineno = f.f_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        # Deliberately NOT calling linecache.checkcache(filename): the os.stat
        # it performs per frame is too slow on network filesystems (clusters).
        # linecache.checkcache(filename)
        line: Optional[str] = linecache.getline(filename, lineno, f.f_globals)
        if line:
            line = line.strip()
        else:
            line = None
        f = f.f_back

        # Skip library-internal frames, but only until the first frame we
        # keep: once ``trace`` is non-empty, everything further out is kept.
        if len(trace) == 0:
            rm = False
            for p in skips:
                # Julian: I added the 'tests' exception together with
                # Arnaud. Otherwise, we'd lose the stack trace during
                # in our test cases (e.g. in test_opt.py). We're not
                # sure this is the right way to do it though.
                if p in filename and "tests" not in filename:
                    rm = True
                    break
            if rm:
                continue
        trace.append((filename, lineno, name, line))
        n = n + 1
    trace.reverse()
    return trace
def add_tag_trace(thing: T, user_line: Optional[int] = None) -> T:
    """Add tag.trace to a node or variable.

    The argument is returned after being affected (inplace).

    Parameters
    ----------
    thing
        The object where we add .tag.trace.
    user_line
        The max number of user line to keep.

    Notes
    -----
    We also use config.traceback__limit for the maximum number of stack level
    we look.

    """
    # Imported here rather than at module top, presumably to avoid a
    # circular import with the config module — confirm before hoisting.
    from aesara.configdefaults import config

    if user_line is None:
        user_line = config.traceback__limit

    # A configured limit of -1 means "no limit".
    if user_line == -1:
        user_line = None
    # Library-internal path fragments to strip from the trace; each entry is
    # listed with both POSIX and Windows separators.
    skips = [
        "aesara/tensor/",
        "aesara\\tensor\\",
        "aesara/compile/",
        "aesara\\compile\\",
        "aesara/graph/",
        "aesara\\graph\\",
        "aesara/scalar/basic.py",
        "aesara\\scalar\\basic.py",
        "aesara/sandbox/",
        "aesara\\sandbox\\",
        "aesara/scan/",
        "aesara\\scan\\",
        "aesara/sparse/",
        "aesara\\sparse\\",
        "aesara/typed_list/",
        "aesara\\typed_list\\",
    ]

    # When a compile-trace limit is configured, keep internal frames too.
    if config.traceback__compile_limit > 0:
        skips = []

    tr = simple_extract_stack(limit=user_line, skips=skips)
    # Different Python versions had different semantics for ``limit``
    # (Python 2.7's extract_stack included the call itself); store a
    # non-empty trace wrapped in a list, an empty one as-is.
    if tr:
        thing.tag.trace = [tr]
    else:
        thing.tag.trace = tr
    return thing
def get_variable_trace_string(v):
    """Render the creation backtrace(s) stored on ``v.tag.trace`` as text.

    Returns the empty string when no trace is recorded.  Handles both the
    legacy pickled format (a flat list of frame tuples) and the current
    format (a list of backtraces).
    """
    out = StringIO()
    # Old objects may lack a trace attribute entirely.
    trace = getattr(v.tag, "trace", [])
    if not (isinstance(trace, list) and len(trace) > 0):
        return out.getvalue()
    print(" \nBacktrace when that variable is created:\n", file=out)
    if isinstance(trace[0], tuple):
        # Legacy pickled trace: one flat list of frame tuples.
        traceback.print_list(v.tag.trace, out)
    else:
        # Current format: print each backtrace, labelled when there are several.
        for idx, frames in enumerate(trace):
            if len(trace) > 1:
                print(f"trace {int(idx)}", file=out)
            traceback.print_list(frames, out)
    return out.getvalue()
class InconsistencyError(Exception):
    """Thrown by listeners to ``FunctionGraph`` when the graph's state is invalid."""
class MissingInputError(Exception):
    """A symbolic input needed to compute the outputs is missing."""

    def __init__(self, *args, **kwargs):
        if kwargs:
            # Only a single ``variable`` keyword is supported; when given,
            # its creation backtrace (if any) is appended to the message.
            # (``list(...)`` is required on Python 3's keys view.)
            assert list(kwargs.keys()) == ["variable"]
            trace_msg = get_variable_trace_string(kwargs["variable"])
            if trace_msg:
                args = args + (trace_msg,)
        # Join on newlines so the message parts print on separate lines.
        super().__init__("\n".join(args))
class TestValueError(Exception):
    """Base exception class for all test value errors."""


class MethodNotDefined(Exception):
    """Raised by functions that are declared as part of an interface.

    When the user sees such an error, it is because an important interface
    function has been left out of an implementation class.
    """
class MetaType(ABCMeta):
    """Metaclass that derives common dunders from a class-level ``__props__``.

    When the class body defines ``__props__`` (a tuple of attribute-name
    strings), this metaclass injects ``_props``/``_props_dict`` helpers and —
    unless the class defines its own — ``__hash__``, ``__eq__`` and
    ``__str__`` implementations based on those attributes.
    """

    def __new__(cls, name, bases, dct):
        props = dct.get("__props__", None)
        if props is not None:
            # Validate the declaration early: a tuple of attribute names.
            if not isinstance(props, tuple):
                raise TypeError("__props__ has to be a tuple")
            if not all(isinstance(p, str) for p in props):
                raise TypeError("elements of __props__ have to be strings")

            # Each injected function closes over ``props`` from this scope.
            def _props(self):
                """
                Tuple of properties of all attributes
                """
                return tuple(getattr(self, a) for a in props)

            dct["_props"] = _props

            def _props_dict(self):
                """This return a dict of all ``__props__`` key-> value.

                This is useful in optimization to swap op that should have the
                same props. This help detect error that the new op have at
                least all the original props.

                """
                return {a: getattr(self, a) for a in props}

            dct["_props_dict"] = _props_dict

            # Only supply defaults the class body did not define itself.
            if "__hash__" not in dct:

                def __hash__(self):
                    # Hash on the concrete type plus all prop values.
                    return hash((type(self), tuple(getattr(self, a) for a in props)))

                dct["__hash__"] = __hash__

            if "__eq__" not in dct:

                def __eq__(self, other):
                    # Equal iff exactly the same type and all props match.
                    return type(self) == type(other) and tuple(
                        getattr(self, a) for a in props
                    ) == tuple(getattr(other, a) for a in props)

                dct["__eq__"] = __eq__

            if "__str__" not in dct:
                if len(props) == 0:

                    def __str__(self):
                        return f"{self.__class__.__name__}"

                else:

                    def __str__(self):
                        # e.g. ``MyOp{a=1, b='x'}``
                        return "{}{{{}}}".format(
                            self.__class__.__name__,
                            ", ".join(
                                "{}={!r}".format(p, getattr(self, p)) for p in props
                            ),
                        )

                dct["__str__"] = __str__

        return super().__new__(cls, name, bases, dct)
class MetaObject(metaclass=MetaType):
    """Base class for objects whose ``__eq__``/``__hash__``/``__str__`` are
    generated by ``MetaType`` from a ``__props__`` declaration."""

    # No instance __dict__; subclasses declare their own slots/attributes.
    __slots__: List = []

    def __ne__(self, other):
        # Complement of the (possibly MetaType-generated) __eq__.
        return not self == other
class Scratchpad:
    """An open attribute bag: arbitrary attributes may be set, read, merged
    and cleared at will."""

    def clear(self):
        """Drop every attribute stored on this instance."""
        self.__dict__.clear()

    def __update__(self, other):
        """Copy all of *other*'s attributes onto this instance; return self."""
        self.__dict__.update(other.__dict__)
        return self

    def __str__(self):
        return "scratchpad" + str(self.__dict__)

    def __repr__(self):
        # repr and str render identically.
        return self.__str__()

    def info(self):
        """Print this scratchpad's identity and contents, one line per entry."""
        print(f"<aesara.graph.utils.scratchpad instance at {id(self)}>")
        for key, value in self.__dict__.items():
            print(f" {key}: {value}")
class ValidatingScratchpad(Scratchpad):
    """This `Scratchpad` validates attribute values.

    Assignments to the attribute named ``attr`` are passed through
    ``attr_filter`` first; all other attributes are stored unchanged.
    """

    def __init__(self, attr, attr_filter):
        super().__init__()
        # Bypass our own __setattr__ for these two bookkeeping attributes,
        # otherwise setting them would consult the (not yet set) filter.
        object.__setattr__(self, "attr", attr)
        object.__setattr__(self, "attr_filter", attr_filter)

    def __setattr__(self, attr, obj):
        # Only the watched attribute is filtered; everything else is set as-is.
        if getattr(self, "attr", None) == attr:
            obj = self.attr_filter(obj)
        return object.__setattr__(self, attr, obj)
class D:
    """A minimal record type: ``D(a=1, b=2)`` exposes ``.a`` and ``.b``."""

    def __init__(self, **fields):
        # Every keyword argument becomes an instance attribute verbatim.
        vars(self).update(fields)
class AssocList:
    """An associative list.

    This class is like a `dict` that accepts unhashable keys by using an
    assoc list for internal use only.

    Hashable keys live in ``self._dict``; unhashable ones fall back to the
    linear ``self._list`` of ``(key, value)`` pairs.
    """

    def __init__(self):
        self._dict = {}
        self._list = []

    def __getitem__(self, item):
        # Missing keys yield None rather than raising.
        return self.get(item, None)

    def __setitem__(self, item, value):
        try:
            self._dict[item] = value
        except Exception:
            # Unhashable key: update an existing pair in place, else append.
            for i, (key, val) in enumerate(self._list):
                if key == item:
                    self._list[i] = (item, value)
                    return
            self._list.append((item, value))

    def __delitem__(self, item):
        try:
            if item in self._dict:
                del self._dict[item]
                return
        except TypeError as e:
            # Only an unhashable key may legitimately fail here.
            assert "unhashable type" in str(e)
        # Fall through to the linear store for unhashable (or absent) keys.
        for i, (key, val) in enumerate(self._list):
            if key == item:
                del self._list[i]
                return
        raise KeyError(item)

    def discard(self, item):
        # Same as __delitem__ but silent when the key is absent.
        try:
            if item in self._dict:
                del self._dict[item]
                return
        except TypeError as e:
            assert "unhashable type" in str(e)
        for i, (key, val) in enumerate(self._list):
            if key == item:
                del self._list[i]
                return

    def get(self, item, default):
        try:
            return self._dict[item]
        except Exception:
            # Linear scan; try ==, then an ``equals`` method, and finally
            # identity for keys whose comparisons themselves raise.
            for item2, value in self._list:
                try:
                    if item == item2:
                        return value
                    if item.equals(item2):
                        return value
                except Exception:
                    if item is item2:
                        return value
            return default

    def clear(self):
        self._dict = {}
        self._list = []

    def __repr__(self):
        return f"AssocList({self._dict}, {self._list})"
def toposort(prereqs_d):
    """Sort ``prereqs_d.keys()`` topologically.

    Parameters
    ----------
    prereqs_d : dict
        ``prereqs_d[x]`` contains all the elements that must come before
        ``x`` in the ordering.

    Returns
    -------
    list
        The keys of ``prereqs_d`` in an order compatible with every
        prerequisite constraint.

    Raises
    ------
    Exception
        If the constraints are unsatisfiable: cycles, prerequisites that are
        not themselves keys, or otherwise invalid orderings.
    """
    seq = []
    done = set()

    # Invert the mapping: postreqs_d[x] is everything that depends on x.
    postreqs_d = {}
    for x, prereqs in prereqs_d.items():
        for prereq in prereqs:
            postreqs_d.setdefault(prereq, set()).add(x)

    # Kahn's algorithm: start from elements with no prerequisites, then
    # repeatedly emit every element whose prerequisites are all done.
    # ``ready`` replaces the original name ``next``, which shadowed the
    # builtin.
    ready = {k for k in prereqs_d if not prereqs_d[k]}
    while ready:
        bases = ready
        ready = set()
        for x in bases:
            done.add(x)
            seq.append(x)
        for x in bases:
            for postreq in postreqs_d.get(x, []):
                if not prereqs_d[postreq].difference(done):
                    ready.add(postreq)

    # Anything not emitted means the constraints could not be satisfied.
    if len(prereqs_d) != len(seq):
        raise Exception(
            "Cannot sort topologically: there might be cycles, "
            "prereqs_d does not have a key for each element or "
            "some orderings contain invalid elements."
        )
    return seq
| 28.622912 | 85 | 0.547736 | import linecache
import sys
import traceback
from abc import ABCMeta
from io import StringIO
from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, TypeVar, Union
if TYPE_CHECKING:
from aesara.graph.basic import Apply, Variable
T = TypeVar("T", bound=Union["Apply", "Variable"])
def simple_extract_stack(
f=None, limit: Optional[int] = None, skips: Optional[Sequence[str]] = None
) -> List[Tuple[Optional[str], int, str, Optional[str]]]:
if skips is None:
skips = []
if f is None:
f = sys._getframe().f_back
if limit is None:
if hasattr(sys, "tracebacklimit"):
limit = sys.tracebacklimit
trace: List[Tuple[Optional[str], int, str, Optional[str]]] = []
n = 0
while f is not None and (limit is None or n < limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
line: Optional[str] = linecache.getline(filename, lineno, f.f_globals)
if line:
line = line.strip()
else:
line = None
f = f.f_back
if len(trace) == 0:
rm = False
for p in skips:
# in our test cases (e.g. in test_opt.py). We're not
if p in filename and "tests" not in filename:
rm = True
break
if rm:
continue
trace.append((filename, lineno, name, line))
n = n + 1
trace.reverse()
return trace
def add_tag_trace(thing: T, user_line: Optional[int] = None) -> T:
from aesara.configdefaults import config
if user_line is None:
user_line = config.traceback__limit
if user_line == -1:
user_line = None
skips = [
"aesara/tensor/",
"aesara\\tensor\\",
"aesara/compile/",
"aesara\\compile\\",
"aesara/graph/",
"aesara\\graph\\",
"aesara/scalar/basic.py",
"aesara\\scalar\\basic.py",
"aesara/sandbox/",
"aesara\\sandbox\\",
"aesara/scan/",
"aesara\\scan\\",
"aesara/sparse/",
"aesara\\sparse\\",
"aesara/typed_list/",
"aesara\\typed_list\\",
]
if config.traceback__compile_limit > 0:
skips = []
tr = simple_extract_stack(limit=user_line, skips=skips)
if tr:
thing.tag.trace = [tr]
else:
thing.tag.trace = tr
return thing
def get_variable_trace_string(v):
sio = StringIO()
tr = getattr(v.tag, "trace", [])
if isinstance(tr, list) and len(tr) > 0:
print(" \nBacktrace when that variable is created:\n", file=sio)
if isinstance(tr[0], tuple):
traceback.print_list(v.tag.trace, sio)
else:
for idx, subtr in enumerate(tr):
if len(tr) > 1:
print(f"trace {int(idx)}", file=sio)
traceback.print_list(subtr, sio)
return sio.getvalue()
class InconsistencyError(Exception):
class MissingInputError(Exception):
def __init__(self, *args, **kwargs):
if kwargs:
assert list(kwargs.keys()) == ["variable"]
error_msg = get_variable_trace_string(kwargs["variable"])
if error_msg:
args = args + (error_msg,)
s = "\n".join(args)
super().__init__(s)
class TestValueError(Exception):
class MethodNotDefined(Exception):
class MetaType(ABCMeta):
def __new__(cls, name, bases, dct):
props = dct.get("__props__", None)
if props is not None:
if not isinstance(props, tuple):
raise TypeError("__props__ has to be a tuple")
if not all(isinstance(p, str) for p in props):
raise TypeError("elements of __props__ have to be strings")
def _props(self):
return tuple(getattr(self, a) for a in props)
dct["_props"] = _props
def _props_dict(self):
return {a: getattr(self, a) for a in props}
dct["_props_dict"] = _props_dict
if "__hash__" not in dct:
def __hash__(self):
return hash((type(self), tuple(getattr(self, a) for a in props)))
dct["__hash__"] = __hash__
if "__eq__" not in dct:
def __eq__(self, other):
return type(self) == type(other) and tuple(
getattr(self, a) for a in props
) == tuple(getattr(other, a) for a in props)
dct["__eq__"] = __eq__
if "__str__" not in dct:
if len(props) == 0:
def __str__(self):
return f"{self.__class__.__name__}"
else:
def __str__(self):
return "{}{{{}}}".format(
self.__class__.__name__,
", ".join(
"{}={!r}".format(p, getattr(self, p)) for p in props
),
)
dct["__str__"] = __str__
return super().__new__(cls, name, bases, dct)
class MetaObject(metaclass=MetaType):
__slots__: List = []
def __ne__(self, other):
return not self == other
class Scratchpad:
def clear(self):
self.__dict__.clear()
def __update__(self, other):
self.__dict__.update(other.__dict__)
return self
def __str__(self):
return "scratchpad" + str(self.__dict__)
def __repr__(self):
return "scratchpad" + str(self.__dict__)
def info(self):
print(f"<aesara.graph.utils.scratchpad instance at {id(self)}>")
for k, v in self.__dict__.items():
print(f" {k}: {v}")
class ValidatingScratchpad(Scratchpad):
def __init__(self, attr, attr_filter):
super().__init__()
object.__setattr__(self, "attr", attr)
object.__setattr__(self, "attr_filter", attr_filter)
def __setattr__(self, attr, obj):
if getattr(self, "attr", None) == attr:
obj = self.attr_filter(obj)
return object.__setattr__(self, attr, obj)
class D:
def __init__(self, **d):
self.__dict__.update(d)
class AssocList:
def __init__(self):
self._dict = {}
self._list = []
def __getitem__(self, item):
return self.get(item, None)
def __setitem__(self, item, value):
try:
self._dict[item] = value
except Exception:
for i, (key, val) in enumerate(self._list):
if key == item:
self._list[i] = (item, value)
return
self._list.append((item, value))
def __delitem__(self, item):
try:
if item in self._dict:
del self._dict[item]
return
except TypeError as e:
assert "unhashable type" in str(e)
for i, (key, val) in enumerate(self._list):
if key == item:
del self._list[i]
return
raise KeyError(item)
def discard(self, item):
try:
if item in self._dict:
del self._dict[item]
return
except TypeError as e:
assert "unhashable type" in str(e)
for i, (key, val) in enumerate(self._list):
if key == item:
del self._list[i]
return
def get(self, item, default):
try:
return self._dict[item]
except Exception:
for item2, value in self._list:
try:
if item == item2:
return value
if item.equals(item2):
return value
except Exception:
if item is item2:
return value
return default
def clear(self):
self._dict = {}
self._list = []
def __repr__(self):
return f"AssocList({self._dict}, {self._list})"
def toposort(prereqs_d):
seq = []
done = set()
postreqs_d = {}
for x, prereqs in prereqs_d.items():
for prereq in prereqs:
postreqs_d.setdefault(prereq, set()).add(x)
next = {k for k in prereqs_d if not prereqs_d[k]}
while next:
bases = next
next = set()
for x in bases:
done.add(x)
seq.append(x)
for x in bases:
for postreq in postreqs_d.get(x, []):
if not prereqs_d[postreq].difference(done):
next.add(postreq)
if len(prereqs_d) != len(seq):
raise Exception(
"Cannot sort topologically: there might be cycles, "
"prereqs_d does not have a key for each element or "
"some orderings contain invalid elements."
)
return seq
| true | true |
1c47827600948d2b87bf218f91b1371ea3cbb3eb | 3,488 | py | Python | fhirclient/models/coding.py | mdx-dev/client-py | f6c16c9bd386c5b05d69753b89c6519d568814ac | [
"Apache-2.0"
] | null | null | null | fhirclient/models/coding.py | mdx-dev/client-py | f6c16c9bd386c5b05d69753b89c6519d568814ac | [
"Apache-2.0"
] | null | null | null | fhirclient/models/coding.py | mdx-dev/client-py | f6c16c9bd386c5b05d69753b89c6519d568814ac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Coding) on 2019-01-22.
# 2019, SMART Health IT.
from . import element
class Coding(element.Element):
    """ A reference to a code defined by a terminology system. """

    resource_type = "Coding"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ Symbol in syntax defined by the system.
        Type `str`. """

        self.display = None
        """ Representation defined by the system.
        Type `str`. """

        self.system = None
        """ Identity of the terminology system.
        Type `str`. """

        self.userSelected = None
        """ If this coding was chosen directly by the user.
        Type `bool`. """

        self.version = None
        """ Version of the system - if relevant.
        Type `str`. """

        super(Coding, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # (name, json name, type, is_list, of_many, not_optional)
        js = super(Coding, self).elementProperties()
        js.extend([
            ("code", "code", str, False, None, False),
            ("display", "display", str, False, None, False),
            ("system", "system", str, False, None, False),
            ("userSelected", "userSelected", bool, False, None, False),
            ("version", "version", str, False, None, False),
        ])
        return js
| 12.966543 | 103 | 0.294725 |
from . import element
class Coding(element.Element):
    """A coded value drawn from a terminology system."""

    resource_type = "Coding"

    def __init__(self, jsondict=None, strict=True):
        # All Coding fields start out unset; the superclass initializer may
        # populate them from `jsondict`.
        for field in ("code", "display", "system", "userSelected", "version"):
            setattr(self, field, None)
        super(Coding, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (name, json_name, type, is_list, of_many, not_optional).
        js = super(Coding, self).elementProperties()
        for prop, prop_type in (("code", str), ("display", str),
                                ("system", str), ("userSelected", bool),
                                ("version", str)):
            js.append((prop, prop, prop_type, False, None, False))
        return js
| true | true |
1c4782c8740a735f5aa4dfddb82ffcdda14f7ceb | 689 | py | Python | packages/cuda/cuSolverDn.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | 3 | 2019-08-02T21:02:47.000Z | 2021-09-08T13:59:43.000Z | packages/cuda/cuSolverDn.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | packages/cuda/cuSolverDn.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Lijun Zhu
# california institute of technology
# (c) 2016-2019 all rights reserved
#
# externals
from . import cuda as libcuda # the extension
from .Matrix import Matrix
class cuSolverDn:
    """
    Wrapper for cusolverDn lib utilities.
    """

    # The original methods were declared without `self` and without
    # @staticmethod, so they raised TypeError when invoked through an
    # instance (`cuSolverDn().create_handle()`). Decorating them keeps the
    # existing class-level call sites working and also allows instance calls.

    @staticmethod
    def create_handle():
        """
        Create a new cusolverDn handle.

        Returns: an opaque handle object produced by the CUDA extension.
        """
        handle = libcuda.cusolverDnCreate()
        return handle

    @staticmethod
    def get_current_handle():
        """
        Return the cusolverDn handle bound to the current device.

        Falls back to initializing device 0 if no device has been
        selected yet.
        """
        # default device handle
        from . import manager
        if manager.current_device is None:
            manager.device(0)
        handle = manager.current_device.cusolverdn_handle
        return handle
# end of file
| 20.264706 | 57 | 0.628447 |
from . import cuda as libcuda
from .Matrix import Matrix
class cuSolverDn:
    # Thin wrapper over the cusolverDn portion of the CUDA extension.
    # NOTE(review): both methods are defined without `self` and without
    # @staticmethod, so they only work when called on the class itself
    # (e.g. `cuSolverDn.create_handle()`), not on an instance — confirm
    # this matches all call sites.
    def create_handle():
        # Create a fresh cusolverDn handle via the native extension.
        handle = libcuda.cusolverDnCreate()
        return handle
    def get_current_handle():
        # Local import avoids a circular import at module load time.
        from . import manager
        # If no device was ever selected, initialize device 0 as default.
        if manager.current_device is None:
            manager.device(0)
        # Each device object owns a lazily-created cusolverDn handle.
        handle = manager.current_device.cusolverdn_handle
        return handle
| true | true |
1c478487162412bd45e541a0e720bee7c90272d6 | 42,379 | py | Python | tensorflow/python/framework/func_graph.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 3 | 2016-08-20T04:02:24.000Z | 2019-04-21T06:18:41.000Z | tensorflow/python/framework/func_graph.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 59 | 2019-06-17T09:37:49.000Z | 2022-01-19T01:21:34.000Z | tensorflow/python/framework/func_graph.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 1 | 2019-10-31T09:22:30.000Z | 2019-10-31T09:22:30.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
# LazyLoader breaks the circular dependency function -> func_graph: the
# modules are resolved on first attribute access instead of at import time.
function = LazyLoader("function", globals(),
                      "tensorflow.python.eager.function")
def_function = LazyLoader(
    "def_function", globals(),
    "tensorflow.python.eager.def_function")


# Collections a FuncGraph shares (reads AND writes) with its outer graph;
# every other collection is copied read-only in FuncGraph.__init__.
WHITELIST_COLLECTIONS = [
    ops.GraphKeys.GLOBAL_VARIABLES,
    ops.GraphKeys.LOCAL_VARIABLES,
    ops.GraphKeys.TRAINABLE_VARIABLES,
    variable_scope._VARSTORE_KEY,  # pylint: disable=protected-access
    variable_scope._VARSCOPESTORE_KEY  # pylint: disable=protected-access
]
class UnknownArgument(object):
  """Placeholder marking an argument type that is not currently handled."""
def convert_structure_to_signature(structure, arg_names=None):
  """Convert a potentially nested structure to a signature.

  Args:
    structure: Structure to convert, where top level collection is a list or a
      tuple.
    arg_names: Optional list of arguments that has equal number of elements as
      `structure` and is used for naming corresponding TensorSpecs.

  Returns:
    Identical structure that has TensorSpec objects instead of Tensors and
    UknownArgument instead of any unsupported types.
  """
  # Composite tensors are decomposed first so each component leaf can be
  # encoded independently.
  structure = composite_tensor.replace_composites_with_components(structure)

  def encode_arg(arg, path):
    """Encode one flattened leaf: TensorSpec, plain value, or Unknown."""
    if isinstance(arg, ops.Tensor):
      try:
        explicit_name = compat.as_str(
            arg.op.get_attr("_user_specified_name"))
      except ValueError:
        explicit_name = None
      if explicit_name and path and explicit_name != path[0]:
        # The user explicitly named the argument differently than the
        # function argument name; honor the explicit name.
        spec_name = explicit_name
      else:
        spec_name = "/".join(str(piece) for piece in path)
      return tensor_spec.TensorSpec(arg.shape, arg.dtype, spec_name)
    if isinstance(arg, (int, float, bool, type(None), dtypes.DType,
                        tensor_spec.TensorSpec)):
      return arg
    return UnknownArgument()

  # The flattened tuple-paths provide explicit names for the TensorSpecs,
  # which downstream binding code relies on.
  flat_with_paths = nest.flatten_with_tuple_paths(
      structure, expand_composites=True)
  if arg_names:
    if len(arg_names) != len(structure):
      raise ValueError(
          "Passed in arg_names don't match actual signature (%s)." % arg_names)
    # Replace each top-level path index with the corresponding argument
    # name: "(2,'a',1)" becomes "(arg_names[2],'a',1)".
    flat_with_paths = [((arg_names[path[0]],) + path[1:], leaf)
                       for path, leaf in flat_with_paths]

  encoded = [encode_arg(leaf, path) for path, leaf in flat_with_paths]
  return nest.pack_sequence_as(structure, encoded, expand_composites=True)
class FuncGraph(ops.Graph):
  """Graph representing a function body.

  Attributes:
    name: The name of the function.
    inputs: Placeholder tensors representing the inputs to this function. The
      tensors are in this FuncGraph. This represents "regular" inputs as well as
      captured inputs (i.e. the values of self.captures), with the regular
      inputs coming first.
    outputs: Tensors that will be returned by this function. The tensors are in
      this FuncGraph.
    control_outputs: Operations that must be executed before the function
      represented by this graph can be said to have been executed.
    structured_input_signature: A tuple of (args, kwargs), which are both
      possibly-nested python objects that were received by this function. Note
      that these structures might contain Python `None`s.
    structured_outputs: A possibly-nested python object which will be returned
      by this function. The Tensors in this structure are the same as those of
      self.outputs. Note that this structure might contain Python `None`s.
    variables: Variables that should be watched during function execution.
    outer_graph: The graph this function is defined in. May be another FuncGraph
      or the global default Graph.
    captures: Maps external tensor -> internal tensor (i.e. input placeholder).
      The entries are in the order they were captured.
    control_captures: Set of external ops on which this graph has a control
      dependency.
    seed: The graph-level random seed.
    capture_by_value: If True, the func graph will capture Variables by value
      instead of reference.
  """

  def __init__(self, name, collections=None, capture_by_value=None):
    """Construct a new FuncGraph.

    The graph will inherit its graph key, collections, seed, and distribution
    strategy stack from the current context or graph.

    Args:
      name: the name of the function.
      collections: a dictionary of collections this FuncGraph should start
        with. If not specified (None), the FuncGraph will read (but not write
        to) the outer graph's collections that are not whitelisted, and both
        read and write to the outer graph's collections that are whitelisted.
        The current whitelisted collections are the global variables, the
        local variables, and the trainable variables.
        Defaults to None.
      capture_by_value: An optional boolean. If True, the func graph will
        capture Variables by value instead of reference. By default inherit
        from outer graphs, and failing that will default to False.
    """
    super(FuncGraph, self).__init__()

    self.name = name
    self.inputs = []
    self.outputs = []
    self.control_outputs = []
    self.control_captures = set()
    self.structured_input_signature = None
    self.structured_outputs = None
    # Weak references so functions don't keep their variables alive; see the
    # `variables` property for the resolution of these refs.
    self._weak_variables = []
    self._watched_variables = weakref.WeakSet()
    self.outer_graph = ops.get_default_graph()
    # Ordered so that self.inputs lines up with capture insertion order.
    self.captures = py_collections.OrderedDict()

    # Inherit capture-by-value from outer graph.
    if capture_by_value is not None:
      self.capture_by_value = capture_by_value
    elif self.outer_graph is not None and isinstance(
        self.outer_graph, FuncGraph):
      self.capture_by_value = self.outer_graph.capture_by_value
    else:
      self.capture_by_value = False

    self._building_function = True
    # Map from resource tensor name to last op (in program order) which uses
    # this tensor. Used to enforce that execution order matches program order
    # for resource tensors.
    self._last_op_using_resource_tensor = {}

    graph = self.outer_graph

    if context.executing_eagerly():
      self.seed = context.global_seed()
      # [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of
      # any None op_seed for random_op in the function, in which case we end up
      # using function seed, which could be unintended behavior for the op.
      self._seed_used = False
    else:
      self.seed = graph.seed
      self._seed_used = False
      # TODO(allenl): Figure out if we can remove colocation stack
      # specialization (currently used in cond_v2), here and in the cache key.
      self._colocation_stack = graph._colocation_stack.copy()  # pylint: disable=protected-access

    if collections is None:
      # Copy non-whitelisted collections read-only; share (by reference) the
      # whitelisted variable collections with the outer graph.
      for collection_name in graph.get_all_collection_keys():
        if collection_name not in WHITELIST_COLLECTIONS:
          self._collections[collection_name] = graph.get_collection(
              collection_name)
      for collection_name in WHITELIST_COLLECTIONS:
        self._collections[collection_name] = graph.get_collection_ref(
            collection_name)
    else:
      self._collections = collections

  def __str__(self):
    return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))

  def watch_variable(self, v):
    """Marks the variable v as accessed while building this graph."""
    # Note: rebinding `self` walks the chain of enclosing FuncGraphs so the
    # variable is recorded on every graph up to (but not including) the
    # outermost non-FuncGraph.
    while self is not None and isinstance(self, FuncGraph):
      self._watched_variables.add(v)
      self = self.outer_graph

  def control_dependencies(self, control_inputs):
    """Handles control dependencies.

    FuncGraph wraps Graph's control_dependencies logic by first filtering out
    any external tensors / operations and storing them in the graph's
    control_captures member. Any consumers of this function graph must then
    decide how to handle the control captures.

    Args:
      control_inputs: A list of `Operation` or `Tensor` objects which
        must be executed or computed before running the operations
        defined in the context. Can also be `None` to clear the control
        dependencies.

    Returns:
      A context manager that specifies control dependencies for all
      operations constructed within the context.

    Raises:
      TypeError: If `control_inputs` is not a list of `Operation` or
        `Tensor` objects.
    """
    if control_inputs is None:
      return super(FuncGraph, self).control_dependencies(control_inputs)

    filtered_control_inputs = []
    for c in control_inputs:
      # Check for _UnreadVariable
      if (isinstance(c, ops.IndexedSlices) or
          (hasattr(c, "_handle") and hasattr(c, "op"))):
        c = c.op
      graph_element = ops._as_graph_element(c)  # pylint: disable=protected-access
      if graph_element is None:
        graph_element = c
      # External elements become control captures; internal ones are passed
      # through to the regular Graph implementation.
      if graph_element is not None and getattr(
          graph_element, "graph", None) is not self:
        self.control_captures.add(graph_element)
      else:
        filtered_control_inputs.append(graph_element)
    return super(FuncGraph, self).control_dependencies(filtered_control_inputs)

  def as_default(self):
    outer_cm = super(FuncGraph, self).as_default()

    @tf_contextlib.contextmanager
    def inner_cm():
      """Context manager for copying distribute.Strategy scope information."""
      graph = ops.get_default_graph()
      # pylint: disable=protected-access
      # TODO(b/112906995, nareshmodi): distribution strategy depends on
      # inheriting this stack from the default graph even in eager mode. Maybe
      # it should be part of the eager context? This would also allow us to
      # remove a get_default_graph() call from the function cache lookup.
      old_strategy_stack = self._distribution_strategy_stack
      self._distribution_strategy_stack = list(
          graph._distribution_strategy_stack)
      # We ignore device placements from any outer scopes while tracing the
      # function when possible, to avoid hard-coding them in the function
      # graph. "Default" placements come from the PartitionedCallOp's placement,
      # so that the same trace of the Python function may be placed on several
      # different devices and saved functions may be placed on new devices when
      # restored.
      old_device_stack = self._device_function_stack
      if context.executing_eagerly():
        if self._distribution_strategy_stack:
          self._add_device_to_stack(context.context().device_name)
      else:
        if (self._distribution_strategy_stack
            or device_stack_has_callable(graph._device_function_stack)):
          # Hard-code devices from device functions in the function body
          self._device_function_stack = graph._device_function_stack.copy()

      old_creator_stack = self._variable_creator_stack
      self._variable_creator_stack = graph._variable_creator_stack
      # Inherit the graph key, since this is used for matching variables in
      # optimizers.
      old_graph_key = self._graph_key
      self._graph_key = graph._graph_key
      # pylint: enable=protected-access

      with outer_cm as g:
        try:
          yield g
        finally:
          # Restore everything borrowed from the outer graph on exit.
          self._distribution_strategy_stack = old_strategy_stack
          self._device_function_stack = old_device_stack
          self._variable_creator_stack = old_creator_stack
          self._graph_key = old_graph_key
    return inner_cm()

  @property
  def output_types(self):
    return [t.dtype for t in self.outputs]

  @property
  def output_shapes(self):
    return [t.shape for t in self.outputs]

  @property
  def variables(self):
    """A list of variables accessed by this FuncGraph.

    Note that functions keep only weak references to variables. Calling the
    function after a variable it accesses has been deleted is an error.

    Yields:
      Strong references to variables accessed by this FuncGraph.
    """
    for weak_v in self._weak_variables:
      v = weak_v()
      if v is None:
        raise AssertionError(
            "Called a function referencing variables which have been deleted. "
            "This likely means that function-local variables were created and "
            "not referenced elsewhere in the program. This is generally a "
            "mistake; consider storing variables in an object attribute on "
            "first call.")
      yield v

  @variables.setter
  def variables(self, var_list):
    self._weak_variables = [weakref.ref(v) for v in var_list]

  def _capture_by_value(
      self,
      op_type,
      inputs,
      dtypes,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    # When capturing by value, do the read outside
    reverse_captures = dict((v, k) for k, v in self.captures.items())
    uncaptured_inputs = [reverse_captures.get(t, t) for t in inputs]
    with ops.init_scope():
      if context.executing_eagerly():
        attr_list = ("dtype", int(attrs["dtype"].type))
        value, = execute.execute(
            compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
            context.context())
      else:
        op = ops.get_default_graph().create_op(
            op_type, uncaptured_inputs, dtypes, input_types, name, attrs,
            op_def, compute_shapes, compute_device)
        value = op.outputs[0]
    captured_value = self.capture(value)
    return captured_value.op

  def create_op(
      self,
      op_type,
      inputs,
      dtypes=None,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    """Like Graph.create_op, except handles external input tensors.

    This overload adds functionality to create_op to "capture" any external
    input tensors, i.e. tensors from the eager context or outer function graphs
    if this is a nested function. See `capture` for more information.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
      dtypes: (Optional) A list of `DType` objects that will be the types of the
        tensors that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of
        the tensors that the operation consumes. By default, uses the base
        `DType` of each input in `inputs`. Operations that expect
        reference-typed inputs must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
        computed).
      compute_device: (Optional.) If True, device functions will be executed
        to compute the device property of the Operation.

    Returns:
      An `Operation` object.
    """
    if self.capture_by_value and op_type in ["ReadVariableOp",
                                             "ResourceGather"]:
      return self._capture_by_value(
          op_type, inputs, dtypes, input_types, name, attrs, op_def,
          compute_shapes, compute_device)

    # This capturing logic interacts poorly with control flow contexts which
    # want to replace inputs of ops far too late in the process. This can lead
    # the context to get confused and try to create an Enter for an Enter. We
    # can detect this here and skip the additional Enter which can confuse loop
    # validation logic.
    if op_type == "Enter" and inputs[0].op.type == "Enter":
      if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
        return inputs[0].op
    # Calling AddValue on the control flow contexts to force creation of the
    # backward accumulators in the original graph before we create placeholders
    # to capture the inputs.
    ctxt = ops.get_default_graph()._control_flow_context  # pylint: disable=protected-access
    for i, inp in enumerate(inputs):
      # TPU Estimator defines a control flow context with no AddValue method.
      if ctxt is not None and hasattr(ctxt, "AddValue"):
        inp = ctxt.AddValue(inp)
      inp = self.capture(inp)
      inputs[i] = inp
    return super(FuncGraph, self).create_op(
        op_type, inputs, dtypes, input_types, name, attrs, op_def,
        compute_device=compute_device)

  def capture(self, tensor, name=None):
    """Captures `tensor` if it's external to this graph.

    If `tensor` is from a different graph, returns a placeholder for it.
    `tensor` and the placeholder will appear in self.captures, and the
    placeholder will appear in self.inputs. Multiple calls to this method with
    the same `tensor` argument will return the same placeholder. If `tensor` is
    from this graph, returns `tensor`.

    Args:
      tensor: Tensor. May be from this FuncGraph or a different graph.
      name: Optional name if a placeholder is created.

    Returns:
      Tensor from this FuncGraph.
    """
    # Note: _forward_func_graph is currently only set when building the gradient
    # graph graph of a defun call. If the backwards graph tries to capture
    # tensors those will be captured first in the forward graph. This
    # makes sure that any tensor needed by a custom_gradient is correctly
    # captured.
    if (getattr(tensor, "graph", None) is not self and
        hasattr(self, "_forward_func_graph") and
        isinstance(self._forward_func_graph, FuncGraph)):
      tensor = self._forward_func_graph.capture(tensor)
    if isinstance(tensor, ops.EagerTensor):
      if name is None:
        name = str(ops.uid())
      return self._capture_helper(tensor, name)
    if tensor.graph is not self:
      if name is None:
        name = tensor.op.name
      # Reject captures from *inner* FuncGraphs: walk the tensor's graph chain
      # outward; if we encounter `self` on the way, the tensor is nested
      # inside us rather than enclosing us.
      inner_graph = tensor.graph
      while inner_graph is not None and isinstance(inner_graph, FuncGraph):
        if inner_graph is self:
          raise ValueError(
              "Trying to capture a tensor from an inner function. This can be "
              "caused by accessing a tensor defined inside a loop or "
              "conditional body, or a subfunction, from a calling function, "
              "without going through the proper return value mechanism. "
              "Consider using TensorFlow mechanisms such as TensorArrays "
              "to return tensors from inner functions or loop / conditional "
              "bodies. Tensor: %s; tensor graph: %s; this graph: %s"
              % (tensor, tensor.graph, self))
        inner_graph = inner_graph.outer_graph
      return self._capture_helper(tensor, name)
    return tensor

  def _capture_helper(self, tensor, name):
    # Reuse an existing placeholder for a previously-captured tensor so each
    # external tensor maps to exactly one function input.
    captured_tensor = self.captures.get(tensor, None)
    if captured_tensor is None:
      captured_tensor = _create_substitute_placeholder(tensor, name=name,
                                                       dtype=tensor.dtype)
      self.captures[tensor] = captured_tensor
      self.inputs.append(captured_tensor)
    tape.record_operation("captured_value", [captured_tensor], [tensor],
                          lambda x: [x])
    return captured_tensor

  @property
  def external_captures(self):
    """External tensors captured by this function."""
    return list(self.captures.keys())

  @property
  def internal_captures(self):
    """Placeholders in this function corresponding captured tensors."""
    return list(self.captures.values())
def func_graph_from_py_func(name,
                            python_func,
                            args,
                            kwargs,
                            signature=None,
                            func_graph=None,
                            autograph=False,
                            autograph_options=None,
                            add_control_dependencies=True,
                            arg_names=None,
                            op_return_value=None,
                            collections=None,
                            capture_by_value=None,
                            override_flat_arg_shapes=None):
  """Returns a `FuncGraph` generated from `python_func`.

  Args:
    name: an identifier for the function.
    python_func: the Python function to trace.
    args: the positional args with which the Python function should be called;
      ignored if a signature is provided.
    kwargs: the keyword args with which the Python function should be called;
      ignored if a signature is provided.
    signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
      and dtypes of the arguments. When a signature is provided, `args` and
      `kwargs` are ignored, and `python_func` is traced with Tensors conforming
      to `signature`. If `None`, the shapes and dtypes are inferred from the
      inputs.
    func_graph: Optional. An instance of FuncGraph. If provided, we will use
      this graph else a new one is built and returned.
    autograph: whether to use autograph to compile `python_func`.
      See https://www.tensorflow.org/guide/autograph for more information.
    autograph_options: additional knobs to control when `autograph=True`.
      See https://www.tensorflow.org/guide/autograph for more information.
    add_control_dependencies: If True, automatically adds control dependencies
      to ensure program order matches execution order and stateful ops always
      execute.
    arg_names: Optional list of argument names, used to give input placeholders
      recognizable names.
    op_return_value: Optional. A Tensor. If set and `python_func` returns
      Operations, those return values will be replaced with this value. If not
      set, returning an Operation triggers an error.
    collections: a dictionary of collections this FuncGraph should start
      with. If not specified (None), the FuncGraph will read (but not write to)
      the outer graph's collections that are not whitelisted, and both
      read and write to the outer graph's collections that are whitelisted.
      The current whitelisted collections are the global variables, the
      local variables, and the trainable variables.
      Defaults to None.
    capture_by_value: An optional boolean. If True, the func graph will capture
      Variables by value instead of reference. By default inherit from outer
      graphs, and failing that will default to False.
    override_flat_arg_shapes: An optional list of instances that are either
      `None` or `TensorShape`. The length must match that of
      `nest.flatten((args, kwargs), expand_composites=True)`. The entries
      containing value `None` must match entries in flattened arguments
      containing non-tensors, while entries containing a `TensorShape` must
      match entries in the flattened arguments containing tensors.

  Returns:
    A FuncGraph.

  Raises:
    TypeError: If any of `python_func`'s return values is neither `None` nor a
      `Tensor`.
    ValueError: If both `signature` and `override_flat_arg_shapes` are
      passed in.
  """
  if op_return_value is not None:
    assert isinstance(op_return_value, ops.Tensor), op_return_value
  if func_graph is None:
    func_graph = FuncGraph(name, collections=collections,
                           capture_by_value=capture_by_value)
  assert isinstance(func_graph, FuncGraph)
  if add_control_dependencies:
    control_manager = AutomaticControlDependencies()
  else:
    control_manager = ops.NullContextmanager()
  with func_graph.as_default(), control_manager as a:
    current_scope = variable_scope.get_variable_scope()
    # "recource" is a pre-existing typo in this local name; preserved as-is.
    default_use_recource = current_scope.use_resource
    current_scope.set_use_resource(True)

    if signature is not None and override_flat_arg_shapes is not None:
      raise ValueError(
          "Passed both signature and override_flat_arg_shapes: %s and %s."
          % (signature, override_flat_arg_shapes))

    if signature is not None:
      args = signature
      kwargs = {}

    # Creates and names placeholders for all arguments.
    if override_flat_arg_shapes is not None:
      flat_args = nest.flatten(args, expand_composites=True)
      arg_shapes = override_flat_arg_shapes[:len(flat_args)]
      kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
    else:
      arg_shapes = None
      kwarg_shapes = None
    func_args = _get_defun_inputs_from_args(
        args, arg_names, flat_shapes=arg_shapes)
    func_kwargs = _get_defun_inputs_from_kwargs(
        kwargs, flat_shapes=kwarg_shapes)

    # Convert all Tensors into TensorSpecs before saving the structured inputs.
    # If storing pure concrete functions that are not called through polymorphic
    # functions, we don't have access to FunctionSpec, so we need to call the
    # TensorSpecs by their `arg_names` for later binding.
    func_graph.structured_input_signature = (
        convert_structure_to_signature(func_args, arg_names),
        convert_structure_to_signature(func_kwargs))

    flat_func_args = nest.flatten(func_args, expand_composites=True)
    flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
    # Temporarily set inputs to allow graph building code to inspect
    # them. Reassigned below.
    func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
                         if isinstance(arg, ops.Tensor)]

    # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
    # Variables to help check whether mutation happens in calling the function
    # Copy the recursive list, tuple and map structure, but not base objects
    func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
                                             expand_composites=True)
    func_kwargs_before = nest.pack_sequence_as(
        func_kwargs, flat_func_kwargs, expand_composites=True)

    def convert(x):
      """Converts a function output to a Tensor."""
      if x is None:
        return None
      if op_return_value is not None and isinstance(x, ops.Operation):
        # TODO(b/79881896): we currently can't capture external control deps, so
        # this won't work if x needs to be captured (i.e. if python_func returns
        # captured Operations).
        with ops.control_dependencies([x]):
          x = array_ops.identity(op_return_value)
      elif not isinstance(x, tensor_array_ops.TensorArray):
        try:
          x = ops.convert_to_tensor_or_composite(x)
        except (ValueError, TypeError):
          raise TypeError(
              "To be compatible with tf.contrib.eager.defun, Python functions "
              "must return zero or more Tensors; in compilation of %s, found "
              "return value of type %s, which is not a Tensor." %
              (str(python_func), type(x)))
      if add_control_dependencies:
        x = a.mark_as_return(x)
      return x

    try:
      if autograph:
        from tensorflow.python import autograph  # pylint: disable=g-import-not-at-top
        _, original_func = tf_decorator.unwrap(python_func)

        def wrapper(*args, **kwargs):
          # Note: functions annotated with @tf.function should always be
          # converted even though they would meet autograph's whitelisting
          # criteria.
          # If this assumption is ever broken, converted_call will need to
          # handle the possibility of original_func still being a shim, e.g.
          # bound to WeakrefSelf.
          return autograph.converted_call(
              original_func, None,
              autograph.ConversionOptions(
                  recursive=True,
                  optional_features=autograph_options,
                  force_conversion=True,
              ), args, kwargs)

        # Wrapping around a decorator allows checks like tf_inspect.getargspec
        # to be accurate.
        converted_func = tf_decorator.make_decorator(original_func, wrapper)
        python_func = tf_decorator.rewrap(python_func, original_func,
                                          converted_func)

      func_outputs = python_func(*func_args, **func_kwargs)

      # invariant: `func_outputs` contains only Tensors, CompositeTensors,
      # TensorArrays and `None`s.
      func_outputs = nest.map_structure(convert, func_outputs,
                                        expand_composites=True)

      check_mutation(func_args_before, func_args)
      check_mutation(func_kwargs_before, func_kwargs)
    finally:
      current_scope.set_use_resource(default_use_recource)

    # Variables in `func_args`, `func_kwargs` should be explicit inputs
    # to the function, not captured inputs.
    graph_variables = list(func_graph._watched_variables)  # pylint: disable=protected-access
    arg_variables = set()
    inputs = []
    for arg in (nest.flatten(func_args, expand_composites=True) +
                nest.flatten(func_kwargs, expand_composites=True)):
      if isinstance(arg, resource_variable_ops.ResourceVariable):
        # Even if an argument variable was not used in the function, we've
        # already manually captured the resource Tensor when creating argument
        # placeholders.
        resource_placeholder = func_graph.captures.pop(arg.handle, None)
        if resource_placeholder is None:
          continue
        arg_variables.add(arg)
        inputs.append(resource_placeholder)
      elif isinstance(arg, ops.Tensor):
        inputs.append(arg)
    variables = [v for v in graph_variables if v not in arg_variables]
    func_graph.inputs = inputs + list(func_graph.captures.values())

    func_graph.structured_outputs = func_outputs
    # Returning a closed-over tensor does not trigger convert_to_tensor.
    func_graph.outputs.extend(
        func_graph.capture(x)
        for x in flatten(func_graph.structured_outputs)
        if x is not None)

    func_graph.variables = variables

  if add_control_dependencies:
    func_graph.control_outputs.extend(control_manager.ops_which_must_run)

  # Register any other functions defined in the graph.
  with ops.init_scope():
    if context.executing_eagerly():
      for f in func_graph._functions.values():  # pylint: disable=protected-access
        # TODO(ashankar): What about the gradient registry?
        context.add_function(f._c_func.func)  # pylint: disable=protected-access

  return func_graph
def maybe_captured(tensor):
  """If t is a captured value placeholder, returns the original captured value.

  Args:
    tensor: Tensor.

  Returns:
    A tensor, potentially from a different Graph/FuncGraph.
  """
  # Eager tensors have no producing op, so they can never be capture
  # placeholders; everything else is checked against the graph's capture map.
  is_graph_tensor = not isinstance(tensor, ops.EagerTensor)
  if (is_graph_tensor and tensor.op.graph.building_function
      and tensor.op.type == "Placeholder"):
    for captured_value, placeholder in tensor.op.graph.captures.items():
      if placeholder == tensor:
        # The captured value may itself be a placeholder of an outer
        # FuncGraph, so resolve recursively.
        return maybe_captured(captured_value)
  return tensor
def device_stack_has_callable(device_stack):
  """Checks whether a device stack contains a callable."""
  # Early-exit scan instead of a generator expression: returns True as soon
  # as any stacked device spec holds a device *function* rather than a name.
  for spec in device_stack.peek_objs():
    if callable(spec._device_name_or_function):  # pylint: disable=protected-access
      return True
  return False
def check_mutation(n1, n2):
  """Check if two list of arguments are exactly the same."""
  errmsg = ("Function to be traced should not modify structure of input "
            "arguments. Check if your function has list and dictionary "
            "operations that alter input arguments, "
            "such as `list.pop`, `list.append`")
  # First make sure the nesting itself (lists/dicts/composites) is unchanged.
  try:
    nest.assert_same_structure(n1, n2, expand_composites=True)
  except ValueError:
    raise ValueError(errmsg)
  # Then require every leaf to be the *same object*: tracing must not have
  # replaced elements in-place either.
  flat_before = nest.flatten(n1, expand_composites=True)
  flat_after = nest.flatten(n2, expand_composites=True)
  for before, after in zip(flat_before, flat_after):
    if before is not after:
      raise ValueError(errmsg)
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
  """Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.

  Args:
    sequence: A nested structure of Tensors, CompositeTensors, and
      TensorArrays.

  Returns:
    A list of tensors.
  """
  def _leaf_to_tensor(leaf):
    # TensorArrays are not flattenable themselves; their `.flow` tensor
    # stands in for them in the flat output.
    if isinstance(leaf, tensor_array_ops.TensorArray):
      return leaf.flow
    return leaf

  return [_leaf_to_tensor(leaf)
          for leaf in nest.flatten(sequence, expand_composites=True)]
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
  """Like `nest.pack_sequence_as` but also builds TensorArrays from flows.

  Args:
    structure: The structure to pack into. May contain Tensors,
      CompositeTensors, or TensorArrays.
    flat_sequence: An iterable containing tensors.

  Returns:
    A nested structure.

  Raises:
    AssertionError if `structure` and `flat_sequence` are not compatible.
  """
  flat_sequence = list(flat_sequence)
  flat_structure = nest.flatten(structure, expand_composites=True)
  if len(flat_structure) != len(flat_sequence):
    raise ValueError("Mismatch in element count")
  # Wherever the template holds a TensorArray, the corresponding flat entry
  # is its flow tensor; rebuild a TensorArray around that flow before packing.
  for i, template in enumerate(flat_structure):
    if isinstance(template, tensor_array_ops.TensorArray):
      flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
          old_ta=template, flow=flat_sequence[i])
  return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
def _create_substitute_placeholder(value, name=None, dtype=None):
  """Creates a placeholder for `value` and propagates shape info to it."""
  placeholder_dtype = dtype if dtype is not None else value.dtype
  # A `None` control-dependency scope guarantees the capturing placeholder is
  # created outside of any active control flow context.
  with ops.control_dependencies(None):
    placeholder = graph_placeholder(
        dtype=placeholder_dtype, shape=value.shape, name=name)
  # Propagate resource/variant handle shape metadata onto the placeholder.
  custom_gradient.copy_handle_data(value, placeholder)
  return placeholder
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
  """Maps Python function positional args to graph-construction inputs."""
  # For positional arguments, the structure to pack results back into is the
  # argument sequence itself.
  return _get_defun_inputs(args=args, names=names, structure=args,
                           flat_shapes=flat_shapes)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
  """Maps python function args to graph-construction inputs.

  For each leaf of `args`, builds the graph-side input: a new placeholder for
  Tensors/TensorSpecs, a capture placeholder for ResourceVariables, and a
  pass-through for everything else.

  Args:
    args: A flat list of user-specified arguments.
    names: A list of strings with user-specified argument names, same length as
      `args`. May be `None`, in which case a generic name is used.
    structure: The original argument list or dictionary.
    flat_shapes: A flat list of values that are either `None` or
      instances of `TensorShape`. If provided, then length must match
      that of `nest.flatten(args, expand_composites=True)`; and locations where
      `args` are instances of `Tensor` must have a corresponding `TensorShape`
      in `flat_shapes`. May be `None`, in which case exact shapes are read
      directly from the args.

  Returns:
    Placeholders with the same structure as `structure`.

  Raises:
    RuntimeError: if `flat_shapes` is provided, but
      `len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.
    RuntimeError: if a shape from `flat_shapes` is not None
      for an argument that is not a `Tensor`, `TensorSpec`,
      or `ResourceVariable`.
  """
  func_graph = ops.get_default_graph()
  function_inputs = []
  if names is None:
    names = [None] * len(args)
  if flat_shapes is None:
    # No overrides: every leaf reads its shape from the arg itself.
    shapes_iter = itertools.repeat(None)
  else:
    len_flat_args = len(nest.flatten(args, expand_composites=True))
    if len_flat_args != len(flat_shapes):
      raise RuntimeError(
          "Length of fully flat shapes (%d) must match that of "
          "flatten(args) (%d). args: %s, flat_shapes: %s"
          % (len(flat_shapes),
             len_flat_args,
             args,
             flat_shapes))
    shapes_iter = iter(flat_shapes)
  for arg_value, name in zip(args, names):
    flattened = nest.flatten(arg_value, expand_composites=True)
    tensor_specs = [
        arg for arg in flattened if isinstance(arg, tensor_spec.TensorSpec)
    ]
    specified_names = [arg.name for arg in tensor_specs if arg.name]
    # TensorSpec names within one structured arg are all-or-nothing: partial
    # naming would make placeholder naming ambiguous.
    if specified_names and len(specified_names) < len(tensor_specs):
      raise ValueError("If specifying TensorSpec names for nested structures, "
                       "either zero or all names have to be specified.")
    for arg in flattened:
      # We have a shape entry for each arg, regardless of whether it's a real
      # Tensor or not.  For non-tensor entries it should be None.
      # NOTE: `shapes_iter` is shared across all args, so every leaf must
      # consume exactly one entry to keep the override list aligned.
      shape = next(shapes_iter)
      if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
        if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
          requested_name = arg.name
        else:
          requested_name = name
        # Override shape takes precedence over the arg's own (static) shape.
        placeholder_shape = shape if shape is not None else arg.shape
        try:
          placeholder = graph_placeholder(
              arg.dtype, placeholder_shape,
              name=requested_name)
        except ValueError:
          # Sometimes parameter names are not valid op names, so fall back to
          # unnamed placeholders.
          placeholder = graph_placeholder(arg.dtype, placeholder_shape)
        if name is not None:
          # Record the requested/user-specified name in case it's different
          # than the uniquified name, for validation when exporting signatures.
          placeholder.op._set_attr(  # pylint: disable=protected-access
              "_user_specified_name",
              attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
        function_inputs.append(placeholder)
      elif isinstance(arg, resource_variable_ops.ResourceVariable):
        # Capture arg variables to create placeholders for them. These will be
        # removed as captures after the function is traced (since otherwise
        # we'd just add it back with a new placeholder when the variable was
        # referenced).
        # NOTE(review): unlike the Tensor branch, `name` is not guarded
        # against None here before `compat.as_bytes(name)` — presumably
        # callers always supply a name for variable args; confirm upstream.
        placeholder = func_graph.capture(arg.handle, name=name)
        placeholder.op._set_attr(  # pylint: disable=protected-access
            "_user_specified_name",
            attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
        # The variable itself (not its placeholder) is returned in the
        # packed structure, so the traced function sees the variable object.
        function_inputs.append(arg)
      else:
        # Non-tensor leaf (e.g. a Python constant): a shape override here
        # indicates caller error.
        if shape is not None:
          raise RuntimeError(
              "Expected provided shape override to be None for arg that isn't "
              "a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
              % (arg, shape, args))
        function_inputs.append(arg)
  return nest.pack_sequence_as(structure, function_inputs,
                               expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
  """Maps Python function keyword args to graph-construction inputs."""
  if kwargs:
    # Sort by keyword for a deterministic argument order, then split the
    # (name, value) pairs into parallel sequences.
    sorted_items = sorted(kwargs.items())
    names = [item[0] for item in sorted_items]
    args = [item[1] for item in sorted_items]
  else:
    names = []
    args = []
  return _get_defun_inputs(
      args, names, structure=kwargs, flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
  """Removes reference cycles in `func_graph` FuncGraph.

  Helpful for making sure the garbage collector doesn't need to run when
  the FuncGraph goes out of scope, e.g. in tests using defun with
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).

  Args:
    func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
      after this function.
  """
  # TODO(b/115366440): Delete this method when a custom OrderedDict is added.
  # `clear()` on the OrderedDict leaves some cycles around, so pop entries
  # one at a time before dismantling the (now-empty) dict itself.
  captures = func_graph.captures
  while captures:
    captures.popitem()
  memory.dismantle_ordered_dict(captures)
  ops.dismantle_graph(func_graph)
| 42.720766 | 97 | 0.697704 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
def_function = LazyLoader(
"def_function", globals(),
"tensorflow.python.eager.def_function")
WHITELIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY,
variable_scope._VARSCOPESTORE_KEY
]
class UnknownArgument(object):
pass
def convert_structure_to_signature(structure, arg_names=None):
structure = composite_tensor.replace_composites_with_components(structure)
def encode_arg(arg, path):
if isinstance(arg, ops.Tensor):
user_specified_name = None
try:
user_specified_name = compat.as_str(
arg.op.get_attr("_user_specified_name"))
except ValueError:
pass
if path and user_specified_name and user_specified_name != path[0]:
name = user_specified_name
else:
name = "/".join([str(p) for p in path])
return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
if isinstance(arg, (
int,
float,
bool,
type(None),
dtypes.DType,
tensor_spec.TensorSpec,
)):
return arg
return UnknownArgument()
flattened = nest.flatten_with_tuple_paths(structure, expand_composites=True)
if arg_names:
if len(arg_names) != len(structure):
raise ValueError(
"Passed in arg_names don't match actual signature (%s)." % arg_names)
# Replace all top-level names with their actual arg_names. If a path before
# was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
flattened = [
((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
]
mapped = [encode_arg(arg, path) for path, arg in flattened]
return nest.pack_sequence_as(structure, mapped, expand_composites=True)
class FuncGraph(ops.Graph):
def __init__(self, name, collections=None, capture_by_value=None):
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.control_outputs = []
self.control_captures = set()
self.structured_input_signature = None
self.structured_outputs = None
self._weak_variables = []
self._watched_variables = weakref.WeakSet()
self.outer_graph = ops.get_default_graph()
self.captures = py_collections.OrderedDict()
# Inherit capture-by-value from outer graph.
if capture_by_value is not None:
self.capture_by_value = capture_by_value
elif self.outer_graph is not None and isinstance(
self.outer_graph, FuncGraph):
self.capture_by_value = self.outer_graph.capture_by_value
else:
self.capture_by_value = False
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
# [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of
# any None op_seed for random_op in the function, in which case we end up
# using function seed, which could be unintended behavior for the op.
self._seed_used = False
else:
self.seed = graph.seed
self._seed_used = False
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
if collections is None:
for collection_name in graph.get_all_collection_keys():
if collection_name not in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
else:
self._collections = collections
def __str__(self):
return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))
def watch_variable(self, v):
while self is not None and isinstance(self, FuncGraph):
self._watched_variables.add(v)
self = self.outer_graph
def control_dependencies(self, control_inputs):
if control_inputs is None:
return super(FuncGraph, self).control_dependencies(control_inputs)
filtered_control_inputs = []
for c in control_inputs:
# Check for _UnreadVariable
if (isinstance(c, ops.IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
graph_element = ops._as_graph_element(c) # pylint: disable=protected-access
if graph_element is None:
graph_element = c
if graph_element is not None and getattr(
graph_element, "graph", None) is not self:
self.control_captures.add(graph_element)
else:
filtered_control_inputs.append(graph_element)
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
def as_default(self):
outer_cm = super(FuncGraph, self).as_default()
@tf_contextlib.contextmanager
def inner_cm():
graph = ops.get_default_graph()
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on
# inheriting this stack from the default graph even in eager mode. Maybe
# it should be part of the eager context? This would also allow us to
# remove a get_default_graph() call from the function cache lookup.
old_strategy_stack = self._distribution_strategy_stack
self._distribution_strategy_stack = list(
graph._distribution_strategy_stack)
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
old_device_stack = self._device_function_stack
if context.executing_eagerly():
if self._distribution_strategy_stack:
self._add_device_to_stack(context.context().device_name)
else:
if (self._distribution_strategy_stack
or device_stack_has_callable(graph._device_function_stack)):
self._device_function_stack = graph._device_function_stack.copy()
old_creator_stack = self._variable_creator_stack
self._variable_creator_stack = graph._variable_creator_stack
old_graph_key = self._graph_key
self._graph_key = graph._graph_key
with outer_cm as g:
try:
yield g
finally:
self._distribution_strategy_stack = old_strategy_stack
self._device_function_stack = old_device_stack
self._variable_creator_stack = old_creator_stack
self._graph_key = old_graph_key
return inner_cm()
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def variables(self):
for weak_v in self._weak_variables:
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
yield v
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def _capture_by_value(
self,
op_type,
inputs,
dtypes,
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
reverse_captures = dict((v, k) for k, v in self.captures.items())
uncaptured_inputs = [reverse_captures.get(t, t) for t in inputs]
with ops.init_scope():
if context.executing_eagerly():
attr_list = ("dtype", int(attrs["dtype"].type))
value, = execute.execute(
compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
context.context())
else:
op = ops.get_default_graph().create_op(
op_type, uncaptured_inputs, dtypes, input_types, name, attrs,
op_def, compute_shapes, compute_device)
value = op.outputs[0]
captured_value = self.capture(value)
return captured_value.op
def create_op(
self,
op_type,
inputs,
dtypes=None,
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
if self.capture_by_value and op_type in ["ReadVariableOp",
"ResourceGather"]:
return self._capture_by_value(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_shapes, compute_device)
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
ctxt = ops.get_default_graph()._control_flow_context
for i, inp in enumerate(inputs):
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
inputs[i] = inp
return super(FuncGraph, self).create_op(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_device=compute_device)
def capture(self, tensor, name=None):
if (getattr(tensor, "graph", None) is not self and
hasattr(self, "_forward_func_graph") and
isinstance(self._forward_func_graph, FuncGraph)):
tensor = self._forward_func_graph.capture(tensor)
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
return self._capture_helper(tensor, name)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
inner_graph = tensor.graph
while inner_graph is not None and isinstance(inner_graph, FuncGraph):
if inner_graph is self:
raise ValueError(
"Trying to capture a tensor from an inner function. This can be "
"caused by accessing a tensor defined inside a loop or "
"conditional body, or a subfunction, from a calling function, "
"without going through the proper return value mechanism. "
"Consider using TensorFlow mechanisms such as TensorArrays "
"to return tensors from inner functions or loop / conditional "
"bodies. Tensor: %s; tensor graph: %s; this graph: %s"
% (tensor, tensor.graph, self))
inner_graph = inner_graph.outer_graph
return self._capture_helper(tensor, name)
return tensor
def _capture_helper(self, tensor, name):
captured_tensor = self.captures.get(tensor, None)
if captured_tensor is None:
captured_tensor = _create_substitute_placeholder(tensor, name=name,
dtype=tensor.dtype)
self.captures[tensor] = captured_tensor
self.inputs.append(captured_tensor)
tape.record_operation("captured_value", [captured_tensor], [tensor],
lambda x: [x])
return captured_tensor
@property
def external_captures(self):
return list(self.captures.keys())
@property
def internal_captures(self):
return list(self.captures.values())
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
autograph_options=None,
add_control_dependencies=True,
arg_names=None,
op_return_value=None,
collections=None,
capture_by_value=None,
override_flat_arg_shapes=None):
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name, collections=collections,
capture_by_value=capture_by_value)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
control_manager = AutomaticControlDependencies()
else:
control_manager = ops.NullContextmanager()
with func_graph.as_default(), control_manager as a:
current_scope = variable_scope.get_variable_scope()
default_use_recource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None and override_flat_arg_shapes is not None:
raise ValueError(
"Passed both signature and override_flat_arg_shapes: %s and %s."
% (signature, override_flat_arg_shapes))
if signature is not None:
args = signature
kwargs = {}
if override_flat_arg_shapes is not None:
flat_args = nest.flatten(args, expand_composites=True)
arg_shapes = override_flat_arg_shapes[:len(flat_args)]
kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
else:
arg_shapes = None
kwarg_shapes = None
func_args = _get_defun_inputs_from_args(
args, arg_names, flat_shapes=arg_shapes)
func_kwargs = _get_defun_inputs_from_kwargs(
kwargs, flat_shapes=kwarg_shapes)
# TensorSpecs by their `arg_names` for later binding.
func_graph.structured_input_signature = (
convert_structure_to_signature(func_args, arg_names),
convert_structure_to_signature(func_kwargs))
flat_func_args = nest.flatten(func_args, expand_composites=True)
flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
# Temporarily set inputs to allow graph building code to inspect
# them. Reassigned below.
func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
if isinstance(arg, ops.Tensor)]
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
expand_composites=True)
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, flat_func_kwargs, expand_composites=True)
def convert(x):
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_composite(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.contrib.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = a.mark_as_return(x)
return x
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
# Note: functions annotated with @tf.function should always be
# converted even though they would meet autograph's whitelisting
return autograph.converted_call(
original_func, None,
autograph.ConversionOptions(
recursive=True,
optional_features=autograph_options,
force_conversion=True,
), args, kwargs)
converted_func = tf_decorator.make_decorator(original_func, wrapper)
python_func = tf_decorator.rewrap(python_func, original_func,
converted_func)
func_outputs = python_func(*func_args, **func_kwargs)
func_outputs = nest.map_structure(convert, func_outputs,
expand_composites=True)
check_mutation(func_args_before, func_args)
check_mutation(func_kwargs_before, func_kwargs)
finally:
current_scope.set_use_resource(default_use_recource)
graph_variables = list(func_graph._watched_variables)
arg_variables = set()
inputs = []
for arg in (nest.flatten(func_args, expand_composites=True) +
nest.flatten(func_kwargs, expand_composites=True)):
if isinstance(arg, resource_variable_ops.ResourceVariable):
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.captures.pop(arg.handle, None)
if resource_placeholder is None:
continue
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in graph_variables if v not in arg_variables]
func_graph.inputs = inputs + list(func_graph.captures.values())
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
if add_control_dependencies:
func_graph.control_outputs.extend(control_manager.ops_which_must_run)
# Register any other functions defined in the graph.
with ops.init_scope():
if context.executing_eagerly():
for f in func_graph._functions.values(): # pylint: disable=protected-access
# TODO(ashankar): What about the gradient registry?
context.add_function(f._c_func.func) # pylint: disable=protected-access
return func_graph
def maybe_captured(tensor):
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures.items():
if tensor == placeholder_t:
return maybe_captured(input_t)
# pylint: enable=protected-access
return tensor
def device_stack_has_callable(device_stack):
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2):
errmsg = ("Function to be traced should not modify structure of input "
"arguments. Check if your function has list and dictionary "
"operations that alter input arguments, "
"such as `list.pop`, `list.append`")
try:
nest.assert_same_structure(n1, n2, expand_composites=True)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),
nest.flatten(n2, expand_composites=True)):
if arg1 is not arg2:
raise ValueError(errmsg)
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
flat_sequence = nest.flatten(sequence, expand_composites=True)
return [
item.flow if isinstance(item, tensor_array_ops.TensorArray) else item
for item in flat_sequence]
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
flat_sequence = list(flat_sequence)
flattened_structure = nest.flatten(structure, expand_composites=True)
if len(flattened_structure) != len(flat_sequence):
raise ValueError("Mismatch in element count")
for i in range(len(flat_sequence)):
if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):
flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
old_ta=flattened_structure[i], flow=flat_sequence[i])
return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
def _create_substitute_placeholder(value, name=None, dtype=None):
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=value.shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
return _get_defun_inputs(
args, names, structure=args, flat_shapes=flat_shapes)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(args)
if flat_shapes is None:
shapes_iter = itertools.repeat(None)
else:
len_flat_args = len(nest.flatten(args, expand_composites=True))
if len_flat_args != len(flat_shapes):
raise RuntimeError(
"Length of fully flat shapes (%d) must match that of "
"flatten(args) (%d). args: %s, flat_shapes: %s"
% (len(flat_shapes),
len_flat_args,
args,
flat_shapes))
shapes_iter = iter(flat_shapes)
for arg_value, name in zip(args, names):
flattened = nest.flatten(arg_value, expand_composites=True)
tensor_specs = [
arg for arg in flattened if isinstance(arg, tensor_spec.TensorSpec)
]
specified_names = [arg.name for arg in tensor_specs if arg.name]
if specified_names and len(specified_names) < len(tensor_specs):
raise ValueError("If specifying TensorSpec names for nested structures, "
"either zero or all names have to be specified.")
for arg in flattened:
# We have a shape entry for each arg, regadless of whether it's a real
shape = next(shapes_iter)
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder_shape = shape if shape is not None else arg.shape
try:
placeholder = graph_placeholder(
arg.dtype, placeholder_shape,
name=requested_name)
except ValueError:
placeholder = graph_placeholder(arg.dtype, placeholder_shape)
if name is not None:
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, resource_variable_ops.ResourceVariable):
# Capture arg variables to create placeholders for them. These will be
# removed as captures after the function is traced (since otherwise we'd
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr(
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
if shape is not None:
raise RuntimeError(
"Expected provided shape override to be None for arg that isn't "
"a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
% (arg, shape, args))
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs,
expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
if kwargs:
names, args = zip(*sorted(kwargs.items()))
else:
names = []
args = []
return _get_defun_inputs(
args, names, structure=kwargs, flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while func_graph.captures:
func_graph.captures.popitem()
memory.dismantle_ordered_dict(func_graph.captures)
ops.dismantle_graph(func_graph)
| true | true |
1c478522810cfe82e7a178b902b41a16a8504685 | 14,006 | py | Python | sdk/python/pulumi_azure_native/azurestackhci/v20210101preview/get_cluster.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/azurestackhci/v20210101preview/get_cluster.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/azurestackhci/v20210101preview/get_cluster.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
]
@pulumi.output_type
class GetClusterResult:
    """
    Cluster details.
    """
    def __init__(__self__, aad_client_id=None, aad_tenant_id=None, billing_model=None, cloud_id=None, cloud_management_endpoint=None, created_at=None, created_by=None, created_by_type=None, id=None, last_billing_timestamp=None, last_modified_at=None, last_modified_by=None, last_modified_by_type=None, last_sync_timestamp=None, location=None, name=None, provisioning_state=None, registration_timestamp=None, reported_properties=None, status=None, tags=None, trial_days_remaining=None, type=None):
        # DRY rewrite of 23 copy-pasted validate-and-set stanzas.
        # (name, expected type, value) in declaration order so the *first*
        # invalid argument raises, exactly like the original per-field checks,
        # and `pulumi.set` is called in the same order.
        checked_args = [
            ('aad_client_id', str, aad_client_id),
            ('aad_tenant_id', str, aad_tenant_id),
            ('billing_model', str, billing_model),
            ('cloud_id', str, cloud_id),
            ('cloud_management_endpoint', str, cloud_management_endpoint),
            ('created_at', str, created_at),
            ('created_by', str, created_by),
            ('created_by_type', str, created_by_type),
            ('id', str, id),
            ('last_billing_timestamp', str, last_billing_timestamp),
            ('last_modified_at', str, last_modified_at),
            ('last_modified_by', str, last_modified_by),
            ('last_modified_by_type', str, last_modified_by_type),
            ('last_sync_timestamp', str, last_sync_timestamp),
            ('location', str, location),
            ('name', str, name),
            ('provisioning_state', str, provisioning_state),
            ('registration_timestamp', str, registration_timestamp),
            ('reported_properties', dict, reported_properties),
            ('status', str, status),
            ('tags', dict, tags),
            ('trial_days_remaining', float, trial_days_remaining),
            ('type', str, type),
        ]
        for arg_name, expected, value in checked_args:
            # Truthiness test (not `is not None`) matches the generated code:
            # falsy values skip the type check.
            if value and not isinstance(value, expected):
                raise TypeError(f"Expected argument '{arg_name}' to be a {expected.__name__}")
            pulumi.set(__self__, arg_name, value)

    @property
    @pulumi.getter(name="aadClientId")
    def aad_client_id(self) -> str:
        """
        App id of cluster AAD identity.
        """
        return pulumi.get(self, "aad_client_id")

    @property
    @pulumi.getter(name="aadTenantId")
    def aad_tenant_id(self) -> str:
        """
        Tenant id of cluster AAD identity.
        """
        return pulumi.get(self, "aad_tenant_id")

    @property
    @pulumi.getter(name="billingModel")
    def billing_model(self) -> str:
        """
        Type of billing applied to the resource.
        """
        return pulumi.get(self, "billing_model")

    @property
    @pulumi.getter(name="cloudId")
    def cloud_id(self) -> str:
        """
        Unique, immutable resource id.
        """
        return pulumi.get(self, "cloud_id")

    @property
    @pulumi.getter(name="cloudManagementEndpoint")
    def cloud_management_endpoint(self) -> Optional[str]:
        """
        Endpoint configured for management from the Azure portal
        """
        return pulumi.get(self, "cloud_management_endpoint")

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        """
        The timestamp of resource creation (UTC).
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[str]:
        """
        The identity that created the resource.
        """
        return pulumi.get(self, "created_by")

    @property
    @pulumi.getter(name="createdByType")
    def created_by_type(self) -> Optional[str]:
        """
        The type of identity that created the resource.
        """
        return pulumi.get(self, "created_by_type")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastBillingTimestamp")
    def last_billing_timestamp(self) -> str:
        """
        Most recent billing meter timestamp.
        """
        return pulumi.get(self, "last_billing_timestamp")

    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> Optional[str]:
        """
        The timestamp of resource last modification (UTC)
        """
        return pulumi.get(self, "last_modified_at")

    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> Optional[str]:
        """
        The identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by")

    @property
    @pulumi.getter(name="lastModifiedByType")
    def last_modified_by_type(self) -> Optional[str]:
        """
        The type of identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by_type")

    @property
    @pulumi.getter(name="lastSyncTimestamp")
    def last_sync_timestamp(self) -> str:
        """
        Most recent cluster sync timestamp.
        """
        return pulumi.get(self, "last_sync_timestamp")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="registrationTimestamp")
    def registration_timestamp(self) -> str:
        """
        First cluster sync timestamp.
        """
        return pulumi.get(self, "registration_timestamp")

    @property
    @pulumi.getter(name="reportedProperties")
    def reported_properties(self) -> 'outputs.ClusterReportedPropertiesResponse':
        """
        Properties reported by cluster agent.
        """
        return pulumi.get(self, "reported_properties")

    @property
    @pulumi.getter
    def status(self) -> str:
        """
        Status of the cluster agent.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="trialDaysRemaining")
    def trial_days_remaining(self) -> float:
        """
        Number of days remaining in the trial period.
        """
        return pulumi.get(self, "trial_days_remaining")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetClusterResult(GetClusterResult):
    """A `GetClusterResult` that can also be used with `await`.

    Awaiting it completes immediately and produces a plain
    `GetClusterResult` copy of this object's fields.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `if False: yield` makes this method a generator — which is
        # what `await` requires — without ever actually suspending.
        if False:
            yield self
        return GetClusterResult(
            aad_client_id=self.aad_client_id,
            aad_tenant_id=self.aad_tenant_id,
            billing_model=self.billing_model,
            cloud_id=self.cloud_id,
            cloud_management_endpoint=self.cloud_management_endpoint,
            created_at=self.created_at,
            created_by=self.created_by,
            created_by_type=self.created_by_type,
            id=self.id,
            last_billing_timestamp=self.last_billing_timestamp,
            last_modified_at=self.last_modified_at,
            last_modified_by=self.last_modified_by,
            last_modified_by_type=self.last_modified_by_type,
            last_sync_timestamp=self.last_sync_timestamp,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            registration_timestamp=self.registration_timestamp,
            reported_properties=self.reported_properties,
            status=self.status,
            tags=self.tags,
            trial_days_remaining=self.trial_days_remaining,
            type=self.type)
def get_cluster(cluster_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
    """
    Cluster details.


    :param str cluster_name: The name of the cluster.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    invoke_args = {
        'clusterName': cluster_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw_result = pulumi.runtime.invoke('azure-native:azurestackhci/v20210101preview:getCluster', invoke_args, opts=opts, typ=GetClusterResult).value

    # Copy every field of the plain result into the awaitable wrapper.
    field_names = (
        'aad_client_id', 'aad_tenant_id', 'billing_model', 'cloud_id',
        'cloud_management_endpoint', 'created_at', 'created_by',
        'created_by_type', 'id', 'last_billing_timestamp', 'last_modified_at',
        'last_modified_by', 'last_modified_by_type', 'last_sync_timestamp',
        'location', 'name', 'provisioning_state', 'registration_timestamp',
        'reported_properties', 'status', 'tags', 'trial_days_remaining',
        'type',
    )
    return AwaitableGetClusterResult(**{field: getattr(raw_result, field) for field in field_names})
| 39.677054 | 496 | 0.667214 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
]
@pulumi.output_type
class GetClusterResult:
def __init__(__self__, aad_client_id=None, aad_tenant_id=None, billing_model=None, cloud_id=None, cloud_management_endpoint=None, created_at=None, created_by=None, created_by_type=None, id=None, last_billing_timestamp=None, last_modified_at=None, last_modified_by=None, last_modified_by_type=None, last_sync_timestamp=None, location=None, name=None, provisioning_state=None, registration_timestamp=None, reported_properties=None, status=None, tags=None, trial_days_remaining=None, type=None):
if aad_client_id and not isinstance(aad_client_id, str):
raise TypeError("Expected argument 'aad_client_id' to be a str")
pulumi.set(__self__, "aad_client_id", aad_client_id)
if aad_tenant_id and not isinstance(aad_tenant_id, str):
raise TypeError("Expected argument 'aad_tenant_id' to be a str")
pulumi.set(__self__, "aad_tenant_id", aad_tenant_id)
if billing_model and not isinstance(billing_model, str):
raise TypeError("Expected argument 'billing_model' to be a str")
pulumi.set(__self__, "billing_model", billing_model)
if cloud_id and not isinstance(cloud_id, str):
raise TypeError("Expected argument 'cloud_id' to be a str")
pulumi.set(__self__, "cloud_id", cloud_id)
if cloud_management_endpoint and not isinstance(cloud_management_endpoint, str):
raise TypeError("Expected argument 'cloud_management_endpoint' to be a str")
pulumi.set(__self__, "cloud_management_endpoint", cloud_management_endpoint)
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if created_by and not isinstance(created_by, str):
raise TypeError("Expected argument 'created_by' to be a str")
pulumi.set(__self__, "created_by", created_by)
if created_by_type and not isinstance(created_by_type, str):
raise TypeError("Expected argument 'created_by_type' to be a str")
pulumi.set(__self__, "created_by_type", created_by_type)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_billing_timestamp and not isinstance(last_billing_timestamp, str):
raise TypeError("Expected argument 'last_billing_timestamp' to be a str")
pulumi.set(__self__, "last_billing_timestamp", last_billing_timestamp)
if last_modified_at and not isinstance(last_modified_at, str):
raise TypeError("Expected argument 'last_modified_at' to be a str")
pulumi.set(__self__, "last_modified_at", last_modified_at)
if last_modified_by and not isinstance(last_modified_by, str):
raise TypeError("Expected argument 'last_modified_by' to be a str")
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_by_type and not isinstance(last_modified_by_type, str):
raise TypeError("Expected argument 'last_modified_by_type' to be a str")
pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
if last_sync_timestamp and not isinstance(last_sync_timestamp, str):
raise TypeError("Expected argument 'last_sync_timestamp' to be a str")
pulumi.set(__self__, "last_sync_timestamp", last_sync_timestamp)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if registration_timestamp and not isinstance(registration_timestamp, str):
raise TypeError("Expected argument 'registration_timestamp' to be a str")
pulumi.set(__self__, "registration_timestamp", registration_timestamp)
if reported_properties and not isinstance(reported_properties, dict):
raise TypeError("Expected argument 'reported_properties' to be a dict")
pulumi.set(__self__, "reported_properties", reported_properties)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if trial_days_remaining and not isinstance(trial_days_remaining, float):
raise TypeError("Expected argument 'trial_days_remaining' to be a float")
pulumi.set(__self__, "trial_days_remaining", trial_days_remaining)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="aadClientId")
def aad_client_id(self) -> str:
return pulumi.get(self, "aad_client_id")
@property
@pulumi.getter(name="aadTenantId")
def aad_tenant_id(self) -> str:
return pulumi.get(self, "aad_tenant_id")
@property
@pulumi.getter(name="billingModel")
def billing_model(self) -> str:
return pulumi.get(self, "billing_model")
@property
@pulumi.getter(name="cloudId")
def cloud_id(self) -> str:
return pulumi.get(self, "cloud_id")
@property
@pulumi.getter(name="cloudManagementEndpoint")
def cloud_management_endpoint(self) -> Optional[str]:
return pulumi.get(self, "cloud_management_endpoint")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastBillingTimestamp")
def last_billing_timestamp(self) -> str:
return pulumi.get(self, "last_billing_timestamp")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> Optional[str]:
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def last_modified_by_type(self) -> Optional[str]:
return pulumi.get(self, "last_modified_by_type")
@property
@pulumi.getter(name="lastSyncTimestamp")
def last_sync_timestamp(self) -> str:
return pulumi.get(self, "last_sync_timestamp")
@property
@pulumi.getter
def location(self) -> str:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="registrationTimestamp")
def registration_timestamp(self) -> str:
return pulumi.get(self, "registration_timestamp")
@property
@pulumi.getter(name="reportedProperties")
def reported_properties(self) -> 'outputs.ClusterReportedPropertiesResponse':
return pulumi.get(self, "reported_properties")
@property
@pulumi.getter
def status(self) -> str:
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="trialDaysRemaining")
def trial_days_remaining(self) -> float:
return pulumi.get(self, "trial_days_remaining")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetClusterResult(GetClusterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetClusterResult(
aad_client_id=self.aad_client_id,
aad_tenant_id=self.aad_tenant_id,
billing_model=self.billing_model,
cloud_id=self.cloud_id,
cloud_management_endpoint=self.cloud_management_endpoint,
created_at=self.created_at,
created_by=self.created_by,
created_by_type=self.created_by_type,
id=self.id,
last_billing_timestamp=self.last_billing_timestamp,
last_modified_at=self.last_modified_at,
last_modified_by=self.last_modified_by,
last_modified_by_type=self.last_modified_by_type,
last_sync_timestamp=self.last_sync_timestamp,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
registration_timestamp=self.registration_timestamp,
reported_properties=self.reported_properties,
status=self.status,
tags=self.tags,
trial_days_remaining=self.trial_days_remaining,
type=self.type)
def get_cluster(cluster_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:azurestackhci/v20210101preview:getCluster', __args__, opts=opts, typ=GetClusterResult).value
return AwaitableGetClusterResult(
aad_client_id=__ret__.aad_client_id,
aad_tenant_id=__ret__.aad_tenant_id,
billing_model=__ret__.billing_model,
cloud_id=__ret__.cloud_id,
cloud_management_endpoint=__ret__.cloud_management_endpoint,
created_at=__ret__.created_at,
created_by=__ret__.created_by,
created_by_type=__ret__.created_by_type,
id=__ret__.id,
last_billing_timestamp=__ret__.last_billing_timestamp,
last_modified_at=__ret__.last_modified_at,
last_modified_by=__ret__.last_modified_by,
last_modified_by_type=__ret__.last_modified_by_type,
last_sync_timestamp=__ret__.last_sync_timestamp,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
registration_timestamp=__ret__.registration_timestamp,
reported_properties=__ret__.reported_properties,
status=__ret__.status,
tags=__ret__.tags,
trial_days_remaining=__ret__.trial_days_remaining,
type=__ret__.type)
| true | true |
1c478544308d1c24ccd1470dc7b2c5e5197b8d45 | 1,181 | py | Python | src/main.py | ITAnalyst-JU/process-logger | a51d4604b2dc3047dec9adfec96334ff20a3782f | [
"MIT"
] | null | null | null | src/main.py | ITAnalyst-JU/process-logger | a51d4604b2dc3047dec9adfec96334ff20a3782f | [
"MIT"
] | null | null | null | src/main.py | ITAnalyst-JU/process-logger | a51d4604b2dc3047dec9adfec96334ff20a3782f | [
"MIT"
] | null | null | null | #!/bin/python
import time
import argparse
from Invoker import Invoker
def parse_cmd_name(parts):
    """Return the executable's basename from a command invocation list."""
    # TODO maybe for eg. 'make testall' this shoule return 'make testall' and not 'make'?
    assert len(parts) >= 1
    assert len(parts[0]) >= 1
    basename = parts[0].rsplit('/', 1)[-1]
    assert ' ' not in basename
    return basename
def get_time_str():
    """Current local time formatted as 'Y.M.D H:M:S' (fields not zero-padded)."""
    now = time.localtime()
    date_part = f'{now.tm_year}.{now.tm_mon}.{now.tm_mday}'
    time_part = f'{now.tm_hour}:{now.tm_min}:{now.tm_sec}'
    return date_part + ' ' + time_part
# TODO better process for deciding filename and title
def main():
    """Parse command-line arguments and hand the command off to Invoker."""
    parser = argparse.ArgumentParser(description="Monitor command's output in real time.")
    parser.add_argument('cmd', type=str, nargs='+',
                        help='command invocation to be monitored')
    parser.add_argument('-o', '--output', metavar='filename', dest='log_file_location', default=None, type=str,
                        help='write the output to a given filename')
    args = parser.parse_args()

    # Title doubles as the default log file name.
    title = '{} {}'.format(parse_cmd_name(args.cmd), get_time_str())
    log_path = args.log_file_location
    if log_path is None:
        log_path = title + '.html'
    Invoker(args.cmd, log_path, title)
# Entry point when executed as a script (no effect on import).
if __name__ == '__main__':
    main()
| 28.804878 | 111 | 0.647756 |
import time
import argparse
from Invoker import Invoker
def parse_cmd_name(parts):
assert len(parts) >= 1
assert len(parts[0]) >= 1
x = parts[0].split('/')[-1]
assert ' ' not in x
return x
def get_time_str():
t = time.localtime()
return f'{t.tm_year}.{t.tm_mon}.{t.tm_mday} {t.tm_hour}:{t.tm_min}:{t.tm_sec}'
def main():
parser = argparse.ArgumentParser(description="Monitor command's output in real time.")
parser.add_argument('cmd', type=str, nargs='+',
help='command invocation to be monitored')
parser.add_argument('-o', '--output', metavar='filename', dest='log_file_location', default=None, type=str,
help='write the output to a given filename')
args = parser.parse_args()
title = parse_cmd_name(args.cmd) + ' ' + get_time_str()
if args.log_file_location is None:
args.log_file_location = title + '.html'
Invoker(args.cmd, args.log_file_location, title)
if __name__ == '__main__':
main()
| true | true |
1c478548a8539ffc957d7a9e7b5a3ba080deb1de | 1,052 | py | Python | manabe/public/management/commands/fake_server.py | luoyedao/manabe | 90c158bd23e956308263b542634adc97f6526276 | [
"Apache-2.0"
] | 16 | 2018-08-12T08:28:00.000Z | 2022-03-15T02:13:42.000Z | manabe/public/management/commands/fake_server.py | luoyedao/manabe | 90c158bd23e956308263b542634adc97f6526276 | [
"Apache-2.0"
] | 14 | 2020-02-11T23:27:29.000Z | 2022-02-11T03:43:26.000Z | manabe/public/management/commands/fake_server.py | luoyedao/manabe | 90c158bd23e956308263b542634adc97f6526276 | [
"Apache-2.0"
] | 25 | 2018-08-26T07:38:46.000Z | 2022-03-15T02:13:45.000Z | from random import choice
from django.contrib.auth.models import User
from appinput.models import App
from envx.models import Env
from serverinput.models import Server
def fake_server_data():
Server.objects.all().delete()
print('delete all server data')
user_set = User.objects.all()
app_set = App.objects.all()
env_set = Env.objects.all()
for i in range(100):
ip_address = salt_name = "192.168.0.{}".format(i)
for j in [80, 443, 8080, 8888]:
port = j
name = "192.168.0.{}_{}".format(i, port)
app_user = choice(['root', 'tomcat', 'javauser'])
op_user = choice(user_set)
app_item = choice(app_set)
env_item = choice(env_set)
Server.objects.create(name=name, ip_address=ip_address, port=port,
salt_name=salt_name, env_name=env_item,
app_name=app_item, op_user=op_user,
app_user=app_user)
print('create all server data')
| 35.066667 | 78 | 0.586502 | from random import choice
from django.contrib.auth.models import User
from appinput.models import App
from envx.models import Env
from serverinput.models import Server
def fake_server_data():
Server.objects.all().delete()
print('delete all server data')
user_set = User.objects.all()
app_set = App.objects.all()
env_set = Env.objects.all()
for i in range(100):
ip_address = salt_name = "192.168.0.{}".format(i)
for j in [80, 443, 8080, 8888]:
port = j
name = "192.168.0.{}_{}".format(i, port)
app_user = choice(['root', 'tomcat', 'javauser'])
op_user = choice(user_set)
app_item = choice(app_set)
env_item = choice(env_set)
Server.objects.create(name=name, ip_address=ip_address, port=port,
salt_name=salt_name, env_name=env_item,
app_name=app_item, op_user=op_user,
app_user=app_user)
print('create all server data')
| true | true |
1c47869bfa0f88eba2e94f57df3c36bcb2331ede | 404 | py | Python | server/src/prefect_server/utilities/__init__.py | louisditzel/prefect | b1a02fee623b965e756a38aa09059db780ab67eb | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-10T14:32:32.000Z | 2020-05-10T14:32:32.000Z | server/src/prefect_server/utilities/__init__.py | louisditzel/prefect | b1a02fee623b965e756a38aa09059db780ab67eb | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2022-02-14T11:25:57.000Z | 2022-02-27T16:25:14.000Z | server/src/prefect_server/utilities/__init__.py | louisditzel/prefect | b1a02fee623b965e756a38aa09059db780ab67eb | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-31T04:42:56.000Z | 2020-05-31T04:42:56.000Z | # Licensed under the Prefect Community License, available at
# https://www.prefect.io/legal/prefect-community-license
import prefect_server.utilities.context
import prefect_server.utilities.exceptions
import prefect_server.utilities.graphql
import prefect_server.utilities.logging
import prefect_server.utilities.names
import prefect_server.utilities.tests
import prefect_server.utilities.asynchronous
| 33.666667 | 60 | 0.868812 |
import prefect_server.utilities.context
import prefect_server.utilities.exceptions
import prefect_server.utilities.graphql
import prefect_server.utilities.logging
import prefect_server.utilities.names
import prefect_server.utilities.tests
import prefect_server.utilities.asynchronous
| true | true |
1c47883aeba99de2cb069da42b1663aff45d1bfb | 11,011 | py | Python | data_kits/nf_kits.py | Jarvis73/DINs | fe967115182a47b9ad1018658cd1be745831e7aa | [
"MIT"
] | null | null | null | data_kits/nf_kits.py | Jarvis73/DINs | fe967115182a47b9ad1018658cd1be745831e7aa | [
"MIT"
] | null | null | null | data_kits/nf_kits.py | Jarvis73/DINs | fe967115182a47b9ad1018658cd1be745831e7aa | [
"MIT"
] | null | null | null | # Copyright 2019-2020 Jianwei Zhang All Right Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =================================================================================
import logging
import pickle
import zlib
from pathlib import Path

import nibabel as nib
import numpy as np
import pandas as pd
import scipy.ndimage as ndi
import tqdm
# Root directory three levels above this file — presumably the project root
# given the data layout below; TODO confirm against the repository structure.
ROOT = Path(__file__).parent.parent.parent
# All NF dataset assets (NIfTI volumes, split CSVs, pickle caches) live here.
DATA_ROOT = ROOT / "data/NF"
def read_nii(file_name, out_dtype=np.int16, special=False, only_header=False):
    """Load a NIfTI volume and reorder/flip it into a canonical (z, y, x) layout.

    Args:
        file_name: path to the NIfTI file (anything `nib.load` accepts).
        out_dtype: dtype the voxel data is cast to.
        special: if True, apply one extra flip along the last axis before the
            affine-driven flips (note: this cancels with the x-direction flip
            below when both trigger).
        only_header: if True, skip loading voxels and return just the header.

    Returns:
        The NIfTI header when `only_header` is True, otherwise the tuple
        (header, voxel ndarray) with axes ordered (z, y, x).
    """
    nib_vol = nib.load(str(file_name))
    vh = nib_vol.header
    if only_header:
        return vh
    affine = vh.get_best_affine()
    # assert len(np.where(affine[:3, :3].reshape(-1) != 0)[0]) == 3, affine
    # For each world axis, pick the voxel axis with the largest direction
    # component; assumes a near axis-aligned affine (see disabled assert).
    trans = np.argmax(np.abs(affine[:3, :3]), axis=1)
    # Reversing the permutation puts the array into (z, y, x) index order.
    data = nib_vol.get_fdata().astype(out_dtype).transpose(*trans[::-1])
    if special:
        data = np.flip(data, axis=2)
    # Flip each axis so it increases in a fixed anatomical direction.
    if affine[0, trans[0]] > 0:               # Increase x from Right to Left
        data = np.flip(data, axis=2)
    if affine[1, trans[1]] > 0:               # Increase y from Anterior to Posterior
        data = np.flip(data, axis=1)
    if affine[2, trans[2]] < 0:               # Increase z from Interior to Superior
        data = np.flip(data, axis=0)
    return vh, data
def write_nii(data, header, out_path, out_dtype=np.int16, special=False, affine=None):
    """Write a (z, y, x) volume back to a NIfTI file, undoing `read_nii`'s reorientation.

    Args:
        data: voxel array in the (z, y, x) layout produced by `read_nii`.
        header: NIfTI header to take the affine from; when given, the
            `affine` argument is overridden by the header's affine.
        out_path: destination file path.
        out_dtype: dtype of the stored voxels.
        special: presumably must match the `special` flag used when reading —
            the flips mirror `read_nii`; TODO confirm with callers.
        affine: 4x4 affine matrix, used only when `header` is None.
    """
    if header is not None:
        affine = header.get_best_affine()
    # assert len(np.where(affine[:3, :3].reshape(-1) != 0)[0]) == 3, affine
    # Dominant voxel axis per world axis (same convention as read_nii).
    trans = np.argmax(np.abs(affine[:3, :3]), axis=1)
    # Inverse permutation of read_nii's transpose, to restore disk axis order.
    trans_bk = [np.argwhere(np.array(trans[::-1]) == i)[0][0] for i in range(3)]
    if special:
        data = np.flip(data, axis=2)
    # Undo the canonical-orientation flips applied by read_nii.
    if affine[0, trans[0]] > 0:               # Increase x from Right to Left
        data = np.flip(data, axis=2)
    if affine[1, trans[1]] > 0:               # Increase y from Anterior to Posterior
        data = np.flip(data, axis=1)
    if affine[2, trans[2]] < 0:               # Increase z from Interior to Superior
        data = np.flip(data, axis=0)
    out_image = np.transpose(data, trans_bk).astype(out_dtype)
    if header is None and affine is not None:
        out = nib.Nifti1Image(out_image, affine=affine)
    else:
        out = nib.Nifti1Image(out_image, affine=None, header=header)
    nib.save(out, str(out_path))
def load_data(logger):
    """Load every NF volume/segmentation pair, with a zlib-compressed pickle cache.

    Args:
        logger: logger used for progress messages.

    Returns:
        dict mapping int patient id -> {"im_path", "la_path", "img", "lab",
        "pos" (N x 3 foreground coordinates), "meta" (NIfTI header),
        "lab_rng" (unique label values)}.
    """
    data_dir = DATA_ROOT / "nii_NF"
    paths = list(data_dir.glob("volume*"))
    logger.info(f"Loading data ({len(paths)} examples) ...")

    cache_path = DATA_ROOT / "cache.pkl.gz"
    if cache_path.exists():
        logger.info(f"Loading data cache from {cache_path}")
        with cache_path.open("rb") as f:
            cache = pickle.loads(zlib.decompress(f.read()))
        logger.info("Finished!")
        return cache

    cache = {}
    for vol_path in tqdm.tqdm(paths):
        # File names look like 'volume-<pid>.nii...'.
        pid = vol_path.name.split(".")[0].split("-")[-1]
        header, volume = read_nii(vol_path)
        lab_path = vol_path.parent / vol_path.name.replace("volume", "segmentation")
        _, label = read_nii(lab_path)
        assert volume.shape == label.shape, f"{volume.shape} vs {label.shape}"
        cache[int(pid)] = {
            "im_path": vol_path.absolute(),
            "la_path": lab_path.absolute(),
            "img": volume,
            "lab": label.astype(np.uint8),
            "pos": np.stack(np.where(label > 0), axis=1),
            "meta": header,
            "lab_rng": np.unique(label),
        }

    with cache_path.open("wb") as f:
        logger.info(f"Saving data cache to {cache_path}")
        f.write(zlib.compress(pickle.dumps(cache, pickle.HIGHEST_PROTOCOL)))
    logger.info("Finished!")
    return cache
def pre_filter_data(data, filter_thresh, connectivity=3, down_sampling=False, logger=None):
    """For object-based segmentation tasks.

    Pre-compute 3-D connected components per case and remove objects smaller
    than `filter_thresh` voxels.  Results are cached as a zlib-compressed
    pickle under DATA_ROOT.

    Args:
        data: dict pid -> {"lab": 3-D label volume, ...} as from `load_data`.
        filter_thresh: minimum voxel count for an object to be kept.
        connectivity: connectivity for `ndi.generate_binary_structure(3, .)`.
        down_sampling: selects which cache file is used.
        logger: optional logger.  BUG FIX: the original referenced a free
            name `logger` that is not defined anywhere at module scope in
            this file (NameError at call time); it is now an optional
            parameter falling back to a module logger.

    Returns:
        dict pid -> {"lab": binary mask with small objects removed,
                     "obj_list": list of (N_i, 3) voxel-coordinate arrays}.
    """
    if logger is None:
        logger = logging.getLogger(__name__)

    cache_path = DATA_ROOT / ("pre-filter.pkl.gz" if not down_sampling else "pre-filter_ds.pkl.gz")
    if cache_path.exists():
        logger.info(f"Loading pre-filter cache from {cache_path}")
        with cache_path.open("rb") as f:
            # Use a distinct name: the original shadowed the `data` parameter.
            raw = zlib.decompress(f.read())
            _pre_filter_cache = pickle.loads(raw)
        logger.info("Finished!")
        return _pre_filter_cache

    _pre_filter_cache = {}
    # The structuring element does not depend on the case: hoist it.
    struct = ndi.generate_binary_structure(3, connectivity)
    for pid in data:
        mask = data[pid]["lab"]
        labeled, _n_obj = ndi.label(mask, struct)
        slices = ndi.find_objects(labeled)
        obj_list = []
        for i, sli in enumerate(slices):
            patch = labeled[sli]
            z, y, x = np.where(patch == i + 1)
            if z.shape[0] < filter_thresh:
                # Erase the small object (mutates `labeled` through the view).
                patch[z, y, x] = 0
            else:
                obj_list.append(np.stack((z, y, x), axis=1))
        # Binarize: all surviving objects -> 1.
        better_label = np.clip(labeled, 0, 1)
        _pre_filter_cache[pid] = {"lab": better_label,
                                  "obj_list": obj_list}

    with cache_path.open("wb") as f:
        logger.info(f"Saving pre-filter cache to {cache_path}")
        cache_s = pickle.dumps(_pre_filter_cache, pickle.HIGHEST_PROTOCOL)
        f.write(zlib.compress(cache_s))
    logger.info("Finished!")
    return _pre_filter_cache
def load_split(set_key, test_fold):
    """Return the (pid, split) dataframe rows for one data subset.

    ``set_key`` selects the csv and filter:
      * "train":        split.csv rows from the 4 folds != ``test_fold``
      * "val"/"eval":   split.csv rows of fold ``test_fold``
      * "test":         split_test.csv rows with split == 0
      * "extra":        split_extra.csv rows with split == 0
        (the dataset with 45 cases of 15 patients)
    Anything else raises ValueError.
    """
    if set_key in ("train", "val", "eval"):
        folds = pd.read_csv(str(DATA_ROOT / "split.csv")).fillna(0).astype(int)
        if set_key != "train":
            return folds.loc[folds.split == test_fold]
        # Training set: the other four of the five folds.
        remaining = [fold for fold in range(5) if fold != test_fold]
        return folds.loc[folds.split.isin(remaining)]
    if set_key == "test":
        folds = pd.read_csv(str(DATA_ROOT / "split_test.csv")).fillna(0).astype(int)
        return folds.loc[folds.split == 0]
    if set_key == "extra":
        folds = pd.read_csv(str(DATA_ROOT / "split_extra.csv")).fillna(0).astype(int)
        return folds.loc[folds.split == 0]
    raise ValueError(f"`set_key` supports [train|val|test|extra], got {set_key}")
def filter_tiny_nf(mask):
    """Erase tiny connected components from a binary NF mask, in place.

    Two passes:
      1. per-slice 2-D components (``generate_binary_structure(2, 1)``,
         i.e. 4-connectivity) with <= 2 pixels are removed;
      2. remaining 3-D components (``generate_binary_structure(3, 2)``)
         with <= 5 voxels are removed.

    The input array is modified and also returned.
    """
    plane_struct = ndi.generate_binary_structure(2, 1)
    for sl in range(mask.shape[0]):
        labeled, _ = ndi.label(mask[sl], plane_struct)
        counts = np.bincount(labeled.flat)
        for lbl in np.where(counts <= 2)[0]:
            mask[sl][labeled == lbl] = 0
    vol_struct = ndi.generate_binary_structure(3, 2)
    labeled, _ = ndi.label(mask, vol_struct)
    counts = np.bincount(labeled.flat)
    for lbl in np.where(counts <= 5)[0]:
        mask[labeled == lbl] = 0
    return mask
def slim_labels(data, logger):
    """Attach a "slim" label (binarized, tiny components removed via
    ``filter_tiny_nf``) to every case in ``data``.

    The slimmed labels are cached in ``DATA_ROOT/slim_labels.pkl.gz`` and
    reused on subsequent calls.  ``data`` is updated in place and returned.
    """
    cache_file = DATA_ROOT / "slim_labels.pkl.gz"
    if cache_file.exists():
        logger.info(f"Loading slimmed label cache from {cache_file}")
        with cache_file.open("rb") as f:
            slimmed = pickle.loads(zlib.decompress(f.read()))
    else:
        logger.info(f"Saving slimmed label cache to {cache_file}")
        slimmed = {}
        for pid, case in data.items():
            slimmed[pid] = filter_tiny_nf(np.clip(case['lab'], 0, 1).copy())
        with cache_file.open("wb") as f:
            f.write(zlib.compress(pickle.dumps(slimmed, pickle.HIGHEST_PROTOCOL)))
    for pid in data:
        data[pid]['slim'] = slimmed[pid]
    logger.info("Finished!")
    return data
def load_test_data_paths():
    """Map test-set patient ids to their image/mask file paths.

    Scans ``DATA_ROOT/test_NF`` for ``*img.nii.gz``; the id is the leading
    "-"-separated token of the file name, and the mask file sits next to
    the image with "img" replaced by "mask".
    """
    dataset = {}
    for img_path in (DATA_ROOT / "test_NF").glob("*img.nii.gz"):
        pid = int(img_path.name.split("-")[0])
        mask_path = img_path.parent / img_path.name.replace("img", "mask")
        dataset[pid] = {"img_path": img_path, "lab_path": mask_path}
    return dataset
# Anonymized file name -> integer patient/case id for the extra dataset
# (used by load_extra_data_paths below).  Names group into 15 anatomical
# sites with 3 time points each, giving 45 cases total.
extra_name_mapping = {
    "---Abdomen1__20080620-img.nii.gz": 0,
    "---Abdomen1__20101129-img.nii.gz": 1,
    "---Abdomen1__20130625-img.nii.gz": 2,
    "---Airway1__20031216-img.nii.gz": 3,
    "---Airway1__20041020-img.nii.gz": 4,
    "---Airway1__20060907-img.nii.gz": 5,
    "---Airway2__20080707-img.nii.gz": 6,
    "---Airway2__20110124-img.nii.gz": 7,
    "---Airway2__20130204-img.nii.gz": 8,
    "---Back1__20070330-img.nii.gz": 9,
    "---Back1__20081117-img.nii.gz": 10,
    "---Back1__20100323-img.nii.gz": 11,
    "---Brachial-plexus1__20130205-img.nii.gz": 12,
    "---Br-plexus1__20120223-img.nii.gz": 13,
    "---Br-plexus1__20120625-img.nii.gz": 14,
    "---Chest2__20011227-img.nii.gz": 15,
    "---Chest2__20050914-img.nii.gz": 16,
    "---Chest2__20080918-img.nii.gz": 17,
    "---Chest3__20081222-img.nii.gz": 18,
    "---Chest3__20110602-img.nii.gz": 19,
    "---Chest3__20131122-img.nii.gz": 20,
    "---Face1__20100719-img.nii.gz": 21,
    "---Face1__20110418-img.nii.gz": 22,
    "---Face1__20120924-img.nii.gz": 23,
    "---Leg1__20080714-img.nii.gz": 24,
    "---Leg1__20100726-img.nii.gz": 25,
    "---Leg1__20110228-img.nii.gz": 26,
    "---Neck1__20020726-img.nii.gz": 27,
    "---Neck1__20040315-img.nii.gz": 28,
    "---Neck1__20050527-img.nii.gz": 29,
    "---Orbit1__20030225-img.nii.gz": 30,
    "---Orbit1__20050217-img.nii.gz": 31,
    "---Orbit1__20061016-img.nii.gz": 32,
    "---Orbit2__20090403-img.nii.gz": 33,
    "---Orbit2__20121018-img.nii.gz": 34,
    "---Orbit2__20140520-img.nii.gz": 35,
    "---Pelvis1__20030916-img.nii.gz": 36,
    "---Pelvis1__20060109-img.nii.gz": 37,
    "---Pelvis1__20100726-img.nii.gz": 38,
    "---Pelvis2__20090114-img.nii.gz": 39,
    "---Pelvis2__20100112-img.nii.gz": 40,
    "---Pelvis2__20120423-img.nii.gz": 41,
    "---Thigh1__20071019-img.nii.gz": 42,
    "---Thigh1__20100712-img.nii.gz": 43,
    "---Thigh1__20120106-img.nii.gz": 44,
}
def load_extra_data_paths():
    """Map the extra-set (NCI_NF1_InaLabeled) scans to image/mask paths.

    Ids come from the module-level ``extra_name_mapping`` table; the mask
    file sits next to the image with "img" replaced by "mask".
    """
    dataset = {}
    for img_path in (DATA_ROOT / "NCI_NF1_InaLabeled").glob("*img.nii.gz"):
        pid = extra_name_mapping[img_path.name]
        mask_path = img_path.parent / img_path.name.replace("img", "mask")
        dataset[pid] = {"img_path": img_path, "lab_path": mask_path}
    return dataset
def load_box_csv():
    """Load ``DATA_ROOT/nf_box.csv`` as a DataFrame (presumably per-lesion
    bounding boxes -- confirm against the file's columns)."""
    return pd.read_csv(DATA_ROOT / "nf_box.csv")
| 38.365854 | 101 | 0.612297 |
import pickle
import zlib
from pathlib import Path
import nibabel as nib
import numpy as np
import pandas as pd
import scipy.ndimage as ndi
import tqdm
ROOT = Path(__file__).parent.parent.parent
DATA_ROOT = ROOT / "data/NF"
def read_nii(file_name, out_dtype=np.int16, special=False, only_header=False):
nib_vol = nib.load(str(file_name))
vh = nib_vol.header
if only_header:
return vh
affine = vh.get_best_affine()
trans = np.argmax(np.abs(affine[:3, :3]), axis=1)
data = nib_vol.get_fdata().astype(out_dtype).transpose(*trans[::-1])
if special:
data = np.flip(data, axis=2)
if affine[0, trans[0]] > 0:
data = np.flip(data, axis=2)
if affine[1, trans[1]] > 0:
data = np.flip(data, axis=1)
if affine[2, trans[2]] < 0:
data = np.flip(data, axis=0)
return vh, data
def write_nii(data, header, out_path, out_dtype=np.int16, special=False, affine=None):
if header is not None:
affine = header.get_best_affine()
trans = np.argmax(np.abs(affine[:3, :3]), axis=1)
trans_bk = [np.argwhere(np.array(trans[::-1]) == i)[0][0] for i in range(3)]
if special:
data = np.flip(data, axis=2)
if affine[0, trans[0]] > 0:
data = np.flip(data, axis=2)
if affine[1, trans[1]] > 0:
data = np.flip(data, axis=1)
if affine[2, trans[2]] < 0:
data = np.flip(data, axis=0)
out_image = np.transpose(data, trans_bk).astype(out_dtype)
if header is None and affine is not None:
out = nib.Nifti1Image(out_image, affine=affine)
else:
out = nib.Nifti1Image(out_image, affine=None, header=header)
nib.save(out, str(out_path))
def load_data(logger):
data_dir = DATA_ROOT / "nii_NF"
path_list = list(data_dir.glob("volume*"))
logger.info(f"Loading data ({len(path_list)} examples) ...")
cache_path = DATA_ROOT / "cache.pkl.gz"
if cache_path.exists():
logger.info(f"Loading data cache from {cache_path}")
with cache_path.open("rb") as f:
data = zlib.decompress(f.read())
_data_cache = pickle.loads(data)
logger.info("Finished!")
return _data_cache
_data_cache = {}
for path in tqdm.tqdm(path_list):
pid = path.name.split(".")[0].split("-")[-1]
header, volume = read_nii(path)
la_path = path.parent / path.name.replace("volume", "segmentation")
_, label = read_nii(la_path)
assert volume.shape == label.shape, f"{volume.shape} vs {label.shape}"
_data_cache[int(pid)] = {"im_path": path.absolute(),
"la_path": la_path.absolute(),
"img": volume,
"lab": label.astype(np.uint8),
"pos": np.stack(np.where(label > 0), axis=1),
"meta": header,
"lab_rng": np.unique(label)}
with cache_path.open("wb") as f:
logger.info(f"Saving data cache to {cache_path}")
cache_s = pickle.dumps(_data_cache, pickle.HIGHEST_PROTOCOL)
f.write(zlib.compress(cache_s))
logger.info("Finished!")
return _data_cache
def pre_filter_data(data, filter_thresh, connectivity=3, down_sampling=False):
_pre_filter_cache = None
cache_path = DATA_ROOT / ("pre-filter.pkl.gz" if not down_sampling else "pre-filter_ds.pkl.gz")
if cache_path.exists():
logger.info(f"Loading pre-filter cache from {cache_path}")
with cache_path.open("rb") as f:
data = zlib.decompress(f.read())
_pre_filter_cache = pickle.loads(data)
logger.info("Finished!")
return _pre_filter_cache
_pre_filter_cache = {}
for pid in data:
mask = data[pid]["lab"]
struct = ndi.generate_binary_structure(3, connectivity)
labeled, n_obj = ndi.label(mask, struct)
slices = ndi.find_objects(labeled)
obj_list = []
for i, sli in enumerate(slices):
patch = labeled[sli]
z, y, x = np.where(patch == i + 1)
if z.shape[0] < filter_thresh:
patch[z, y, x] = 0
else:
obj_list.append(np.stack((z, y, x), axis=1))
better_label = np.clip(labeled, 0, 1)
_pre_filter_cache[pid] = {"lab": better_label,
"obj_list": obj_list}
with cache_path.open("wb") as f:
logger.info(f"Saving pre-filter cache to {cache_path}")
cache_s = pickle.dumps(_pre_filter_cache, pickle.HIGHEST_PROTOCOL)
f.write(zlib.compress(cache_s))
logger.info("Finished!")
return _pre_filter_cache
def load_split(set_key, test_fold):
if set_key in ["train", "val", "eval"]:
fold_path = DATA_ROOT / "split.csv"
folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)
val_split = folds.loc[folds.split == test_fold]
if set_key != "train":
return val_split
train_folds = list(range(5))
train_folds.remove(test_fold)
train_split = folds.loc[folds.split.isin(train_folds)]
return train_split
elif set_key == "test":
fold_path = DATA_ROOT / "split_test.csv"
folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)
test_split = folds.loc[folds.split == 0]
return test_split
elif set_key == "extra":
fold_path = DATA_ROOT / "split_extra.csv"
folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)
test_split = folds.loc[folds.split == 0]
return test_split
else:
raise ValueError(f"`set_key` supports [train|val|test|extra], got {set_key}")
def filter_tiny_nf(mask):
struct2 = ndi.generate_binary_structure(2, 1)
for i in range(mask.shape[0]):
res, n_obj = ndi.label(mask[i], struct2)
size = np.bincount(res.flat)
for j in np.where(size <= 2)[0]:
mask[i][res == j] = 0
struct3 = ndi.generate_binary_structure(3, 2)
res, n_obj = ndi.label(mask, struct3)
size = np.bincount(res.flat)
for i in np.where(size <= 5)[0]:
mask[res == i] = 0
return mask
def slim_labels(data, logger):
slim_labels_path = DATA_ROOT / "slim_labels.pkl.gz"
if slim_labels_path.exists():
logger.info(f"Loading slimmed label cache from {slim_labels_path}")
with slim_labels_path.open("rb") as f:
new_labels = pickle.loads(zlib.decompress(f.read()))
for i in data:
data[i]['slim'] = new_labels[i]
logger.info("Finished!")
else:
new_labels = {}
logger.info(f"Saving slimmed label cache to {slim_labels_path}")
for i, item in data.items():
new_labels[i] = filter_tiny_nf(np.clip(item['lab'], 0, 1).copy())
data[i]['slim'] = new_labels[i]
with slim_labels_path.open("wb") as f:
f.write(zlib.compress(pickle.dumps(new_labels, pickle.HIGHEST_PROTOCOL)))
logger.info("Finished!")
return data
def load_test_data_paths():
data_dir = DATA_ROOT / "test_NF"
path_list = list(data_dir.glob("*img.nii.gz"))
dataset = {}
for path in path_list:
pid = int(path.name.split("-")[0])
dataset[pid] = {"img_path": path, "lab_path": path.parent / path.name.replace("img", "mask")}
return dataset
extra_name_mapping = {
"---Abdomen1__20080620-img.nii.gz": 0,
"---Abdomen1__20101129-img.nii.gz": 1,
"---Abdomen1__20130625-img.nii.gz": 2,
"---Airway1__20031216-img.nii.gz": 3,
"---Airway1__20041020-img.nii.gz": 4,
"---Airway1__20060907-img.nii.gz": 5,
"---Airway2__20080707-img.nii.gz": 6,
"---Airway2__20110124-img.nii.gz": 7,
"---Airway2__20130204-img.nii.gz": 8,
"---Back1__20070330-img.nii.gz": 9,
"---Back1__20081117-img.nii.gz": 10,
"---Back1__20100323-img.nii.gz": 11,
"---Brachial-plexus1__20130205-img.nii.gz": 12,
"---Br-plexus1__20120223-img.nii.gz": 13,
"---Br-plexus1__20120625-img.nii.gz": 14,
"---Chest2__20011227-img.nii.gz": 15,
"---Chest2__20050914-img.nii.gz": 16,
"---Chest2__20080918-img.nii.gz": 17,
"---Chest3__20081222-img.nii.gz": 18,
"---Chest3__20110602-img.nii.gz": 19,
"---Chest3__20131122-img.nii.gz": 20,
"---Face1__20100719-img.nii.gz": 21,
"---Face1__20110418-img.nii.gz": 22,
"---Face1__20120924-img.nii.gz": 23,
"---Leg1__20080714-img.nii.gz": 24,
"---Leg1__20100726-img.nii.gz": 25,
"---Leg1__20110228-img.nii.gz": 26,
"---Neck1__20020726-img.nii.gz": 27,
"---Neck1__20040315-img.nii.gz": 28,
"---Neck1__20050527-img.nii.gz": 29,
"---Orbit1__20030225-img.nii.gz": 30,
"---Orbit1__20050217-img.nii.gz": 31,
"---Orbit1__20061016-img.nii.gz": 32,
"---Orbit2__20090403-img.nii.gz": 33,
"---Orbit2__20121018-img.nii.gz": 34,
"---Orbit2__20140520-img.nii.gz": 35,
"---Pelvis1__20030916-img.nii.gz": 36,
"---Pelvis1__20060109-img.nii.gz": 37,
"---Pelvis1__20100726-img.nii.gz": 38,
"---Pelvis2__20090114-img.nii.gz": 39,
"---Pelvis2__20100112-img.nii.gz": 40,
"---Pelvis2__20120423-img.nii.gz": 41,
"---Thigh1__20071019-img.nii.gz": 42,
"---Thigh1__20100712-img.nii.gz": 43,
"---Thigh1__20120106-img.nii.gz": 44,
}
def load_extra_data_paths():
data_dir = DATA_ROOT / "NCI_NF1_InaLabeled"
path_list = list(data_dir.glob("*img.nii.gz"))
dataset = {}
for path in path_list:
pid = extra_name_mapping[path.name]
dataset[pid] = {"img_path": path, "lab_path": path.parent / path.name.replace("img", "mask")}
return dataset
def load_box_csv():
box_file = DATA_ROOT / "nf_box.csv"
box_df = pd.read_csv(box_file)
return box_df
| true | true |
1c4788a7fec1e92cf4988f8cd63897bc0a883269 | 1,288 | py | Python | setup.py | AbhiProjects/TagLib | 214139259157a7b3ec3f2fb7b342411a33b85839 | [
"BSD-3-Clause"
] | null | null | null | setup.py | AbhiProjects/TagLib | 214139259157a7b3ec3f2fb7b342411a33b85839 | [
"BSD-3-Clause"
] | null | null | null | setup.py | AbhiProjects/TagLib | 214139259157a7b3ec3f2fb7b342411a33b85839 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Setup script for taglib"""
import sys
# Guard: require at least Python 2.6 (hexversion 0x02060000 == 2.6.0).
# NOTE: the Python 2 print-to-stderr statement below makes this script
# Python-2-only as written.
if sys.hexversion < 0x02060000:
    print >> sys.stderr, 'Sorry, Python 2.6 is required.'
    sys.exit(1)
from distutils.core import setup
sys.dont_write_bytecode = True  # don't leave turds (suppress .pyc files)
# Imported after the version guard; supplies the package version below.
from taglib import __version__
def main():
    """Run distutils setup for the taglib package.  Returns 0 on success."""
    metadata = dict(
        name='taglib',
        version=__version__,
        author='Chris Jones',
        author_email='cjones@gruntle.org',
        url='http://code.google.com/p/python-taglib/',
        description='Library to manipulate audio file metadata',
        license='BSD',
        py_modules=['taglib'],
        scripts=['scripts/tagdump'],
        # Trove classifiers:
        # http://pypi.python.org/pypi?%3Aaction=list_classifiers
        classifiers=[
            'Development Status :: 4 - Beta',
            'Environment :: Console',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: BSD License',
            'Natural Language :: English',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 2.6',
            'Topic :: Multimedia :: Sound/Audio',
            'Topic :: Software Development :: Libraries :: Python Modules',
        ])
    setup(**metadata)
    return 0
if __name__ == '__main__':
    sys.exit(main())
| 29.953488 | 78 | 0.585404 |
import sys
if sys.hexversion < 0x02060000:
print >> sys.stderr, 'Sorry, Python 2.6 is required.'
sys.exit(1)
from distutils.core import setup
sys.dont_write_bytecode = True
from taglib import __version__
def main():
setup(name='taglib',
author='Chris Jones',
author_email='cjones@gruntle.org',
url='http://code.google.com/p/python-taglib/',
description='Library to manipulate audio file metadata',
license='BSD',
version=__version__,
py_modules=['taglib'],
scripts=['scripts/tagdump'],
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Software Development :: Libraries :: Python Modules'])
return 0
if __name__ == '__main__':
sys.exit(main())
| true | true |
1c478921c64292aa5b2d3adeb81064377fca26e0 | 1,101 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/schedule_policy.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/schedule_policy.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/schedule_policy.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SchedulePolicy(Model):
    """Base class for backup schedule policies (polymorphic).

    The concrete subtype is selected during (de)serialization by the
    ``schedulePolicyType`` discriminator, per ``_subtype_map`` below.

    :param schedule_policy_type: Polymorphic Discriminator
    :type schedule_policy_type: str
    """

    # The discriminator must always be present on the wire.
    _validation = {'schedule_policy_type': {'required': True}}

    # Python attribute -> JSON key / msrest type.
    _attribute_map = {'schedule_policy_type': {'key': 'schedulePolicyType', 'type': 'str'}}

    # Discriminator value -> concrete model class name.
    _subtype_map = {
        'schedule_policy_type': {
            'LongTermSchedulePolicy': 'LongTermSchedulePolicy',
            'SimpleSchedulePolicy': 'SimpleSchedulePolicy',
        },
    }

    def __init__(self):
        self.schedule_policy_type = None
| 30.583333 | 132 | 0.608538 |
from msrest.serialization import Model
class SchedulePolicy(Model):
_validation = {
'schedule_policy_type': {'required': True},
}
_attribute_map = {
'schedule_policy_type': {'key': 'schedulePolicyType', 'type': 'str'},
}
_subtype_map = {
'schedule_policy_type': {'LongTermSchedulePolicy': 'LongTermSchedulePolicy', 'SimpleSchedulePolicy': 'SimpleSchedulePolicy'}
}
def __init__(self):
self.schedule_policy_type = None
| true | true |
1c478b7837a4774911d31634003c88b12e9c37bc | 3,439 | py | Python | assets/winc_firmware_upgrade/firmware/handler_search.py | rashedtalukder/cryptoauth_trustplatform_designsuite | 6b42c64071a9fb5dc9894bfedbbfabbcfb7961c1 | [
"MIT"
] | 11 | 2019-12-03T14:18:38.000Z | 2021-08-25T16:41:27.000Z | assets/winc_firmware_upgrade/firmware/handler_search.py | rashedtalukder/cryptoauth_trustplatform_designsuite | 6b42c64071a9fb5dc9894bfedbbfabbcfb7961c1 | [
"MIT"
] | 9 | 2020-02-13T09:07:42.000Z | 2022-03-18T18:29:24.000Z | assets/winc_firmware_upgrade/firmware/handler_search.py | rashedtalukder/cryptoauth_trustplatform_designsuite | 6b42c64071a9fb5dc9894bfedbbfabbcfb7961c1 | [
"MIT"
] | 10 | 2020-04-28T10:35:48.000Z | 2021-11-03T23:03:30.000Z | '''
Simple program to get a hint where simple programs might be installed by chasing through the registry;
it does not deal with applications like Word, whose registration is beyond complicated.
Pass in the extension to check and a hint at which program you want.
Returns 0 if found.
2 for parm error
1 for not found
Eg
C:\work_repos\>python handler_search.py cpP studio
""C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\IDE\devenv.exe"
C:\work_repos\>python handler_search.py cpP atmelstudio
"C:\Program Files (x86)\Atmel\Studio\7.0\atmelstudio.exe"
'''
import sys
import os
import winreg
# Registry hive name (the first segment of a full key path, upper-case)
# -> winreg root-key handle.  Consumed by parse_key() below.
roots_hives = {
    "HKEY_CLASSES_ROOT": winreg.HKEY_CLASSES_ROOT,
    "HKEY_CURRENT_USER": winreg.HKEY_CURRENT_USER,
    "HKEY_LOCAL_MACHINE": winreg.HKEY_LOCAL_MACHINE,
    "HKEY_USERS": winreg.HKEY_USERS,
    "HKEY_PERFORMANCE_DATA": winreg.HKEY_PERFORMANCE_DATA,
    "HKEY_CURRENT_CONFIG": winreg.HKEY_CURRENT_CONFIG,
    "HKEY_DYN_DATA": winreg.HKEY_DYN_DATA
}
def join(path, *paths):
    """Join path segments into a backslash-separated registry-style path.

    Leading/trailing slashes and backslashes are stripped from every
    segment before joining, and any forward slashes in the result are
    normalized to backslashes.
    """
    cleaned = [path.strip('/\\')]
    cleaned.extend(segment.strip('/\\') for segment in paths)
    joined = os.path.join(*cleaned)
    return joined.replace('/', '\\')
def parse_key(key):
    """Split a full registry path into (partial key, root hive handle).

    ``key`` looks like ``HKEY_LOCAL_MACHINE\\SOFTWARE\\...``: the first
    non-empty backslash-separated segment names the hive (looked up in the
    module-level ``roots_hives`` table) and the remaining segments are
    rejoined into the partial key.  Raises if the hive name is unknown.
    """
    segments = [part for part in key.upper().split('\\') if part]
    hive_name = segments[0]
    hive = roots_hives.get(hive_name)
    partial = '\\'.join(segments[1:])
    if not hive:
        raise Exception('root hive "{}" was not found'.format(hive_name))
    return partial, hive
def get_all_values(key):
    """Enumerate every value under a registry key.

    Returns a dict mapping enumeration index -> [value_name, value_data];
    the unnamed default value is reported as "(Default)", and REG_EXPAND_SZ
    data has its %VAR% references expanded.  On any failure (bad key,
    access denied, non-Windows) the initial placeholder entry
    ``{0: [[''], ['']]}`` is returned unchanged, which callers test for.
    """
    data = {}
    data[0] = [[''], ['']]
    try:
        partial_key, root_hive = parse_key(key)
        with winreg.ConnectRegistry(None, root_hive) as reg:
            with winreg.OpenKey(reg, partial_key) as key_object:
                i = 0
                while True:
                    try:
                        ret = winreg.EnumValue(key_object, i)
                    except WindowsError:
                        # EnumValue raises when the index runs past the end.
                        break
                    name = "(Default)" if ret[0] == '' else ret[0]
                    if ret[2] == winreg.REG_EXPAND_SZ:
                        # BUG FIX: the original called a bare `expandvars`,
                        # which is undefined here; the resulting NameError was
                        # silently swallowed by the outer except, truncating
                        # the results whenever a REG_EXPAND_SZ value appeared.
                        data[i] = [name, os.path.expandvars(ret[1])]
                    else:
                        data[i] = [name, ret[1]]
                    i += 1
                key_object.Close()
    except Exception:
        # Deliberately best-effort: callers check for the placeholder entry.
        pass
    return data
def main(argv=None):
    """Locate the executable registered to open files with a given extension.

    sys.argv[1] is the extension (without the dot) and sys.argv[2] a
    substring of the desired ProgId.  Prints the quoted .exe path and exits
    0 on success; prints a diagnostic and exits 1 otherwise.
    """
    args = sys.argv[1:]
    assoc_key = r'HKEY_CLASSES_ROOT\.' + args[0] + '\\OpenWithProgids'
    progid_key = r''
    values = get_all_values(assoc_key)
    # Find the first ProgId containing the requested hint.
    for idx in range(len(values)):
        progid = str(values[idx][0])
        if args[1].upper() in progid.upper():
            progid_key = r'HKEY_CLASSES_ROOT\\' + progid + '\\shell\\open\\command'
            break
    # get_all_values leaves its placeholder untouched when the key is absent.
    if str(values[0][1]) == '[\'\']':
        print ("Assoc not found")
        sys.exit(1)
    values = get_all_values(progid_key)
    # The open command embeds the executable path; cut it at ".exe".
    for idx in range(len(values)):
        command = str(values[idx][1])
        if ".EXE" in command.upper():
            end = command.upper().find('.EXE')
            print ('"' + command[:end + 4] + '"')
            sys.exit(0)
    print ("Handler not found")
    sys.exit(1)
if __name__ == "__main__":
    main()
| 27.95935 | 95 | 0.549578 | import sys
import os
import winreg
roots_hives = {
"HKEY_CLASSES_ROOT": winreg.HKEY_CLASSES_ROOT,
"HKEY_CURRENT_USER": winreg.HKEY_CURRENT_USER,
"HKEY_LOCAL_MACHINE": winreg.HKEY_LOCAL_MACHINE,
"HKEY_USERS": winreg.HKEY_USERS,
"HKEY_PERFORMANCE_DATA": winreg.HKEY_PERFORMANCE_DATA,
"HKEY_CURRENT_CONFIG": winreg.HKEY_CURRENT_CONFIG,
"HKEY_DYN_DATA": winreg.HKEY_DYN_DATA
}
def join(path, *paths):
path = path.strip('/\\')
paths = map(lambda x: x.strip('/\\'), paths)
paths = list(paths)
result = os.path.join(path, *paths)
result = result.replace('/', '\\')
return result
def parse_key(key):
key = key.upper()
aparts = key.split('\\')
parts = list(filter(None, aparts))
root_hive_name = parts[0]
root_hive = roots_hives.get(root_hive_name)
partial_key = '\\'.join(parts[1:])
if not root_hive:
raise Exception('root hive "{}" was not found'.format(root_hive_name))
return partial_key, root_hive
def get_all_values(key):
data = {}
data[0] = [[''],['']]
try:
partial_key, root_hive = parse_key(key)
with winreg.ConnectRegistry(None, root_hive) as reg:
with winreg.OpenKey(reg, partial_key) as key_object:
i = 0
while True:
try:
ret = winreg.EnumValue(key_object, i)
if ret[2] == winreg.REG_EXPAND_SZ:
if ret[0] == '':
data[i] = ["(Default)", expandvars(ret[1])]
else:
data[i] = [ret[0], expandvars(ret[1])]
else:
if ret[0] == '':
data[i] = ["(Default)", ret[1]]
else:
data[i] = [ret[0], ret[1]]
except WindowsError:
break
i += 1
key_object.Close()
except:
pass
return data
def main(argv=None):
argv = sys.argv
args = argv[1:]
key = r'HKEY_CLASSES_ROOT\.' + args[0] + '\\OpenWithProgids'
pkey = r''
data = get_all_values(key)
for x in range(0, len(data)):
strdatax = str(data[x][0])
if args[1].upper() in strdatax.upper():
pkey = r'HKEY_CLASSES_ROOT\\' + strdatax + '\\shell\\open\\command'
break
if str(data[0][1]) == '[\'\']':
print ("Assoc not found")
sys.exit(1)
data = get_all_values(pkey)
for x in range(0, len(data)):
if ".EXE" in str(data[x][1]).upper():
exeind = str(data[x][1]).upper().find('.EXE')
print ('"' + str(data[x][1])[:exeind+4] + '"')
sys.exit(0)
print ("Handler not found")
sys.exit(1)
if __name__ == "__main__":
main()
| true | true |
1c478b7de55a29c23c21c47bbecf9e11a14c3e20 | 7,684 | py | Python | test/test_grapher.py | leehyoeun96/rosprofiler | c7bee4e98d8417cd3e2a8ef246b7930c97c74dc5 | [
"Apache-2.0"
] | 6 | 2017-11-18T05:59:22.000Z | 2022-01-01T11:56:00.000Z | test/test_grapher.py | leehyoeun96/rosprofiler | c7bee4e98d8417cd3e2a8ef246b7930c97c74dc5 | [
"Apache-2.0"
] | 3 | 2015-04-11T20:04:24.000Z | 2018-06-19T21:55:39.000Z | test/test_grapher.py | leehyoeun96/rosprofiler | c7bee4e98d8417cd3e2a8ef246b7930c97c74dc5 | [
"Apache-2.0"
] | 15 | 2017-11-19T05:03:29.000Z | 2021-03-15T15:26:37.000Z | #!/usr/bin/env python
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import time
import rospy
import rostest
from ros_topology_msgs.msg import *
PKG = 'rosprofiler'
NAME = 'test_grapher'
# TODO: Check services
# Expected topology of the launch graph under test, keyed by node name.
# Each Node lists the topics it publishes/subscribes plus its expected
# point-to-point Connections.
# NOTE(review): direction=2 appears on the publishing side and direction=1
# on the subscribing side of every link below -- confirm against the
# ros_topology_msgs/Connection direction constants.
EXPECTED_NODES = dict()
talker1 = Node(name="/talker1")
talker1.publishes.append("/chatter")
talker1.publishes.append("/rosout")
talker1.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
talker1.connections.append(Connection(destination='/listener1',topic='/chatter',direction=2,transport="TCPROS"))
talker1.connections.append(Connection(destination='/listener2',topic='/chatter',direction=2,transport="TCPROS"))
talker2 = Node(name="/talker2")
talker2.publishes.append("/chatter")
talker2.publishes.append("/rosout")
talker2.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
talker2.connections.append(Connection(destination='/listener1',topic='/chatter',direction=2,transport="TCPROS"))
talker2.connections.append(Connection(destination='/listener2',topic='/chatter',direction=2,transport="TCPROS"))
listener1 = Node(name="/listener1")
listener1.publishes.append("/rosout")
listener1.subscribes.append("/chatter")
listener1.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
listener1.connections.append(Connection(destination='/talker1',topic='/chatter',direction=1,transport="TCPROS"))
listener1.connections.append(Connection(destination='/talker2',topic='/chatter',direction=1,transport="TCPROS"))
listener2 = Node(name="/listener2")
listener2.publishes.append("/rosout")
listener2.subscribes.append("/chatter")
listener2.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
listener2.connections.append(Connection(destination='/talker1',topic='/chatter',direction=1,transport="TCPROS"))
listener2.connections.append(Connection(destination='/talker2',topic='/chatter',direction=1,transport="TCPROS"))
# /rosout aggregates logging from every node in the graph, including this
# test node and the grapher itself.
rosout = Node(name="/rosout")
rosout.publishes.append("/rosout_agg")
rosout.subscribes.append("/rosout")
rosout.connections.append(Connection(destination='/talker1',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/talker2',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/listener1',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/listener2',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/test_grapher',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/rosgrapher',topic='/rosout',direction=1,transport="TCPROS"))
# The node under test: publishes the /topology graph consumed by this test.
grapher = Node(name="/rosgrapher")
grapher.publishes.append("/rosout")
grapher.publishes.append("/topology")
grapher.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
grapher.connections.append(Connection(destination='/'+NAME,topic='/topology',direction=2,transport="TCPROS"))
# This very test node also shows up in the reported topology.
tester = Node(name="/test_grapher")
tester.publishes.append("/rosout")
tester.subscribes.append("/topology")
tester.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
tester.connections.append(Connection(destination='/rosgrapher',topic='/topology',direction=1,transport="TCPROS"))
EXPECTED_NODES['/talker1'] = talker1
EXPECTED_NODES['/talker2'] = talker2
EXPECTED_NODES['/listener1'] = listener1
EXPECTED_NODES['/listener2'] = listener2
EXPECTED_NODES['/rosout'] = rosout
EXPECTED_NODES['/rosgrapher'] = grapher
EXPECTED_NODES['/'+NAME] = tester
# Expected topics and their message types.
t_chatter = Topic(name="/chatter", type="std_msgs/String")
t_rosout = Topic(name="/rosout", type="rosgraph_msgs/Log")
t_rosout_agg = Topic(name="/rosout_agg", type="rosgraph_msgs/Log")
t_topology = Topic(name="/topology", type="ros_topology_msgs/Graph")
EXPECTED_TOPICS = [t_chatter, t_rosout, t_rosout_agg, t_topology]
class TestGrapher(unittest.TestCase):
    """Integration test for the rosgrapher node.

    Subscribes to /topology and compares the latest Graph message against
    the module-level EXPECTED_NODES / EXPECTED_TOPICS tables.
    """
    def __init__(self, *args):
        super(TestGrapher, self).__init__(*args)
        # Start time - for calculating timeout
        self.start_time = None  # NOTE(review): unused; wait_for_data keeps its own local start time
        self.graph = Graph()  # latest Graph snapshot received on /topology
    def setUp(self):
        # Runs before every test_* method: subscribe and wait for data.
        rospy.init_node(NAME)
        rospy.Subscriber('/topology', Graph, self.callback)
        self.wait_for_data(10.0)
    def callback(self, data):
        # Topic callback: keep only the most recent graph snapshot.
        self.graph = data
    def wait_for_data(self, duration):
        """ Waits to receive statistics data """
        # Poll at 1 Hz until the graph is at least as large as expected or
        # `duration` seconds of ROS time have elapsed.
        start_time = rospy.get_rostime()
        while not rospy.is_shutdown() and not (rospy.get_rostime() > (start_time + rospy.Duration(duration))):
            if len(self.graph.nodes) >= len(EXPECTED_NODES) and len(self.graph.topics) >= len(EXPECTED_TOPICS):
                return
            rospy.sleep(1.0)
    def test_nodes_publishers(self):
        # Every reported node publishes exactly the expected topic set.
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES, "%s not found!"%node.name
            testnode = EXPECTED_NODES[node.name]
            assert set(node.publishes) == set(testnode.publishes), "%s.publishes=%s, but should be %s"%(node.name,node.publishes,testnode.publishes)
    def test_nodes_subscribers(self):
        # Every reported node subscribes to exactly the expected topic set.
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES, "%s not found!"%node.name
            testnode = EXPECTED_NODES[node.name]
            assert set(node.subscribes) == set(testnode.subscribes), "%s.subscribes=%s, but should be %s"%(node.name,node.subscribes,testnode.subscribes)
    def test_nodes_connections_present(self):
        # No reported connection may be absent from the expected topology.
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES, "%s not found!"%node.name
            testnode = EXPECTED_NODES[node.name]
            for connection in node.connections:
                assert connection in testnode.connections, "Node %s has extra connection %s"%(node.name, connection)
    def test_nodes_connections_missing(self):
        # Every expected connection must actually be reported.
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES, "%s not found!"%node.name
            testnode = EXPECTED_NODES[node.name]
            for connection in testnode.connections:
                assert connection in node.connections, "Node %s expected to find missing connection %s"%(node.name, connection)
    def test_nodes_present(self):
        # No unexpected node names in the reported graph.
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES.keys(), "Found extra node '%s'"%node.name
    def test_nodes_missing(self):
        # Every expected node name is reported.
        for node_name in EXPECTED_NODES.keys():
            assert node_name in [n.name for n in self.graph.nodes], "Expected to find missing node '%s'"%node_name
    def test_topics_present(self):
        # No unexpected topics in the reported graph.
        for topic in self.graph.topics:
            assert topic in EXPECTED_TOPICS, "Found extra topic '%s'"%topic
    def test_topics_missing(self):
        # Every expected topic is reported.
        for topic in EXPECTED_TOPICS:
            assert topic in self.graph.topics, "Expected to find missing topic '%s'"%topic
if __name__ == '__main__':
rostest.rosrun(PKG, NAME, TestGrapher, sys.argv)
| 49.574194 | 153 | 0.730739 |
import sys
import unittest
import time
import rospy
import rostest
from ros_topology_msgs.msg import *
PKG = 'rosprofiler'
NAME = 'test_grapher'
EXPECTED_NODES = dict()
talker1 = Node(name="/talker1")
talker1.publishes.append("/chatter")
talker1.publishes.append("/rosout")
talker1.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
talker1.connections.append(Connection(destination='/listener1',topic='/chatter',direction=2,transport="TCPROS"))
talker1.connections.append(Connection(destination='/listener2',topic='/chatter',direction=2,transport="TCPROS"))
talker2 = Node(name="/talker2")
talker2.publishes.append("/chatter")
talker2.publishes.append("/rosout")
talker2.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
talker2.connections.append(Connection(destination='/listener1',topic='/chatter',direction=2,transport="TCPROS"))
talker2.connections.append(Connection(destination='/listener2',topic='/chatter',direction=2,transport="TCPROS"))
listener1 = Node(name="/listener1")
listener1.publishes.append("/rosout")
listener1.subscribes.append("/chatter")
listener1.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
listener1.connections.append(Connection(destination='/talker1',topic='/chatter',direction=1,transport="TCPROS"))
listener1.connections.append(Connection(destination='/talker2',topic='/chatter',direction=1,transport="TCPROS"))
listener2 = Node(name="/listener2")
listener2.publishes.append("/rosout")
listener2.subscribes.append("/chatter")
listener2.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
listener2.connections.append(Connection(destination='/talker1',topic='/chatter',direction=1,transport="TCPROS"))
listener2.connections.append(Connection(destination='/talker2',topic='/chatter',direction=1,transport="TCPROS"))
rosout = Node(name="/rosout")
rosout.publishes.append("/rosout_agg")
rosout.subscribes.append("/rosout")
rosout.connections.append(Connection(destination='/talker1',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/talker2',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/listener1',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/listener2',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/test_grapher',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/rosgrapher',topic='/rosout',direction=1,transport="TCPROS"))
grapher = Node(name="/rosgrapher")
grapher.publishes.append("/rosout")
grapher.publishes.append("/topology")
grapher.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
grapher.connections.append(Connection(destination='/'+NAME,topic='/topology',direction=2,transport="TCPROS"))
tester = Node(name="/test_grapher")
tester.publishes.append("/rosout")
tester.subscribes.append("/topology")
tester.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
tester.connections.append(Connection(destination='/rosgrapher',topic='/topology',direction=1,transport="TCPROS"))
EXPECTED_NODES['/talker1'] = talker1
EXPECTED_NODES['/talker2'] = talker2
EXPECTED_NODES['/listener1'] = listener1
EXPECTED_NODES['/listener2'] = listener2
EXPECTED_NODES['/rosout'] = rosout
EXPECTED_NODES['/rosgrapher'] = grapher
EXPECTED_NODES['/'+NAME] = tester
t_chatter = Topic(name="/chatter", type="std_msgs/String")
t_rosout = Topic(name="/rosout", type="rosgraph_msgs/Log")
t_rosout_agg = Topic(name="/rosout_agg", type="rosgraph_msgs/Log")
t_topology = Topic(name="/topology", type="ros_topology_msgs/Graph")
EXPECTED_TOPICS = [t_chatter, t_rosout, t_rosout_agg, t_topology]
class TestGrapher(unittest.TestCase):
def __init__(self, *args):
super(TestGrapher, self).__init__(*args)
self.start_time = None
self.graph = Graph()
def setUp(self):
rospy.init_node(NAME)
rospy.Subscriber('/topology', Graph, self.callback)
self.wait_for_data(10.0)
def callback(self, data):
self.graph = data
def wait_for_data(self, duration):
start_time = rospy.get_rostime()
while not rospy.is_shutdown() and not (rospy.get_rostime() > (start_time + rospy.Duration(duration))):
if len(self.graph.nodes) >= len(EXPECTED_NODES) and len(self.graph.topics) >= len(EXPECTED_TOPICS):
return
rospy.sleep(1.0)
def test_nodes_publishers(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES, "%s not found!"%node.name
testnode = EXPECTED_NODES[node.name]
assert set(node.publishes) == set(testnode.publishes), "%s.publishes=%s, but should be %s"%(node.name,node.publishes,testnode.publishes)
def test_nodes_subscribers(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES, "%s not found!"%node.name
testnode = EXPECTED_NODES[node.name]
assert set(node.subscribes) == set(testnode.subscribes), "%s.subscribes=%s, but should be %s"%(node.name,node.subscribes,testnode.subscribes)
def test_nodes_connections_present(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES, "%s not found!"%node.name
testnode = EXPECTED_NODES[node.name]
for connection in node.connections:
assert connection in testnode.connections, "Node %s has extra connection %s"%(node.name, connection)
def test_nodes_connections_missing(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES, "%s not found!"%node.name
testnode = EXPECTED_NODES[node.name]
for connection in testnode.connections:
assert connection in node.connections, "Node %s expected to find missing connection %s"%(node.name, connection)
def test_nodes_present(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES.keys(), "Found extra node '%s'"%node.name
def test_nodes_missing(self):
for node_name in EXPECTED_NODES.keys():
assert node_name in [n.name for n in self.graph.nodes], "Expected to find missing node '%s'"%node_name
def test_topics_present(self):
for topic in self.graph.topics:
assert topic in EXPECTED_TOPICS, "Found extra topic '%s'"%topic
def test_topics_missing(self):
for topic in EXPECTED_TOPICS:
assert topic in self.graph.topics, "Expected to find missing topic '%s'"%topic
if __name__ == '__main__':
rostest.rosrun(PKG, NAME, TestGrapher, sys.argv)
| true | true |
1c478bdc9499e3ce8182dc63c0c4b8edbd2abeb0 | 12,032 | py | Python | stellapy/stellapy_old/stella_read.py | SStroteich/stella-1 | 104556a07b9736e7c28e6f1bf2f799384732f38b | [
"MIT"
] | 4 | 2021-12-15T08:23:45.000Z | 2022-02-18T15:14:42.000Z | stellapy/stellapy_old/stella_read.py | SStroteich/stella-1 | 104556a07b9736e7c28e6f1bf2f799384732f38b | [
"MIT"
] | 37 | 2021-07-05T16:41:33.000Z | 2022-03-21T15:58:05.000Z | stellapy/stellapy_old/stella_read.py | SStroteich/stella-1 | 104556a07b9736e7c28e6f1bf2f799384732f38b | [
"MIT"
] | 7 | 2021-07-05T15:35:55.000Z | 2022-03-09T09:23:42.000Z | import numpy as np
from stella_dirs import *
from scipy.io import netcdf
#plt.rcParams.update({'font.size': 28})
#plt.rcParams['lines.linewidth'] = 2
import tabCompleter
from tabCompleter import *
from plotbox import *
from aux_functions import *
from os import listdir
from netCDF4 import *
import glob
import os.path
# ==============================================================
# Some utils
def format1(value):
    """Scientific notation, 3 decimals, e.g. '1.235e+04'."""
    return format(value, ".3e")
def format2(value):
    """Scientific notation, 6 decimals, right-justified in 14 columns."""
    return format(value, "14.6e")
def format3(value):
    """Fixed point, 2 decimals, minimum width 4."""
    return format(value, "4.2f")
def format4(value):
    """Fixed point, 2 decimals, right-justified in 6 columns."""
    return format(value, "6.2f")
def format6(value):
    """Fixed point, 3 decimals, right-justified in 7 columns."""
    return format(value, "7.3f")
def format5(value):
    """Scientific notation, 5 decimals."""
    return format(value, ".5e")
def format7(value):
    """Fixed point, 3 decimals, right-justified in 22 columns."""
    return format(value, "22.3f")
def format8(value):
    """Zero-padded 4-digit integer. Kept as %-formatting on purpose:
    '%d' also accepts floats (by truncation), which the 'd' spec does not."""
    return "%04d" % value
def format9(value):
    """Fixed point, 5 decimals, minimum width 7."""
    return format(value, "7.5f")
# Some utils ended
#===============================================================
def casestr(case=None):
    """Return the run's base name (input-file name without the '.in' suffix).

    *case* is either the path of a '*.in' input file or a run directory;
    in the latter case the directory must contain exactly one input file.

    Raises ValueError when the run directory holds zero or several input
    files. (The original referenced the bare name ``exit`` — a no-op — and
    then fell through, silently returning None on ambiguity; it also wrote
    ``size(inputlist(case) == 1)`` with a misplaced parenthesis.)
    """
    if case.endswith(".in"):
        # Basename of the path, minus the '.in' extension.
        return case.split("/")[-1].split(".in")[0]
    inputs = inputlist(case)
    if len(inputs) > 1:
        print("\nSpecify the input in the case field, more than one input file found:\n")
        print(inputs)
        raise ValueError("ambiguous case: %d input files found" % len(inputs))
    if len(inputs) == 1:
        return inputs[0].split(".in")[0]
    raise ValueError("no '*.in' input file found for case %r" % case)
def inputlist_r(case):
    """Paths of '*.in' files directly in the run directory or one level below."""
    base = outdir(case)
    level0 = glob.glob(base + '/*.in')
    level1 = glob.glob(base + '/*/*.in')
    return level0 + level1
def inputlist(case, recursive=False):
    """List '*.in' input files for *case*.

    Non-recursive (default): names of visible '*.in' files directly inside
    outdir(case); hidden files such as '.foo.in' are skipped.
    Recursive: full paths one directory level down as well (inputlist_r).
    """
    if recursive:
        return inputlist_r(case=case)
    # Comprehension replaces the append loop; the original also kept a
    # dead 'inputname' local that was never read.
    return [f for f in listdir(outdir(case))
            if f.endswith('.in') and not f.startswith('.')]
def outdir(case=None):
    """Absolute run directory for *case* under runsdir().

    *case* may be a run-directory name, or an input-file path ending in
    '.in' whose file-name component is then stripped.
    """
    if case.endswith(".in"):
        # dirname() drops only the last path component; the original used
        # str.replace(), which could also remove an identical '/name.in'
        # substring occurring earlier in the path.
        head = os.path.dirname(case)
        # A bare file name (no '/') is kept as-is, matching the old code.
        return runsdir() + '/' + (head if head else case)
    return runsdir() + '/' + case
def geotxtfile(case=None):
    """Full path of the '.geometry' output text file for *case*.

    Delegates to outfile(), which already implements both the
    input-file-path and run-directory forms of *case* with identical
    string handling — the original duplicated that logic inline.
    """
    return outfile(case, quant='geometry')
def outfile(case=None, quant=None):
    """Full path of the output file '<case base name>.<quant>'.

    *case* may be the path of the '*.in' input file itself, or a run
    directory (resolved through outdir()/casestr()).
    """
    if os.path.isfile(case):
        # NOTE(review): split('.in')[0] truncates at the FIRST '.in'
        # occurrence in the path — assumed not to appear earlier; confirm.
        return case.split('.in')[0] + '.' + quant
    else:
        return outdir(case) + '/' + casestr(case) + '.' + quant
def infile(case=None):
    """Full path of the netcdf output file ('*.out.nc') for *case*."""
    return outfile(case, quant='out.nc')
def fluxes_txt(case=None):
    """Full path of the '*.fluxes' text output file for *case*."""
    return outfile(case, quant='fluxes')
# ==================================================================
# Reading variables in the input *.in file
def torflux(case):
    """Parse the 'torflux' value from the run's '*.in' input file.

    Assumes a line of the form 'torflux = <value>'; the first occurrence
    of the word 'torflux' in the file is used.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(outfile(case, quant='in')) as myfile:
        text = myfile.read()
    return float(text.split('torflux')[1].split('\n')[0].split('=')[1])
# ==================================================================
# Translation of quantities in stella_data module by Michael into
# functions with the run directory ("case") as single argument.
def read_stella_float(case, var):
    """Read variable *var* from the run's netcdf output as a numpy array.

    Returns a copy of the data, or a 1-element placeholder array when the
    variable is absent (an INFO message is printed in that case).

    The original left the netcdf file open and computed a 'flag' that was
    never returned; the handle is now closed and the dead flag removed.
    (Callers needing a found/not-found flag must query the file directly.)
    """
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    try:
        arr = np.copy(ncfile.variables[var][:])
    except KeyError:
        print('INFO: '+var+' not found in netcdf file')
        arr = np.arange(1, dtype=float)
    finally:
        ncfile.close()
    return arr
def read_stella_value(case, var):
    """Read variable *var* from the netcdf output via netCDF4.Dataset.

    The slice materializes the data before the file is closed, so the
    returned value remains valid (the original never closed the Dataset).
    """
    d = Dataset(infile(case), mode='r')
    try:
        return d.variables[var][:]
    finally:
        d.close()
def kx(case):
    """Radial wavenumber grid, reordered to be monotonically increasing.

    stella stores kx FFT-style as (0, dkx, ..., kx_max, -kx_max, ..., -dkx);
    the two halves are swapped here. Returns (kx, nakx, nakx_mid), where
    nakx_mid is the index of the first negative kx in stella's ordering.
    """
    ncfile = netcdf.netcdf_file(infile(case),'r')
    kx_stella = np.copy(ncfile.variables['kx'][:])
    nakx = ncfile.dimensions['kx']
    nakx_mid = nakx//2+1
    kx = np.concatenate((kx_stella[nakx_mid:],kx_stella[:nakx_mid]))
    return kx, nakx, nakx_mid
def kx_stella(case):
    """Radial wavenumber grid in stella's native (FFT) ordering."""
    ncfile = netcdf.netcdf_file(infile(case),'r')
    kx_stella = np.copy(ncfile.variables['kx'][:])
    return kx_stella
def ky(case):
    """Binormal wavenumber grid. Returns (ky, naky)."""
    ncfile = netcdf.netcdf_file(infile(case),'r')
    ky = np.copy(ncfile.variables['ky'][:])
    naky = ncfile.dimensions['ky']
    return ky, naky
def zed(case):
    """Parallel (z) coordinate grid. Returns (zed, nzed, iz0=nzed//2+1)."""
    ncfile = netcdf.netcdf_file(infile(case),'r')
    zed = np.copy(ncfile.variables['zed'][:])
    nzed = zed.size
    iz0 = nzed//2+1
    return zed, nzed, iz0
def time(case):
    """Output time grid. Returns (time, ntime)."""
    ncfile = netcdf.netcdf_file(infile(case),'r')
    time = np.copy(ncfile.variables['t'][:])
    ntime = time.size
    return time, ntime
def nspec(case):
    """Number of kinetic species in the run."""
    ncfile = netcdf.netcdf_file(infile(case),'r')
    nspec = ncfile.dimensions['species']
    return nspec
def geo(case):
    """Geometric coefficient arrays along z, plus the scalar shear.

    Returns (bmag, gradpar, gbdrift, gbdrift0, cvdrift, cvdrift0,
    gds2, gds21, gds22, shat); shat is read as a float via netCDF4.
    """
    d = Dataset(infile(case), mode='r')
    ncfile = netcdf.netcdf_file(infile(case),'r')
    bmag = np.copy(ncfile.variables['bmag'][:])
    gradpar = np.copy(ncfile.variables['gradpar'][:])
    gbdrift = np.copy(ncfile.variables['gbdrift'][:])
    gbdrift0 = np.copy(ncfile.variables['gbdrift0'][:])
    cvdrift = np.copy(ncfile.variables['cvdrift'][:])
    cvdrift0 = np.copy(ncfile.variables['cvdrift0'][:])
    gds2 = np.copy(ncfile.variables['gds2'][:])
    gds21 = np.copy(ncfile.variables['gds21'][:])
    gds22 = np.copy(ncfile.variables['gds22'][:])
    shat = float(d.variables['shat'][:])
    return bmag, gradpar, gbdrift, gbdrift0, cvdrift, cvdrift0, gds2, gds21, gds22, shat
def phi2_vs_kxky(case):
    """|phi|^2 averaged over z as a function of (ky, kx, t), per the
    original author's note; returned in stella's native kx ordering
    (a half-swapping reorder was present but left disabled)."""
    phi2_vs_kxky_stella = read_stella_float(case, 'phi2_vs_kxky')
    return phi2_vs_kxky_stella
def pflux_vs_kxky(case):
    """Particle flux resolved in (kx, ky)."""
    return read_stella_float(case, 'pflx_kxky')
def vflux_vs_kxky(case):
    """Momentum flux resolved in (kx, ky)."""
    return read_stella_float(case, 'vflx_kxky')
def qflux_vs_kxky(case):
    """Heat flux resolved in (kx, ky)."""
    return read_stella_float(case, 'qflx_kxky')
def density_vs_kxky(case):
    """Density fluctuation diagnostic ('density' netcdf variable)."""
    return read_stella_float(case, 'density')
def upar_vs_kxky(case):
    """Parallel flow fluctuation diagnostic ('upar' netcdf variable)."""
    return read_stella_float(case, 'upar')
def temperature_vs_kxky(case):
    """Temperature fluctuation diagnostic ('temperature' netcdf variable)."""
    return read_stella_float(case, 'temperature')
def phi_vs_t(case):
    """Electrostatic potential as a function of (z, kx, ky, t)."""
    return read_stella_float(case, 'phi_vs_t')
def gvmus(case):
    """|g|^2 averaged over kx, ky and z (velocity-space diagnostic)."""
    return read_stella_float(case, 'gvmus')
def gzvs(case):
    """|g|^2 averaged over kx, ky and mu."""
    return read_stella_float(case, 'gzvs')
def jacob(case):
    """Jacobian of the transformation to (rho, alpha, z) coordinates."""
    return read_stella_float(case, 'jacob')
def jtwist(case):
    """jtwist factor of the twist-and-shift boundary condition."""
    return read_stella_value(case, 'jtwist')
def grho(case):
    """Gradient of the normalized radial coordinate rho."""
    return read_stella_float(case, 'grho')
def phi2_stella(case):
    """Modulus squared of the electrostatic potential, space-averaged."""
    return read_stella_float(case, 'phi2')
def es_part_flux(case):
    """Time-dependent electrostatic particle flux per species."""
    return read_stella_float(case, 'es_part_flux')
def es_heat_flux(case):
    """Time-dependent electrostatic heat flux per species."""
    return read_stella_float(case, 'es_heat_flux')
def es_mom_flux(case):
    """Time-dependent electrostatic momentum flux per species."""
    return read_stella_float(case, 'es_mom_flux')
def es_energy_exchange(case):
    """Electrostatic energy-exchange diagnostic."""
    return read_stella_float(case, 'es_energy_exchange')
def es_part_by_k(case):
    """Time-dependent particle flux per species vs (kx, ky).

    Tries the old netcdf name 'es_part_by_k' first, falling back to
    'es_part_flux_by_mode'. Returns (array, present_flag).

    The original unpacked two values from read_stella_float(), which
    returns a single array, so the fallback could never work; the
    variable lookup is done directly on the netcdf file instead.
    """
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    try:
        for var in ('es_part_by_k', 'es_part_flux_by_mode'):
            if var in ncfile.variables:
                return np.copy(ncfile.variables[var][:]), True
    finally:
        ncfile.close()
    print('INFO: es_part_by_k not found in netcdf file')
    return np.arange(1, dtype=float), False
def es_mom_by_k(case):
    """Time-dependent momentum flux per species vs (kx, ky).

    Tries 'es_mom_by_k' first, falling back to 'es_mom_flux_by_mode'.
    Returns (array, present_flag).

    The original unpacked two values from read_stella_float(), which
    returns a single array, so the fallback could never work; the
    variable lookup is done directly on the netcdf file instead.
    """
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    try:
        for var in ('es_mom_by_k', 'es_mom_flux_by_mode'):
            if var in ncfile.variables:
                return np.copy(ncfile.variables[var][:]), True
    finally:
        ncfile.close()
    print('INFO: es_mom_by_k not found in netcdf file')
    return np.arange(1, dtype=float), False
def es_energy_exchange_by_k(case):
    """Energy exchange resolved in (kx, ky).

    Tries 'es_energy_exchange_by_k' first, falling back to
    'es_energy_exchange_by_mode'. Returns (array, present_flag).

    The original unpacked two values from read_stella_float(), which
    returns a single array, so the fallback could never work; the
    variable lookup is done directly on the netcdf file instead.
    """
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    try:
        for var in ('es_energy_exchange_by_k', 'es_energy_exchange_by_mode'):
            if var in ncfile.variables:
                return np.copy(ncfile.variables[var][:]), True
    finally:
        ncfile.close()
    print('INFO: es_energy_exchange_by_k not found in netcdf file')
    return np.arange(1, dtype=float), False
def es_energy_exchange_by_ky(case):
    """Electrostatic energy exchange resolved in ky."""
    return read_stella_float(case, 'es_energy_exchange_by_ky')
def vpa(case):
    """Parallel-velocity grid.

    NOTE(review): some callers index the result (e.g. vpa(case)[1] in
    es_mom_sym), treating it as an (array, flag) pair, but
    read_stella_float returns only the array — confirm intended interface.
    """
    return read_stella_float(case, 'vpa')
def mu(case):
    """Magnetic-moment (mu) grid."""
    return read_stella_float(case, 'mu')
def es_part_sym(case):
    """Electrostatic particle flux as a function of (vpa, z)."""
    return read_stella_float(case, 'es_part_sym')
def es_heat_sym(case):
    """Electrostatic heat flux as a function of (vpa, z)."""
    return read_stella_float(case, 'es_heat_sym')
def es_mom_sym(case):
    """Electrostatic momentum flux as a function of (vpa, z).

    Returns (array, present_flag); the flag is False when either
    'es_mom_sym' or the 'vpa' grid is missing from the netcdf file.

    The original unpacked two values from read_stella_float() (which
    returns a single array) and indexed vpa(case)[1] as a flag; both
    checks are done directly on the netcdf file instead.
    """
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    try:
        has_var = 'es_mom_sym' in ncfile.variables
        present = has_var and 'vpa' in ncfile.variables
        if has_var:
            arr = np.copy(ncfile.variables['es_mom_sym'][:])
        else:
            print('INFO: es_mom_sym not found in netcdf file')
            arr = np.arange(1, dtype=float)
    finally:
        ncfile.close()
    return arr, present
def xgrid(case):
    """x grid with the FFT-ordered halves swapped, consistent with kx().

    Returns (xgrid, present_flag). The original unpacked two values from
    read_stella_float(), which returns a single array; the netcdf lookup
    is done directly here so the 'present' flag is meaningful.
    """
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    try:
        present = 'xgrid' in ncfile.variables
        if present:
            raw = np.copy(ncfile.variables['xgrid'][:])
        else:
            print('INFO: xgrid not found in netcdf file')
            raw = np.arange(1, dtype=float)
    finally:
        ncfile.close()
    # Same midpoint convention as kx()/kx_stella().
    mid = kx_stella(case).shape[0] // 2 + 1
    return np.concatenate((raw[mid:], raw[:mid])), present
def dens(case):
    """Species densities rescaled by 1e19 via factormult.

    Returns (scaled array, number of entries). NOTE(review): factormult
    is assumed to be an elementwise multiply — confirm in aux_functions.
    """
    dens=read_stella_float(case, 'dens')
    dens_exp=factormult(dens,1e19)
    return dens_exp, size(dens)
def upar(case):
    """Parallel flow fluctuation as a function of (kx, ky, z, t)."""
    return read_stella_float(case,'upar')
def temp(case):
    """Species temperatures rescaled by 1000 via factormult.

    Returns (scaled array, number of entries).
    """
    temp=read_stella_float(case,'temp')
    temp_exp=factormult(temp,1000)
    return temp_exp, size(temp)
def species(case):
    """Per-species type codes. Returns (array, number of entries)."""
    species=read_stella_float(case,'type_of_species')
    return species, size(species)
def nprim(case):
    """Density gradient parameter ('fprim' in the netcdf file)."""
    return read_stella_float(case,'fprim')
def tprim(case):
    """Temperature gradient parameter ('tprim' in the netcdf file)."""
    return read_stella_float(case,'tprim')
def charge(case):
    """Per-species charges. Returns (array, number of entries)."""
    charge=read_stella_float(case,'charge')
    return charge, size(charge)
def mass(case):
    """Per-species masses. Returns (array, number of entries).

    Fixes the original's size(mass), which measured the function object
    itself (always 1) instead of the data array.
    """
    mass_arr = read_stella_float(case,'mass')
    return mass_arr, size(mass_arr)
# ==================================================================
| 31.915119 | 93 | 0.640376 | import numpy as np
from stella_dirs import *
from scipy.io import netcdf
import tabCompleter
from tabCompleter import *
from plotbox import *
from aux_functions import *
from os import listdir
from netCDF4 import *
import glob
import os.path
def format1(value):
return "%.3e" % value
def format2(value):
return "%14.6e" % value
def format3(value):
return "%4.2f" % value
def format4(value):
return "%6.2f" % value
def format6(value):
return "%7.3f" % value
def format5(value):
return "%.5e" % value
def format7(value):
return "%22.3f" % value
def format8(value):
return "%04d" % value
def format9(value):
return "%7.5f" % value
def casestr(case=None):
if case.endswith(".in"):
buff = case.split("/")
return buff[size(buff)-1].split(".in")[0]
else:
if size(inputlist(case)) > 1:
print("\nSpecify the input in the case field, more than one input file found:\n")
print(inputlist(case))
exit
elif size(inputlist(case) == 1):
return inputlist(case)[0].split(".in")[0]
def inputlist_r(case):
inputs_level_0 = glob.glob(outdir(case)+'/*.in', recursive = True)
inputs_level_1 = glob.glob(outdir(case)+'/*/*.in', recursive = True)
return (inputs_level_0+inputs_level_1)
def inputlist(case, recursive=False):
inlist = []
if recursive:
inlist = inputlist_r(case=case)
else:
for f in listdir(outdir(case)):
if f.endswith('.in'):
if not f.startswith('.'):
inputname=f
inlist.append(f)
return inlist
def outdir(case=None):
if case.endswith(".in"):
vcase=case.split("/")
return runsdir()+'/'+ case.replace("/"+vcase[size(vcase)-1], '')
else:
return runsdir()+'/'+ case
def geotxtfile(case=None):
if os.path.isfile(case):
return case.split('.in')[0] + '.geometry'
else:
return outdir(case) + '/' + casestr(case) + '.geometry'
def outfile(case=None, quant=None):
if os.path.isfile(case):
return case.split('.in')[0] + '.' + quant
else:
return outdir(case) + '/' + casestr(case) + '.' + quant
def infile(case=None):
return outfile(case, quant='out.nc')
def fluxes_txt(case=None):
return outfile(case, quant='fluxes')
def torflux(case):
myfile = open(outfile(case, quant='in'))
content = float(myfile.read().split('torflux')[1].split('\n')[0].split('=')[1])
return content
def read_stella_float(case, var):
import numpy as np
ncfile = netcdf.netcdf_file(infile(case),'r')
try:
arr = np.copy(ncfile.variables[var][:])
flag = True
except KeyError:
print('INFO: '+var+' not found in netcdf file')
arr = np.arange(1,dtype=float)
flag = False
return arr
def read_stella_value(case, var):
woutfile = infile(case)
d = Dataset(woutfile, mode='r')
return d.variables[var][:]
def kx(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
kx_stella = np.copy(ncfile.variables['kx'][:])
nakx = ncfile.dimensions['kx']
nakx_mid = nakx//2+1
kx = np.concatenate((kx_stella[nakx_mid:],kx_stella[:nakx_mid]))
return kx, nakx, nakx_mid
def kx_stella(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
kx_stella = np.copy(ncfile.variables['kx'][:])
return kx_stella
def ky(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
ky = np.copy(ncfile.variables['ky'][:])
naky = ncfile.dimensions['ky']
return ky, naky
def zed(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
zed = np.copy(ncfile.variables['zed'][:])
nzed = zed.size
iz0 = nzed//2+1
return zed, nzed, iz0
def time(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
time = np.copy(ncfile.variables['t'][:])
ntime = time.size
return time, ntime
def nspec(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
nspec = ncfile.dimensions['species']
return nspec
def geo(case):
d = Dataset(infile(case), mode='r')
ncfile = netcdf.netcdf_file(infile(case),'r')
bmag = np.copy(ncfile.variables['bmag'][:])
gradpar = np.copy(ncfile.variables['gradpar'][:])
gbdrift = np.copy(ncfile.variables['gbdrift'][:])
gbdrift0 = np.copy(ncfile.variables['gbdrift0'][:])
cvdrift = np.copy(ncfile.variables['cvdrift'][:])
cvdrift0 = np.copy(ncfile.variables['cvdrift0'][:])
gds2 = np.copy(ncfile.variables['gds2'][:])
gds21 = np.copy(ncfile.variables['gds21'][:])
gds22 = np.copy(ncfile.variables['gds22'][:])
shat = float(d.variables['shat'][:])
return bmag, gradpar, gbdrift, gbdrift0, cvdrift, cvdrift0, gds2, gds21, gds22, shat
def phi2_vs_kxky(case):
phi2_vs_kxky_stella = read_stella_float(case, 'phi2_vs_kxky')
return phi2_vs_kxky_stella
def pflux_vs_kxky(case):
pflux_vs_kxky_stella = read_stella_float(case, 'pflx_kxky')
return pflux_vs_kxky_stella
def vflux_vs_kxky(case):
vflux_vs_kxky_stella = read_stella_float(case, 'vflx_kxky')
return vflux_vs_kxky_stella
def qflux_vs_kxky(case):
qflux_vs_kxky_stella = read_stella_float(case, 'qflx_kxky')
return qflux_vs_kxky_stella
def density_vs_kxky(case):
density_vs_kxky_stella = read_stella_float(case, 'density')
return density_vs_kxky_stella
def upar_vs_kxky(case):
upar_vs_kxky_stella = read_stella_float(case, 'upar')
return upar_vs_kxky_stella
def temperature_vs_kxky(case):
temperature_vs_kxky_stella = read_stella_float(case, 'temperature')
return temperature_vs_kxky_stella
def phi_vs_t(case):
phi_vs_t_stella = read_stella_float(case, 'phi_vs_t')
return phi_vs_t_stella
def gvmus(case):
return read_stella_float(case, 'gvmus')
def gzvs(case):
return read_stella_float(case, 'gzvs')
def jacob(case):
return read_stella_float(case, 'jacob')
def jtwist(case):
return read_stella_value(case, 'jtwist')
def grho(case):
return read_stella_float(case, 'grho')
def phi2_stella(case):
return read_stella_float(case, 'phi2')
def es_part_flux(case):
return read_stella_float(case, 'es_part_flux')
def es_heat_flux(case):
return read_stella_float(case, 'es_heat_flux')
def es_mom_flux(case):
return read_stella_float(case, 'es_mom_flux')
def es_energy_exchange(case):
return read_stella_float(case, 'es_energy_exchange')
def es_part_by_k(case):
es_part_by_k_stella, es_part_by_k_present = \
read_stella_float(case, 'es_part_by_k')
if es_part_by_k_present is not True:
es_part_by_k_stella, es_part_by_k_present = \
read_stella_float(case, 'es_part_flux_by_mode')
return es_part_by_k_stella, es_part_by_k_present
def es_mom_by_k(case):
es_mom_by_k_stella, es_mom_by_k_present = \
read_stella_float(case, 'es_mom_by_k')
if es_mom_by_k_present is not True:
es_mom_by_k_stella, es_mom_by_k_present = \
read_stella_float(case, 'es_mom_flux_by_mode')
return es_mom_by_k_stella, es_mom_by_k_present
def es_energy_exchange_by_k(case):
es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present = \
read_stella_float(case, 'es_energy_exchange_by_k')
if es_energy_exchange_by_k_present is not True:
es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present = \
read_stella_float(case, 'es_energy_exchange_by_mode')
return es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present
def es_energy_exchange_by_ky(case):
return read_stella_float(case, 'es_energy_exchange_by_ky')
def vpa(case):
return read_stella_float(case, 'vpa')
def mu(case):
return read_stella_float(case, 'mu')
def es_part_sym(case):
return read_stella_float(case, 'es_part_sym')
def es_heat_sym(case):
return read_stella_float(case, 'es_heat_sym')
def es_mom_sym(case):
es_mom_sym_stella, es_mom_sym_present = read_stella_float(case, 'es_mom_sym')
if vpa(case)[1] == False:
es_mom_sym_present = False
return es_mom_sym_stella, es_mom_sym_present
def xgrid(case):
xgrid_stella, xgrid_present = \
read_stella_float(case, 'xgrid')
xgrid = np.concatenate((xgrid_stella[kx_stella(case).shape[0]//2+1:],\
xgrid_stella[:kx_stella(case).shape[0]//2+1]))
return xgrid, xgrid_present
def dens(case):
dens=read_stella_float(case, 'dens')
dens_exp=factormult(dens,1e19)
return dens_exp, size(dens)
def upar(case):
return read_stella_float(case,'upar')
def temp(case):
temp=read_stella_float(case,'temp')
temp_exp=factormult(temp,1000)
return temp_exp, size(temp)
def species(case):
species=read_stella_float(case,'type_of_species')
return species, size(species)
def nprim(case):
return read_stella_float(case,'fprim')
def tprim(case):
return read_stella_float(case,'tprim')
def charge(case):
charge=read_stella_float(case,'charge')
return charge, size(charge)
def mass(case):
charge=read_stella_float(case,'mass')
return charge, size(mass)
| true | true |
1c478c2f72be04820d92305cfffce27aa98c7fa4 | 907 | py | Python | electrum/tests/__init__.py | checho1989/electrum-civx | 4853bf42f0aa96bb894992c1abf7b8bdda587543 | [
"MIT"
] | null | null | null | electrum/tests/__init__.py | checho1989/electrum-civx | 4853bf42f0aa96bb894992c1abf7b8bdda587543 | [
"MIT"
] | null | null | null | electrum/tests/__init__.py | checho1989/electrum-civx | 4853bf42f0aa96bb894992c1abf7b8bdda587543 | [
"MIT"
] | null | null | null | import unittest
import threading
from electrum_civx import constants
# Set this locally to make the test suite run faster.
# If set, unit tests that would normally exercise functions with multiple
# implementations will only be run once, using the fastest implementation
# (e.g. libsecp256k1 vs python-ecdsa, pycryptodomex vs pyaes).
FAST_TESTS = False
# Some unit tests modify module-level globals; SequentialTestCase below
# serializes them so they cannot interleave.
class SequentialTestCase(unittest.TestCase):
    """TestCase that serializes execution via a shared class-level lock.

    Some unit tests mutate module-level globals, so instances must not
    run concurrently: the lock is taken in setUp and freed in tearDown.
    """

    test_lock = threading.Lock()

    def setUp(self):
        super().setUp()
        type(self).test_lock.acquire()

    def tearDown(self):
        super().tearDown()
        type(self).test_lock.release()
class TestCaseForTestnet(SequentialTestCase):
    """Run the class's tests against testnet chain parameters.

    constants.set_testnet() flips module-level network parameters for the
    whole class; tearDownClass restores mainnet so later tests see the
    default network again.
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        constants.set_testnet()
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        constants.set_mainnet()
| 23.25641 | 86 | 0.705623 | import unittest
import threading
from electrum_civx import constants
FAST_TESTS = False
class SequentialTestCase(unittest.TestCase):
test_lock = threading.Lock()
def setUp(self):
super().setUp()
self.test_lock.acquire()
def tearDown(self):
super().tearDown()
self.test_lock.release()
class TestCaseForTestnet(SequentialTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
constants.set_testnet()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
constants.set_mainnet()
| true | true |
1c478c32bd4fd3adda92f37777aa80cd495fcafb | 926 | py | Python | common/models/notice/UserNews.py | apanly/python_learn_master | 93a214241812f77a006cc8350a7bad6c4eec6c89 | [
"BSD-3-Clause"
] | 5 | 2020-11-29T14:21:18.000Z | 2021-10-07T04:11:29.000Z | common/models/notice/UserNews.py | linkgeek/python_flask_cms | ff5e794b5b11075670e5d11a8cbda0a137319876 | [
"BSD-3-Clause"
] | null | null | null | common/models/notice/UserNews.py | linkgeek/python_flask_cms | ff5e794b5b11075670e5d11a8cbda0a137319876 | [
"BSD-3-Clause"
] | 2 | 2020-11-30T09:55:53.000Z | 2022-03-19T12:49:40.000Z | # coding: utf-8
from application import db
class UserNews(db.Model):
    """ORM model for a per-user notification message (table 'user_news')."""
    __tablename__ = 'user_news'
    id = db.Column(db.Integer, primary_key=True, info='消息id')  # message id
    uid = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='用户id')  # owning user id
    title = db.Column(db.String(255), nullable=False, server_default=db.FetchedValue(), info='标题')  # title
    content = db.Column(db.String(1500), nullable=False, server_default=db.FetchedValue(), info='内容')  # body text
    status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='状态 0:未读 1:已读')  # 0: unread, 1: read
    updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='更新时间')  # last-update time
    created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='创建时间')  # creation time
    def __init__(self, **items):
        """Populate only keyword arguments that match existing attributes."""
        for key in items:
            if hasattr(self, key):
                setattr(self, key, items[key])
| 51.444444 | 105 | 0.686825 |
from application import db
class UserNews(db.Model):
__tablename__ = 'user_news'
id = db.Column(db.Integer, primary_key=True, info='消息id')
uid = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='用户id')
title = db.Column(db.String(255), nullable=False, server_default=db.FetchedValue(), info='标题')
content = db.Column(db.String(1500), nullable=False, server_default=db.FetchedValue(), info='内容')
status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='状态 0:未读 1:已读')
updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='更新时间')
created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='创建时间')
def __init__(self, **items):
for key in items:
if hasattr(self, key):
setattr(self, key, items[key])
| true | true |
1c478d69dfa8ae825ea6fc0e5a10dfc164798605 | 3,750 | py | Python | nova/scheduler/filters/disk_filter.py | panguan737/nova | 0d177185a439baa228b42c948cab4e934d6ac7b8 | [
"Apache-2.0"
] | null | null | null | nova/scheduler/filters/disk_filter.py | panguan737/nova | 0d177185a439baa228b42c948cab4e934d6ac7b8 | [
"Apache-2.0"
] | null | null | null | nova/scheduler/filters/disk_filter.py | panguan737/nova | 0d177185a439baa228b42c948cab4e934d6ac7b8 | [
"Apache-2.0"
] | 1 | 2020-11-02T10:17:13.000Z | 2020-11-02T10:17:13.000Z | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import nova.conf
from nova.i18n import _LW
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class DiskFilter(filters.BaseHostFilter):
    """Filter hosts by disk usage, honouring the disk over-subscription ratio."""

    RUN_ON_REBUILD = False

    def _get_disk_allocation_ratio(self, host_state, spec_obj):
        """Over-subscription ratio to apply; subclasses may override."""
        return host_state.disk_allocation_ratio

    def host_passes(self, host_state, spec_obj):
        """Return True when the host can accommodate the requested disk.

        The request is root + ephemeral (GB converted to MB) plus swap
        (already in MB). Overcommit is allowed against other instances,
        but a single instance may never exceed the host's physical disk.
        """
        requested_disk = (1024 * (spec_obj.root_gb +
                                  spec_obj.ephemeral_gb) +
                          spec_obj.swap)

        free_disk_mb = host_state.free_disk_mb
        total_usable_disk_mb = host_state.total_usable_disk_gb * 1024

        # Do not allow an instance to overcommit against itself, only
        # against other instances.
        if total_usable_disk_mb < requested_disk:
            LOG.debug("%(host_state)s does not have %(requested_disk)s "
                      "MB usable disk space before overcommit, it only "
                      "has %(physical_disk_size)s MB.",
                      {'host_state': host_state,
                       'requested_disk': requested_disk,
                       'physical_disk_size':
                           total_usable_disk_mb})
            return False

        disk_allocation_ratio = self._get_disk_allocation_ratio(
            host_state, spec_obj)

        disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio
        used_disk_mb = total_usable_disk_mb - free_disk_mb
        usable_disk_mb = disk_mb_limit - used_disk_mb

        # Clearer equivalent of the original 'not usable >= requested'.
        if usable_disk_mb < requested_disk:
            LOG.debug("%(host_state)s does not have %(requested_disk)s MB "
                      "usable disk, it only has %(usable_disk_mb)s MB usable "
                      "disk.", {'host_state': host_state,
                                'requested_disk': requested_disk,
                                'usable_disk_mb': usable_disk_mb})
            return False

        # Expose the computed limit (GB) to later consumers via the
        # host_state.limits dict.
        disk_gb_limit = disk_mb_limit / 1024
        host_state.limits['disk_gb'] = disk_gb_limit
        return True
class AggregateDiskFilter(DiskFilter):
    """DiskFilter variant honouring a per-aggregate disk_allocation_ratio.

    Falls back to the host's global disk_allocation_ratio when no
    per-aggregate value is found or the stored value cannot be parsed.
    """

    RUN_ON_REBUILD = False

    def _get_disk_allocation_ratio(self, host_state, spec_obj):
        aggregate_vals = utils.aggregate_values_from_key(
            host_state, 'disk_allocation_ratio')
        try:
            return utils.validate_num_values(
                aggregate_vals, host_state.disk_allocation_ratio,
                cast_to=float)
        except ValueError as e:
            # Bad metadata value: warn and fall back to the host setting.
            LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
            return host_state.disk_allocation_ratio
| 37.5 | 79 | 0.6496 |
from oslo_log import log as logging
import nova.conf
from nova.i18n import _LW
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class DiskFilter(filters.BaseHostFilter):
RUN_ON_REBUILD = False
def _get_disk_allocation_ratio(self, host_state, spec_obj):
return host_state.disk_allocation_ratio
def host_passes(self, host_state, spec_obj):
requested_disk = (1024 * (spec_obj.root_gb +
spec_obj.ephemeral_gb) +
spec_obj.swap)
free_disk_mb = host_state.free_disk_mb
total_usable_disk_mb = host_state.total_usable_disk_gb * 1024
# this one instance in total_usable_disk space, consider the host full.
if total_usable_disk_mb < requested_disk:
LOG.debug("%(host_state)s does not have %(requested_disk)s "
"MB usable disk space before overcommit, it only "
"has %(physical_disk_size)s MB.",
{'host_state': host_state,
'requested_disk': requested_disk,
'physical_disk_size':
total_usable_disk_mb})
return False
disk_allocation_ratio = self._get_disk_allocation_ratio(
host_state, spec_obj)
disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio
used_disk_mb = total_usable_disk_mb - free_disk_mb
usable_disk_mb = disk_mb_limit - used_disk_mb
if not usable_disk_mb >= requested_disk:
LOG.debug("%(host_state)s does not have %(requested_disk)s MB "
"usable disk, it only has %(usable_disk_mb)s MB usable "
"disk.", {'host_state': host_state,
'requested_disk': requested_disk,
'usable_disk_mb': usable_disk_mb})
return False
disk_gb_limit = disk_mb_limit / 1024
host_state.limits['disk_gb'] = disk_gb_limit
return True
class AggregateDiskFilter(DiskFilter):
RUN_ON_REBUILD = False
def _get_disk_allocation_ratio(self, host_state, spec_obj):
aggregate_vals = utils.aggregate_values_from_key(
host_state,
'disk_allocation_ratio')
try:
ratio = utils.validate_num_values(
aggregate_vals, host_state.disk_allocation_ratio,
cast_to=float)
except ValueError as e:
LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
ratio = host_state.disk_allocation_ratio
return ratio
| true | true |
1c478dfbc5d80108de891f08ddbc1d37b7c4fa6e | 7,930 | py | Python | tests/user/test_scoreboard.py | HYU-ICEWALL/CTFd | d2d95d882663d39d32527afd4382f07188ecb89a | [
"Apache-2.0"
] | null | null | null | tests/user/test_scoreboard.py | HYU-ICEWALL/CTFd | d2d95d882663d39d32527afd4382f07188ecb89a | [
"Apache-2.0"
] | null | null | null | tests/user/test_scoreboard.py | HYU-ICEWALL/CTFd | d2d95d882663d39d32527afd4382f07188ecb89a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.models import Teams, Solves, WrongKeys
from CTFd.utils import get_config, set_config
from CTFd import utils
from tests.helpers import *
from freezegun import freeze_time
from mock import patch
import json
def test_top_10():
    '''Make sure top10 returns correct information'''
    app = create_ctfd()
    with app.app_context():
        # The two registered users correspond to team ids 2 and 3 — the
        # same teamid values passed to gen_solve below.
        register_user(app, name="user1", email="user1@hanyang.ac.kr")
        register_user(app, name="user2", email="user2@hanyang.ac.kr")
        chal1 = gen_challenge(app.db)
        flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
        chal1_id = chal1.id
        chal2 = gen_challenge(app.db)
        flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
        chal2_id = chal2.id
        # Generates solve for user1
        with freeze_time("2017-10-3 03:21:34"):
            gen_solve(app.db, teamid=2, chalid=chal1_id)
        with freeze_time("2017-10-4 03:25:45"):
            gen_solve(app.db, teamid=2, chalid=chal2_id)
        # Generate solve for user2
        with freeze_time("2017-10-3 03:21:34"):
            gen_solve(app.db, teamid=3, chalid=chal1_id)
        client = login_as_user(app)
        r = client.get('/top/10')
        response = r.get_data(as_text=True)
        # Expected payload: ranks map to team info; the "time" values are
        # the epoch seconds of the freeze_time timestamps used above.
        saved = '''{
          "places": {
            "1": {
              "id": 2,
              "name": "user1",
              "solves": [
                {
                  "chal": 1,
                  "team": 2,
                  "time": 1507000894,
                  "value": 100
                },
                {
                  "chal": 2,
                  "team": 2,
                  "time": 1507087545,
                  "value": 100
                }
              ]
            },
            "2": {
              "id": 3,
              "name": "user2",
              "solves": [
                {
                  "chal": 1,
                  "team": 3,
                  "time": 1507000894,
                  "value": 100
                }
              ]
            }
          }
        }'''
        # Compare as parsed JSON so formatting differences do not matter.
        saved = json.loads(saved)
        received = json.loads(response)
        assert saved == received
        destroy_ctfd(app)
def test_scoring_logic():
    """Test that scoring logic is correct.

    With equal point totals, the team that reached the score first must
    rank higher (earliest-solve tie break).
    """
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        register_user(app, name="user1", email="user1@hanyang.ac.kr", password="password")
        client1 = login_as_user(app, name="user1", password="password")
        register_user(app, name="user2", email="user2@hanyang.ac.kr", password="password")
        client2 = login_as_user(app, name="user2", password="password")
        chal1 = gen_challenge(app.db)
        flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
        chal1_id = chal1.id
        chal2 = gen_challenge(app.db)
        flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
        chal2_id = chal2.id
        # user1 solves chal1
        with freeze_time("2017-10-3 03:21:34"):
            with client1.session_transaction() as sess:
                data = {
                    "key": 'flag',
                    "nonce": sess.get('nonce')
                }
            r = client1.post('/chal/{}'.format(chal1_id), data=data)
        # user1 is now on top
        scores = get_scores(admin)
        assert scores[0]['team'] == 'user1'
        # user2 solves chal1 and chal2
        with freeze_time("2017-10-4 03:30:34"):
            with client2.session_transaction() as sess:
                # solve chal1
                data = {
                    "key": 'flag',
                    "nonce": sess.get('nonce')
                }
                r = client2.post('/chal/{}'.format(chal1_id), data=data)
                # solve chal2
                data = {
                    "key": 'flag',
                    "nonce": sess.get('nonce')
                }
                r = client2.post('/chal/{}'.format(chal2_id), data=data)
        # user2 is now on top
        scores = get_scores(admin)
        assert scores[0]['team'] == 'user2'
        # user1 solves chal2
        with freeze_time("2017-10-5 03:50:34"):
            with client1.session_transaction() as sess:
                data = {
                    "key": 'flag',
                    "nonce": sess.get('nonce')
                }
            r = client1.post('/chal/{}'.format(chal2_id), data=data)
        # user2 should still be on top because they solved chal2 first
        scores = get_scores(admin)
        assert scores[0]['team'] == 'user2'
        destroy_ctfd(app)
def test_scoring_logic_with_zero_point_challenges():
    """Test that scoring logic is correct with zero point challenges.

    Zero point challenges should not tie break: a later 0-point solve must
    not change the ordering of teams with equal scores.
    """
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        register_user(app, name="user1", email="user1@hanyang.ac.kr", password="password")
        client1 = login_as_user(app, name="user1", password="password")
        register_user(app, name="user2", email="user2@hanyang.ac.kr", password="password")
        client2 = login_as_user(app, name="user2", password="password")
        chal1 = gen_challenge(app.db)
        flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
        chal1_id = chal1.id
        chal2 = gen_challenge(app.db)
        flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
        chal2_id = chal2.id
        # A 0 point challenge shouldn't influence the scoreboard (see #577)
        chal0 = gen_challenge(app.db, value=0)
        flag0 = gen_flag(app.db, chal=chal0.id, flag='flag')
        chal0_id = chal0.id
        # user1 solves chal1
        with freeze_time("2017-10-3 03:21:34"):
            with client1.session_transaction() as sess:
                data = {
                    "key": 'flag',
                    "nonce": sess.get('nonce')
                }
            r = client1.post('/chal/{}'.format(chal1_id), data=data)
        # user1 is now on top
        scores = get_scores(admin)
        assert scores[0]['team'] == 'user1'
        # user2 solves chal1 and chal2
        with freeze_time("2017-10-4 03:30:34"):
            with client2.session_transaction() as sess:
                # solve chal1
                data = {
                    "key": 'flag',
                    "nonce": sess.get('nonce')
                }
                r = client2.post('/chal/{}'.format(chal1_id), data=data)
                # solve chal2
                data = {
                    "key": 'flag',
                    "nonce": sess.get('nonce')
                }
                r = client2.post('/chal/{}'.format(chal2_id), data=data)
        # user2 is now on top
        scores = get_scores(admin)
        assert scores[0]['team'] == 'user2'
        # user1 solves chal2
        with freeze_time("2017-10-5 03:50:34"):
            with client1.session_transaction() as sess:
                data = {
                    "key": 'flag',
                    "nonce": sess.get('nonce')
                }
            r = client1.post('/chal/{}'.format(chal2_id), data=data)
        # user2 should still be on top because they solved chal2 first
        scores = get_scores(admin)
        assert scores[0]['team'] == 'user2'
        # user2 solves a 0 point challenge
        with freeze_time("2017-10-5 03:55:34"):
            with client2.session_transaction() as sess:
                data = {
                    "key": 'flag',
                    "nonce": sess.get('nonce')
                }
            r = client2.post('/chal/{}'.format(chal0_id), data=data)
        # user2 should still be on top because 0 point challenges should not tie break
        scores = get_scores(admin)
        assert scores[0]['team'] == 'user2'
        destroy_ctfd(app)
| 34.034335 | 115 | 0.509458 |
from CTFd.models import Teams, Solves, WrongKeys
from CTFd.utils import get_config, set_config
from CTFd import utils
from tests.helpers import *
from freezegun import freeze_time
from mock import patch
import json
def test_top_10():
app = create_ctfd()
with app.app_context():
register_user(app, name="user1", email="user1@hanyang.ac.kr")
register_user(app, name="user2", email="user2@hanyang.ac.kr")
chal1 = gen_challenge(app.db)
flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
chal1_id = chal1.id
chal2 = gen_challenge(app.db)
flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
chal2_id = chal2.id
with freeze_time("2017-10-3 03:21:34"):
gen_solve(app.db, teamid=2, chalid=chal1_id)
with freeze_time("2017-10-4 03:25:45"):
gen_solve(app.db, teamid=2, chalid=chal2_id)
with freeze_time("2017-10-3 03:21:34"):
gen_solve(app.db, teamid=3, chalid=chal1_id)
client = login_as_user(app)
r = client.get('/top/10')
response = r.get_data(as_text=True)
saved = '''{
"places": {
"1": {
"id": 2,
"name": "user1",
"solves": [
{
"chal": 1,
"team": 2,
"time": 1507000894,
"value": 100
},
{
"chal": 2,
"team": 2,
"time": 1507087545,
"value": 100
}
]
},
"2": {
"id": 3,
"name": "user2",
"solves": [
{
"chal": 1,
"team": 3,
"time": 1507000894,
"value": 100
}
]
}
}
}'''
saved = json.loads(saved)
received = json.loads(response)
assert saved == received
destroy_ctfd(app)
def test_scoring_logic():
app = create_ctfd()
with app.app_context():
admin = login_as_user(app, name="admin", password="password")
register_user(app, name="user1", email="user1@hanyang.ac.kr", password="password")
client1 = login_as_user(app, name="user1", password="password")
register_user(app, name="user2", email="user2@hanyang.ac.kr", password="password")
client2 = login_as_user(app, name="user2", password="password")
chal1 = gen_challenge(app.db)
flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
chal1_id = chal1.id
chal2 = gen_challenge(app.db)
flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
chal2_id = chal2.id
with freeze_time("2017-10-3 03:21:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal1_id), data=data)
scores = get_scores(admin)
assert scores[0]['team'] == 'user1'
with freeze_time("2017-10-4 03:30:34"):
with client2.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal1_id), data=data)
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal2_id), data=data)
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
with freeze_time("2017-10-5 03:50:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal2_id), data=data)
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
destroy_ctfd(app)
def test_scoring_logic_with_zero_point_challenges():
app = create_ctfd()
with app.app_context():
admin = login_as_user(app, name="admin", password="password")
register_user(app, name="user1", email="user1@hanyang.ac.kr", password="password")
client1 = login_as_user(app, name="user1", password="password")
register_user(app, name="user2", email="user2@hanyang.ac.kr", password="password")
client2 = login_as_user(app, name="user2", password="password")
chal1 = gen_challenge(app.db)
flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
chal1_id = chal1.id
chal2 = gen_challenge(app.db)
flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
chal2_id = chal2.id
chal0 = gen_challenge(app.db, value=0)
flag0 = gen_flag(app.db, chal=chal0.id, flag='flag')
chal0_id = chal0.id
# user1 solves chal1
with freeze_time("2017-10-3 03:21:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal1_id), data=data)
# user1 is now on top
scores = get_scores(admin)
assert scores[0]['team'] == 'user1'
# user2 solves chal1 and chal2
with freeze_time("2017-10-4 03:30:34"):
with client2.session_transaction() as sess:
# solve chal1
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal1_id), data=data)
# solve chal2
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal2_id), data=data)
# user2 is now on top
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
# user1 solves chal2
with freeze_time("2017-10-5 03:50:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal2_id), data=data)
# user2 should still be on top because they solved chal2 first
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
# user2 solves a 0 point challenge
with freeze_time("2017-10-5 03:55:34"):
with client2.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal0_id), data=data)
# user2 should still be on top because 0 point challenges should not tie break
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
destroy_ctfd(app)
| true | true |
1c478ed2cb7df85c8da293ebd6985cd21b3671a5 | 3,769 | py | Python | pynventory/hosts.py | kufsa/pynventory | 708e7950c38e873b2a4b7bdc779c0533888ac811 | [
"MIT"
] | null | null | null | pynventory/hosts.py | kufsa/pynventory | 708e7950c38e873b2a4b7bdc779c0533888ac811 | [
"MIT"
] | null | null | null | pynventory/hosts.py | kufsa/pynventory | 708e7950c38e873b2a4b7bdc779c0533888ac811 | [
"MIT"
] | null | null | null | from fabric import Connection
from invoke.exceptions import UnexpectedExit
class LinuxHost:
    """A Linux machine reachable over SSH, wrapped in a fabric Connection."""

    def __init__(self, host, user):
        self.host = host
        # Short connect timeout keeps unreachable hosts from stalling a run.
        self.connection = Connection(host, connect_timeout=1, user=user)

    def __str__(self):
        return self.host

    @staticmethod
    def display_name():
        return 'Host'
class GetOsRelease:
    """The host's OS release string, read from /etc/os-release when
    available, with a /etc/redhat-release fallback.
    """

    def __init__(self, parent):
        try:
            result = parent.connection.run('cat /etc/os-release | grep "PRETTY_NAME"', hide=True)
            # PRETTY_NAME="..." -> keep only the quoted value.
            self.output = result.stdout.split('=')[1].replace('"', '')
        except UnexpectedExit:
            try:
                result = parent.connection.run(' cat /etc/redhat-release', hide=True)
                self.output = result.stdout
            except UnexpectedExit:
                self.output = "Failed to retrieve OS Release"

    def __str__(self):
        # Drop words that carry no information in the release string.
        redundant = ('Linux', 'Server', 'release')
        return ' '.join(word for word in self.output.split()
                        if word not in redundant)

    @staticmethod
    def display_name():
        return 'OS Version'
class GetHostname:
    """The host's own idea of its name, via the ``hostname`` command."""

    def __init__(self, parent):
        self.output = parent.connection.run('hostname', hide=True).stdout

    def __str__(self):
        return self.output.strip()

    @staticmethod
    def display_name():
        return 'Hostname'
class GetNtpServer:
    """The NTP peers the host's daemon is synchronising against."""

    def __init__(self, parent):
        output = parent.connection.run('ntpq -pn', hide=True)
        # ntpq will output error if daemon is not running
        if output.stderr:
            self.output = [output.stderr.strip(), ]
        else:
            # remove header from ntpq output (two banner lines)
            self.output = output.stdout.strip().split('\n')[2:]

    def __str__(self):
        """Return a comma-separated list of the peer addresses.

        The address is the first column of the ntpq billboard.  Peers
        without a tally code are prefixed with a space, so split on any
        whitespace run: ``line.split(' ')[0]`` would return '' for them.
        """
        servers = []
        for line in self.output:
            fields = line.split()
            if fields:
                servers.append(fields[0])
        return ', '.join(servers)

    @staticmethod
    def display_name():
        return 'NTP Server'
class GetCpuCores:
    """Number of processing units available on the host (``nproc``)."""

    def __init__(self, parent):
        self.output = parent.connection.run('nproc', hide=True).stdout

    def __str__(self):
        return self.output.strip()

    @staticmethod
    def display_name():
        return 'Core count'
class GetMemory:
    """Total physical memory as reported by ``free -h``."""

    def __init__(self, parent):
        free_output = parent.connection.run('free -h', hide=True).stdout
        # The second line is the "Mem:" row; its second column is the total.
        mem_row = free_output.split('\n')[1]
        self.memory = mem_row.split()[1]

    def __str__(self):
        return self.memory

    @staticmethod
    def display_name():
        return 'Memory'
class GetDiskSize:
    """Total local disk capacity from ``df -h -l --total``."""

    def __init__(self, parent):
        df_output = parent.connection.run('df -h -l --total', hide=True).stdout
        # The "total" row is the last non-empty line; column 2 is the size.
        total_row = df_output.split('\n')[-2]
        self.disk_size = total_row.split()[1]

    def __str__(self):
        return self.disk_size

    @staticmethod
    def display_name():
        return 'Disk size'
class GetKernelVersion:
    """The kernel release string (``uname -r``)."""

    def __init__(self, parent):
        self.output = parent.connection.run('uname -r', hide=True).stdout

    def __str__(self):
        return self.output.strip()

    @staticmethod
    def display_name():
        return 'Kernel version'
| 30.893443 | 106 | 0.555585 | from fabric import Connection
from invoke.exceptions import UnexpectedExit
class LinuxHost:
def __init__(self, host, user):
self.connection = Connection(host, connect_timeout=1, user=user, )
self.host = host
@staticmethod
def display_name():
return 'Host'
def __str__(self):
return self.host
class GetOsRelease:
def __init__(self, parent):
try:
self.output = parent.connection.run('cat /etc/os-release | grep "PRETTY_NAME"', hide=True)
self.output = self.output.stdout.split('=')[1].replace('"', '')
except UnexpectedExit:
try:
self.output = parent.connection.run(' cat /etc/redhat-release', hide=True)
self.output = self.output.stdout
except UnexpectedExit:
self.output = "Failed to retrieve OS Release"
def __str__(self):
# some words to remove from output as they are redundant
clean_up = ['Linux', 'Server', 'release']
_out = []
for i in self.output.split():
if i not in clean_up:
_out.append(i)
return ' '.join(_out)
@staticmethod
def display_name():
return 'OS Version'
class GetHostname:
def __init__(self, parent):
self.output = parent.connection.run('hostname', hide=True).stdout
@staticmethod
def display_name():
return 'Hostname'
def __str__(self):
return self.output.strip()
class GetNtpServer:
def __init__(self, parent):
output = parent.connection.run('ntpq -pn', hide=True)
# ntpq will output error if daemon is not running
if output.stderr:
self.output = [output.stderr.strip(), ]
else:
# remove header from ntpq output
self.output = output.stdout.strip().split('\n')[2:]
def __str__(self):
# Filter out details and only return server ip
servers = []
for line in self.output:
servers.append(line.split(' ')[0])
return ', '.join(servers)
@staticmethod
def display_name():
return 'NTP Server'
class GetCpuCores:
def __init__(self, parent):
self.output = parent.connection.run('nproc', hide=True).stdout
def __str__(self):
return self.output.strip()
@staticmethod
def display_name():
return 'Core count'
class GetMemory:
def __init__(self, parent):
output = parent.connection.run('free -h', hide=True).stdout
# Split output into lines, then split the columns and take total memory value
self.memory = output.split('\n')[1].split()[1]
def __str__(self):
return self.memory
@staticmethod
def display_name():
return 'Memory'
class GetDiskSize:
def __init__(self, parent):
output = parent.connection.run('df -h -l --total', hide=True).stdout
# Split output into lines, then split the columns and take disk size
self.disk_size = output.split('\n')[-2].split()[1]
def __str__(self):
return self.disk_size
@staticmethod
def display_name():
return 'Disk size'
class GetKernelVersion:
def __init__(self, parent):
self.output = parent.connection.run('uname -r', hide=True).stdout
def __str__(self):
return self.output.strip()
@staticmethod
def display_name():
return 'Kernel version'
| true | true |
1c478ee5315f97fa1a7ac3ba3481af09e56571ff | 786 | py | Python | setup.py | AndrewRPorter/stocki | 0793fe05735c8c803f5cb3ef2ea029a82243dbbd | [
"MIT"
] | 33 | 2018-07-11T19:22:00.000Z | 2021-01-02T13:01:10.000Z | setup.py | AndrewRPorter/stocki | 0793fe05735c8c803f5cb3ef2ea029a82243dbbd | [
"MIT"
] | 2 | 2018-07-12T12:33:46.000Z | 2018-07-16T13:07:59.000Z | setup.py | AndrewRPorter/stocki | 0793fe05735c8c803f5cb3ef2ea029a82243dbbd | [
"MIT"
] | 5 | 2018-07-11T17:22:07.000Z | 2019-03-19T08:48:08.000Z | from setuptools import setup
# Read optional metadata files, falling back to empty strings when they
# are missing or unreadable.  Catch only I/O and decoding failures rather
# than a blanket Exception, which would also hide real programming errors.
# (IOError is kept alongside OSError for the Python 2.7 support declared
# in python_requires below; on Python 3 it is an alias of OSError.)
try:
    with open("LICENSE.txt", "r") as f:
        _license = f.read()
except (IOError, OSError, UnicodeDecodeError):
    _license = ""

try:
    with open("README.md", "r") as f:
        _readme = f.read()
except (IOError, OSError, UnicodeDecodeError):
    _readme = ""

# Runtime dependencies installed alongside the package.
install_requires = ["requests", "urwid", "pycodestyle"]

setup(
    name="stocki",
    version="0.2.0",
    description="The CLI for fetching stock market data.",
    long_description=_readme,
    license=_license,
    install_requires=install_requires,
    packages=["stocki"],
    entry_points={"console_scripts": ["stocki = stocki.stocki:main"]},
    include_package_data=True,
    python_requires=">=2.7",
    url="https://github.com/andrewrporter/stocki",
    author="AndrewRPorter",
    author_email="porter.r.andrew@gmail.com",
)
| 22.457143 | 70 | 0.652672 | from setuptools import setup
try:
with open("LICENSE.txt", "r") as f:
_license = f.read()
except Exception:
_license = ""
try:
with open("README.md", "r") as f:
_readme = f.read()
except Exception:
_readme = ""
install_requires = ["requests", "urwid", "pycodestyle"]
setup(
name="stocki",
version="0.2.0",
description="The CLI for fetching stock market data.",
long_description=_readme,
license=_license,
install_requires=install_requires,
packages=["stocki"],
entry_points={"console_scripts": ["stocki = stocki.stocki:main"]},
include_package_data=True,
python_requires=">=2.7",
url="https://github.com/andrewrporter/stocki",
author="AndrewRPorter",
author_email="porter.r.andrew@gmail.com",
)
| true | true |
1c478fc5baec380c9474bb2707520c938527aa52 | 1,730 | py | Python | Puzzle5/binaryPartitioning.py | manasharma90/AoC-2020-Python | 6a979eff34136b6b74a340c40121da76e35451da | [
"Apache-2.0"
] | null | null | null | Puzzle5/binaryPartitioning.py | manasharma90/AoC-2020-Python | 6a979eff34136b6b74a340c40121da76e35451da | [
"Apache-2.0"
] | null | null | null | Puzzle5/binaryPartitioning.py | manasharma90/AoC-2020-Python | 6a979eff34136b6b74a340c40121da76e35451da | [
"Apache-2.0"
] | null | null | null |
# defining a function to execute binary partitioning of a list
# input = list; output = tuple with two lists ie. ([first half list], [second half list])
def list_half(input_list):
    """Split a list into its lower and upper halves.

    Returns a tuple ``(lower, upper)``; for odd lengths the extra
    element goes to the upper half.
    """
    midpoint = len(input_list) // 2
    return input_list[:midpoint], input_list[midpoint:]
# Read the puzzle input: one boarding-pass code per line.
with open('input.txt', 'r') as f:
    a = f.read()
boarding_passes = a.split('\n')
# Cleaning the file by validating that the elements are 10 characters and
# each character is either F, B, R or L.
boarding_passes_cleaned = []
for bp in boarding_passes:
    if len(bp) == 10:
        valid = True
        for l in bp:
            if l not in ['F', 'B', 'R', 'L']:
                valid = False
        if valid:
            boarding_passes_cleaned.append(bp)
# Running maximum of the seat IDs, updated by the scan further below.
largest_sID = 0
#defining a function to decode the row number from the boarding pass code
def decode_row(bp_code):
    """Decode the row number (0-127) from a boarding-pass code.

    'F' keeps the lower half of the remaining row range and 'B' the upper
    half; other characters ('L'/'R') are ignored.  This is plain binary
    decoding (F=0, B=1, most significant bit first), so we track the
    interval bounds directly instead of building and slicing a
    128-element list on every step.
    """
    low, high = 0, 127
    for letter in bp_code:
        mid = (low + high) // 2
        if letter == 'F':
            high = mid
        if letter == 'B':
            low = mid + 1
    return low
#defining a function to decode the column number from the boarding pass code
def decode_column(bp_code):
    """Decode the column number (0-7) from a boarding-pass code.

    'L' keeps the lower half of the remaining column range and 'R' the
    upper half; other characters ('F'/'B') are ignored.  Binary decoding
    (L=0, R=1), tracking the interval bounds directly instead of slicing
    a list of 8 columns on every step.
    """
    low, high = 0, 7
    for letter in bp_code:
        mid = (low + high) // 2
        if letter == 'L':
            high = mid
        if letter == 'R':
            low = mid + 1
    return low
# Finding out the largest seat ID on the given list of boarding passes.
for bp_code in boarding_passes_cleaned:
    r = decode_row(bp_code)
    c = decode_column(bp_code)
    # The puzzle defines a seat ID as row * 8 + column.
    sID = (r * 8) + c
    if sID > largest_sID:
        largest_sID = sID
print(largest_sID)
| 25.441176 | 108 | 0.616185 |
def list_half(input_list):
half = len(input_list)//2
lower_half = input_list[:half]
upper_half = input_list[half:]
return lower_half, upper_half
with open('input.txt', 'r') as f:
a = f.read()
boarding_passes = a.split('\n')
boarding_passes_cleaned = []
for bp in boarding_passes:
if len(bp) == 10:
valid = True
for l in bp:
if l not in ['F', 'B', 'R', 'L']:
valid = False
if valid:
boarding_passes_cleaned.append(bp)
largest_sID = 0
def decode_row(bp_code):
rows = list(range(128))
for letter in bp_code:
if letter == 'F':
rows = list_half(rows)[0]
if letter == 'B':
rows = list_half(rows)[1]
return rows[0]
def decode_column(bp_code):
columns = list(range(8))
for letter in bp_code:
if letter == 'L':
columns = list_half(columns)[0]
if letter == 'R':
columns = list_half(columns)[1]
return columns[0]
for bp_code in boarding_passes_cleaned:
r = decode_row(bp_code)
c = decode_column(bp_code)
sID = (r * 8) + c
if sID > largest_sID:
largest_sID = sID
print(largest_sID)
| true | true |
1c4790bd2a51657327ca769fe5588e04bb77bab6 | 2,878 | py | Python | python/src/nnabla/backward_function/div2.py | chunxiaosz/nnabla | 9f4249313129d0fd23d304453830157fee96a2e5 | [
"Apache-2.0"
] | 1 | 2019-09-10T06:51:37.000Z | 2019-09-10T06:51:37.000Z | python/src/nnabla/backward_function/div2.py | langbin2014/nnabla | e94bac5bed65337010e2ac07a5937fb862ab2dd8 | [
"Apache-2.0"
] | null | null | null | python/src/nnabla/backward_function/div2.py | langbin2014/nnabla | e94bac5bed65337010e2ac07a5937fb862ab2dd8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class Div2Backward(BackwardFunction):
    """Backward function of element-wise division (forward: y = x0 / x1).

    Given the incoming gradients of the first-order backward outputs
    (g_dx0, g_dx1), this propagates second-order gradients back to
    x0, x1 and dy.  The first-order grads are dx0 = dy / x1 and
    dx1 = -dy * x0 / x1^2; the rules below are their derivatives.
    """

    def name(self):
        return 'Div2Backward'

    def _create_forward_inputs_and_outputs(self, inputs, outputs):
        """Rebuild forward-graph variables wired to this function's buffers."""
        # Inputs on the forward graph
        inputs_fwd = []
        for i in range(self._num_inputs_fwd):
            need_grad = self.forward_func.inputs[i].need_grad
            v = nn.Variable(inputs[i].shape, need_grad=need_grad)
            v.data = inputs[i].data
            v.grad = outputs[i].data
            inputs_fwd += [v]
        # Outputs on the forward graph
        outputs_fwd = []
        for i in range(self._num_outputs_fwd):
            inp = inputs[self._num_inputs_fwd + i]
            v = nn.Variable(inp.shape)
            v.grad = inp.data
            outputs_fwd += [v]
        return inputs_fwd, outputs_fwd

    def backward_impl(self, inputs, outputs, prop_down, accum):
        # inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
        # [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
        # Inputs
        x0 = inputs[0].data
        x1 = inputs[1].data
        dy = inputs[2].data
        # Outputs
        dx0 = outputs[0].data
        dx1 = outputs[1].data
        # Grads of inputs
        g_x0 = inputs[0].grad
        g_x1 = inputs[1].grad
        g_dy = inputs[2].grad
        # Grads of outputs
        g_dx0 = outputs[0].grad
        g_dx1 = outputs[1].grad
        # Computation
        x1_inv_square = F.pow_scalar(x1, -2.0)
        if prop_down[0]:
            # d(dx1)/dx0 = -dy / x1^2  (dx0 does not depend on x0)
            if accum[0]:
                g_x0 -= g_dx1 * dy * x1_inv_square
            else:
                g_x0.copy_from(- g_dx1 * dy * x1_inv_square)
        if prop_down[1]:
            # d(dx1)/dx1 = 2*dy*x0/x1^3 and d(dx0)/dx1 = -dy/x1^2
            if accum[1]:
                g_x1 += dy * (g_dx1 * 2 * x0 *
                              F.pow_scalar(x1, -3.0) - g_dx0 * x1_inv_square)
            else:
                g_x1.copy_from(
                    dy * (2 * g_dx1 * x0 * F.pow_scalar(x1, -3.0) - g_dx0 * x1_inv_square))
        if prop_down[2]:
            # d(dx0)/d(dy) = 1/x1 and d(dx1)/d(dy) = -x0/x1^2
            if accum[2]:
                g_dy += g_dx0 / x1 - g_dx1 * x0 * x1_inv_square
            else:
                g_dy.copy_from(g_dx0 / x1 - g_dx1 * x0 * x1_inv_square)
| 35.097561 | 91 | 0.589298 |
import numpy as np
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class Div2Backward(BackwardFunction):
def name(self):
return 'Div2Backward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
outputs_fwd = []
for i in range(self._num_outputs_fwd):
inp = inputs[self._num_inputs_fwd + i]
v = nn.Variable(inp.shape)
v.grad = inp.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
x0 = inputs[0].data
x1 = inputs[1].data
dy = inputs[2].data
dx0 = outputs[0].data
dx1 = outputs[1].data
g_x0 = inputs[0].grad
g_x1 = inputs[1].grad
g_dy = inputs[2].grad
g_dx0 = outputs[0].grad
g_dx1 = outputs[1].grad
x1_inv_square = F.pow_scalar(x1, -2.0)
if prop_down[0]:
if accum[0]:
g_x0 -= g_dx1 * dy * x1_inv_square
else:
g_x0.copy_from(- g_dx1 * dy * x1_inv_square)
if prop_down[1]:
if accum[1]:
g_x1 += dy * (g_dx1 * 2 * x0 *
F.pow_scalar(x1, -3.0) - g_dx0 * x1_inv_square)
else:
g_x1.copy_from(
dy * (2 * g_dx1 * x0 * F.pow_scalar(x1, -3.0) - g_dx0 * x1_inv_square))
if prop_down[2]:
if accum[2]:
g_dy += g_dx0 / x1 - g_dx1 * x0 * x1_inv_square
else:
g_dy.copy_from(g_dx0 / x1 - g_dx1 * x0 * x1_inv_square)
| true | true |
1c4791f5de8986417f5d44fefb3cdffd7192c28f | 2,568 | py | Python | python_scripts/linear_models_sol_03.py | odotreppe/scikit-learn-mooc | da97773fc9b860371e94e3c72791b0c92471b22d | [
"CC-BY-4.0"
] | 2 | 2021-09-30T11:07:28.000Z | 2021-09-30T11:07:31.000Z | python_scripts/linear_models_sol_03.py | Ravimk07/scikit-learn-mooc | c3aaf8c5a9aa4f1d749ebc1b7d5ae24619fee4bf | [
"CC-BY-4.0"
] | null | null | null | python_scripts/linear_models_sol_03.py | Ravimk07/scikit-learn-mooc | c3aaf8c5a9aa4f1d749ebc1b7d5ae24619fee4bf | [
"CC-BY-4.0"
] | null | null | null | # %% [markdown]
# # 📃 Solution for Exercise M4.03
#
# In all previous notebooks, we only used a single feature in `data`. But we
# have already shown that we could add new features to make the model more
# expressive by deriving new features, based on the original feature.
#
# The aim of this notebook is to train a linear regression algorithm on a
# dataset with more than a single feature.
#
# We will load a dataset about house prices in California.
# The dataset consists of 8 features regarding the demography and geography of
# districts in California and the aim is to predict the median house price of
# each district. We will use all 8 features to predict the target, the median
# house price.
# %% [markdown]
# ```{note}
# If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.
# ```
# %%
from sklearn.datasets import fetch_california_housing

data, target = fetch_california_housing(as_frame=True, return_X_y=True)
target *= 100  # rescale the target in k$
data.head()
# %% [markdown]
# Now it is your turn to train a linear regression model on this dataset.
# You will need to:
# * create a linear regression model;
# * execute a cross-validation with 10 folds and use the mean absolute error
#   (MAE) as metric. Ensure to return the fitted estimators;
# * compute mean and std of the MAE in thousands of dollars (k$);
# * show the values of the coefficients for each feature using a boxplot by
#   inspecting the fitted model returned from the cross-validation. Hint: you
#   can use the function
#   [`df.plot.box()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.box.html)
#   to plot a box plot.
# %%
from sklearn.linear_model import LinearRegression

linear_regression = LinearRegression()
# %%
from sklearn.model_selection import cross_validate

cv_results = cross_validate(linear_regression, data, target,
                            scoring="neg_mean_absolute_error",
                            return_estimator=True, cv=10, n_jobs=2)
# %%
# Scores are negated MAE, so flip the sign when reporting.
print(f"Mean absolute error on testing set: "
      f"{-cv_results['test_score'].mean():.3f} k$ +/- "
      f"{cv_results['test_score'].std():.3f}")
# %%
import pandas as pd

# One row of coefficients per CV fold; columns follow the feature names.
weights = pd.DataFrame(
    [est.coef_ for est in cv_results["estimator"]], columns=data.columns)
# %%
import matplotlib.pyplot as plt

color = {"whiskers": "black", "medians": "black", "caps": "black"}
weights.plot.box(color=color, vert=False)
_ = plt.title("Value of linear regression coefficients")
| 35.666667 | 112 | 0.720405 |
arn.datasets import fetch_california_housing
data, target = fetch_california_housing(as_frame=True, return_X_y=True)
target *= 100
data.head()
from sklearn.linear_model import LinearRegression
linear_regression = LinearRegression()
from sklearn.model_selection import cross_validate
cv_results = cross_validate(linear_regression, data, target,
scoring="neg_mean_absolute_error",
return_estimator=True, cv=10, n_jobs=2)
print(f"Mean absolute error on testing set: "
f"{-cv_results['test_score'].mean():.3f} k$ +/- "
f"{cv_results['test_score'].std():.3f}")
import pandas as pd
weights = pd.DataFrame(
[est.coef_ for est in cv_results["estimator"]], columns=data.columns)
import matplotlib.pyplot as plt
color = {"whiskers": "black", "medians": "black", "caps": "black"}
weights.plot.box(color=color, vert=False)
_ = plt.title("Value of linear regression coefficients")
| true | true |
1c47920152539c32902149b890e26eb84bfb3c09 | 5,674 | py | Python | novaclient/v1_1/volumes.py | citrix-openstack-build/python-novaclient | 3d73fb36e7c5e5f933560760f46ff6aec74ff093 | [
"Apache-1.1"
] | 1 | 2015-02-16T09:37:00.000Z | 2015-02-16T09:37:00.000Z | novaclient/v1_1/volumes.py | sivel/python-novaclient | 810857849ed32773c38df12785715f89d33e83af | [
"Apache-1.1"
] | null | null | null | novaclient/v1_1/volumes.py | sivel/python-novaclient | 810857849ed32773c38df12785715f89d33e83af | [
"Apache-1.1"
] | null | null | null | # Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume interface (1.1 extension).
"""
import six
from novaclient import base
from novaclient.openstack.common.py3kcompat import urlutils
class Volume(base.Resource):
    """
    A volume is an extra block level storage to the OpenStack instances.
    """
    # Attribute that base.Resource uses to derive a human-readable name.
    NAME_ATTR = 'display_name'

    def __repr__(self):
        return "<Volume: {0}>".format(self.id)

    def delete(self):
        """
        Delete this volume.
        """
        self.manager.delete(self)
class VolumeManager(base.ManagerWithFind):
    """
    Manage :class:`Volume` resources.
    """
    resource_class = Volume

    def create(self, size, snapshot_id=None,
               display_name=None, display_description=None,
               volume_type=None, availability_zone=None,
               imageRef=None):
        """
        Create a volume.

        :param size: Size of volume in GB
        :param snapshot_id: ID of the snapshot
        :param display_name: Name of the volume
        :param display_description: Description of the volume
        :param volume_type: Type of volume
        :param availability_zone: Availability Zone for volume
        :rtype: :class:`Volume`
        :param imageRef: reference to an image stored in glance
        """
        volume_spec = {'size': size,
                       'snapshot_id': snapshot_id,
                       'display_name': display_name,
                       'display_description': display_description,
                       'volume_type': volume_type,
                       'availability_zone': availability_zone,
                       'imageRef': imageRef}
        return self._create('/volumes', {'volume': volume_spec}, 'volume')

    def get(self, volume_id):
        """
        Get a volume.

        :param volume_id: The ID of the volume to retrieve.
        :rtype: :class:`Volume`
        """
        return self._get("/volumes/%s" % volume_id, "volume")

    def list(self, detailed=True, search_opts=None):
        """
        Get a list of all volumes.

        :rtype: list of :class:`Volume`
        """
        # Drop falsy filter values so they do not end up in the query string.
        filters = dict((k, v)
                       for (k, v) in six.iteritems(search_opts or {}) if v)
        if filters:
            query_string = '?%s' % urlutils.urlencode(filters)
        else:
            query_string = ''
        path = "/volumes/detail" if detailed is True else "/volumes"
        return self._list(path + query_string, "volumes")

    def delete(self, volume):
        """
        Delete a volume.

        :param volume: The :class:`Volume` to delete.
        """
        self._delete("/volumes/%s" % base.getid(volume))

    def create_server_volume(self, server_id, volume_id, device):
        """
        Attach a volume identified by the volume ID to the given server ID

        :param server_id: The ID of the server
        :param volume_id: The ID of the volume to attach.
        :param device: The device name
        :rtype: :class:`Volume`
        """
        attachment = {'volumeId': volume_id,
                      'device': device}
        return self._create("/servers/%s/os-volume_attachments" % server_id,
                            {'volumeAttachment': attachment},
                            "volumeAttachment")

    def update_server_volume(self, server_id, attachment_id, new_volume_id):
        """
        Update the volume identified by the attachment ID, that is attached to
        the given server ID

        :param server_id: The ID of the server
        :param attachment_id: The ID of the attachment
        :param new_volume_id: The ID of the new volume to attach
        :rtype: :class:`Volume`
        """
        body = {'volumeAttachment': {'volumeId': new_volume_id}}
        url = "/servers/%s/os-volume_attachments/%s" % (server_id,
                                                        attachment_id,)
        return self._update(url, body, "volumeAttachment")

    def get_server_volume(self, server_id, attachment_id):
        """
        Get the volume identified by the attachment ID, that is attached to
        the given server ID

        :param server_id: The ID of the server
        :param attachment_id: The ID of the attachment
        :rtype: :class:`Volume`
        """
        url = "/servers/%s/os-volume_attachments/%s" % (server_id,
                                                        attachment_id,)
        return self._get(url, "volumeAttachment")

    def get_server_volumes(self, server_id):
        """
        Get a list of all the attached volumes for the given server ID

        :param server_id: The ID of the server
        :rtype: list of :class:`Volume`
        """
        return self._list("/servers/%s/os-volume_attachments" % server_id,
                          "volumeAttachments")

    def delete_server_volume(self, server_id, attachment_id):
        """
        Detach a volume identified by the attachment ID from the given server

        :param server_id: The ID of the server
        :param attachment_id: The ID of the attachment
        """
        self._delete("/servers/%s/os-volume_attachments/%s" %
                     (server_id, attachment_id,))
| 34.180723 | 78 | 0.605217 |
import six
from novaclient import base
from novaclient.openstack.common.py3kcompat import urlutils
class Volume(base.Resource):
NAME_ATTR = 'display_name'
def __repr__(self):
return "<Volume: %s>" % self.id
def delete(self):
self.manager.delete(self)
class VolumeManager(base.ManagerWithFind):
resource_class = Volume
def create(self, size, snapshot_id=None,
display_name=None, display_description=None,
volume_type=None, availability_zone=None,
imageRef=None):
body = {'volume': {'size': size,
'snapshot_id': snapshot_id,
'display_name': display_name,
'display_description': display_description,
'volume_type': volume_type,
'availability_zone': availability_zone,
'imageRef': imageRef}}
return self._create('/volumes', body, 'volume')
def get(self, volume_id):
return self._get("/volumes/%s" % volume_id, "volume")
def list(self, detailed=True, search_opts=None):
search_opts = search_opts or {}
qparams = dict((k, v) for (k, v) in six.iteritems(search_opts) if v)
query_string = '?%s' % urlutils.urlencode(qparams) if qparams else ''
if detailed is True:
return self._list("/volumes/detail%s" % query_string, "volumes")
else:
return self._list("/volumes%s" % query_string, "volumes")
def delete(self, volume):
self._delete("/volumes/%s" % base.getid(volume))
def create_server_volume(self, server_id, volume_id, device):
body = {'volumeAttachment': {'volumeId': volume_id,
'device': device}}
return self._create("/servers/%s/os-volume_attachments" % server_id,
body, "volumeAttachment")
def update_server_volume(self, server_id, attachment_id, new_volume_id):
body = {'volumeAttachment': {'volumeId': new_volume_id}}
return self._update("/servers/%s/os-volume_attachments/%s" %
(server_id, attachment_id,), body, "volumeAttachment")
def get_server_volume(self, server_id, attachment_id):
return self._get("/servers/%s/os-volume_attachments/%s" % (server_id,
attachment_id,), "volumeAttachment")
def get_server_volumes(self, server_id):
return self._list("/servers/%s/os-volume_attachments" % server_id,
"volumeAttachments")
def delete_server_volume(self, server_id, attachment_id):
self._delete("/servers/%s/os-volume_attachments/%s" %
(server_id, attachment_id,))
| true | true |
1c47922da6f61b01101caee74d5b39091250523f | 5,778 | py | Python | deepspeech_pytorch/validation.py | RaphaelOlivier/deepspeech.pytorch | eb73ef61807ab01fad3662ad03dfea8fd44439aa | [
"MIT"
] | 1 | 2021-08-07T07:12:40.000Z | 2021-08-07T07:12:40.000Z | deepspeech_pytorch/validation.py | RaphaelOlivier/deepspeech.pytorch | eb73ef61807ab01fad3662ad03dfea8fd44439aa | [
"MIT"
] | 1 | 2019-02-07T12:52:46.000Z | 2019-02-07T12:52:46.000Z | deepspeech_pytorch/validation.py | RaphaelOlivier/deepspeech.pytorch | eb73ef61807ab01fad3662ad03dfea8fd44439aa | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
import torch
from torch.cuda.amp import autocast
from tqdm import tqdm
from deepspeech_pytorch.decoder import Decoder, GreedyDecoder
from pytorch_lightning.metrics import Metric
import Levenshtein as Lev
class ErrorRate(Metric, ABC):
    """Base class for edit-distance ASR metrics (WER / CER).

    Subclasses implement :meth:`calculate_metric` to accumulate their own
    state from one ``(transcript, reference)`` pair.
    """

    def __init__(self,
                 decoder: Decoder,
                 target_decoder: GreedyDecoder,
                 save_output: bool = False,
                 dist_sync_on_step: bool = False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.decoder = decoder
        self.target_decoder = target_decoder
        self.save_output = save_output

    @abstractmethod
    def calculate_metric(self, transcript, reference):
        raise NotImplementedError

    def update(self, preds: torch.Tensor,
               preds_sizes: torch.Tensor,
               targets: torch.Tensor,
               target_sizes: torch.Tensor):
        """Decode one batch and feed each (transcript, reference) pair to the subclass."""
        # The targets arrive flattened; slice them back into per-utterance chunks.
        per_utterance_targets = []
        cursor = 0
        for length in target_sizes:
            per_utterance_targets.append(targets[cursor:cursor + length])
            cursor += length

        decoded_output, _ = self.decoder.decode(preds, preds_sizes)
        target_strings = self.target_decoder.convert_to_strings(per_utterance_targets)
        for index in range(len(target_strings)):
            # Each decoder entry holds the n-best hypotheses; take the best one.
            self.calculate_metric(
                transcript=decoded_output[index][0],
                reference=target_strings[index][0],
            )
class CharErrorRate(ErrorRate):
    """Character Error Rate (CER) metric with distributed-sum accumulators."""

    def __init__(self,
                 decoder: Decoder,
                 target_decoder: GreedyDecoder,
                 save_output: bool = False,
                 dist_sync_on_step: bool = False):
        super().__init__(
            decoder=decoder,
            target_decoder=target_decoder,
            save_output=save_output,
            dist_sync_on_step=dist_sync_on_step
        )
        # `decoder`, `target_decoder` and `save_output` are assigned by the
        # `ErrorRate` base initializer; re-assigning them here was redundant.
        # Accumulators are summed across processes when `compute` is called.
        self.add_state("cer", default=torch.tensor(0), dist_reduce_fx="sum")
        self.add_state("n_chars", default=torch.tensor(0), dist_reduce_fx="sum")

    def calculate_metric(self, transcript, reference):
        """Accumulate edit distance and reference length for one utterance."""
        cer_inst = self.cer_calc(transcript, reference)
        self.cer += cer_inst
        # Spaces are not counted as characters for CER.
        self.n_chars += len(reference.replace(' ', ''))

    def compute(self):
        """Return the aggregated CER as a percentage."""
        cer = float(self.cer) / self.n_chars
        return cer.item() * 100

    def cer_calc(self, s1, s2):
        """
        Computes the Character Error Rate, defined as the edit distance.

        Arguments:
            s1 (string): space-separated sentence
            s2 (string): space-separated sentence
        """
        s1, s2, = s1.replace(' ', ''), s2.replace(' ', '')
        return Lev.distance(s1, s2)
class WordErrorRate(ErrorRate):
    """Word Error Rate (WER) metric with distributed-sum accumulators."""

    def __init__(self,
                 decoder: Decoder,
                 target_decoder: GreedyDecoder,
                 save_output: bool = False,
                 dist_sync_on_step: bool = False):
        super().__init__(
            decoder=decoder,
            target_decoder=target_decoder,
            save_output=save_output,
            dist_sync_on_step=dist_sync_on_step
        )
        # `decoder`, `target_decoder` and `save_output` are assigned by the
        # `ErrorRate` base initializer; re-assigning them here was redundant.
        # Accumulators are summed across processes when `compute` is called.
        self.add_state("wer", default=torch.tensor(0), dist_reduce_fx="sum")
        self.add_state("n_tokens", default=torch.tensor(0), dist_reduce_fx="sum")

    def calculate_metric(self, transcript, reference):
        """Accumulate word-level edit distance and reference length for one utterance."""
        wer_inst = self.wer_calc(transcript, reference)
        self.wer += wer_inst
        self.n_tokens += len(reference.split())

    def compute(self):
        """Return the aggregated WER as a percentage."""
        wer = float(self.wer) / self.n_tokens
        return wer.item() * 100

    def wer_calc(self, s1, s2):
        """
        Computes the Word Error Rate, defined as the edit distance between the
        two provided sentences after tokenizing to words.

        Arguments:
            s1 (string): space-separated sentence
            s2 (string): space-separated sentence
        """
        # build mapping of words to integers
        b = set(s1.split() + s2.split())
        word2char = dict(zip(b, range(len(b))))

        # map the words to a char array (Levenshtein packages only accepts
        # strings)
        w1 = [chr(word2char[w]) for w in s1.split()]
        w2 = [chr(word2char[w]) for w in s2.split()]

        return Lev.distance(''.join(w1), ''.join(w2))
@torch.no_grad()
def run_evaluation(test_loader,
                   model,
                   decoder: Decoder,
                   device: torch.device,
                   target_decoder: Decoder,
                   precision: int):
    """
    Evaluate a model over a test loader and return (WER, CER) percentages.

    Args:
        test_loader: Loader yielding (inputs, targets, input_percentages, target_sizes).
        model: Acoustic model to evaluate (switched to eval mode here).
        decoder: Decoder used to turn model output into transcripts.
        device: Device the inputs are moved to.
        target_decoder: Greedy decoder used to build the reference strings.
        precision: 16 enables autocast mixed precision; anything else runs full precision.

    Returns:
        Tuple of (word error rate, character error rate), both as percentages.
    """
    model.eval()
    wer = WordErrorRate(
        decoder=decoder,
        target_decoder=target_decoder
    )
    cer = CharErrorRate(
        decoder=decoder,
        target_decoder=target_decoder
    )
    for i, (batch) in tqdm(enumerate(test_loader), total=len(test_loader)):
        inputs, targets, input_percentages, target_sizes = batch
        # Recover the true (unpadded) input lengths from the stored percentages.
        input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
        inputs = inputs.to(device)
        with autocast(enabled=precision == 16):
            out, output_sizes = model(inputs, input_sizes)
        # The metrics decode internally inside `update`; the previous explicit
        # `decoder.decode(out, output_sizes)` call here was unused and has been
        # removed to avoid decoding every batch twice.
        wer.update(
            preds=out,
            preds_sizes=output_sizes,
            targets=targets,
            target_sizes=target_sizes
        )
        cer.update(
            preds=out,
            preds_sizes=output_sizes,
            targets=targets,
            target_sizes=target_sizes
        )
    return wer.compute(), cer.compute()
| 33.789474 | 81 | 0.602631 | from abc import ABC, abstractmethod
import torch
from torch.cuda.amp import autocast
from tqdm import tqdm
from deepspeech_pytorch.decoder import Decoder, GreedyDecoder
from pytorch_lightning.metrics import Metric
import Levenshtein as Lev
class ErrorRate(Metric, ABC):
def __init__(self,
decoder: Decoder,
target_decoder: GreedyDecoder,
save_output: bool = False,
dist_sync_on_step: bool = False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.decoder = decoder
self.target_decoder = target_decoder
self.save_output = save_output
@abstractmethod
def calculate_metric(self, transcript, reference):
raise NotImplementedError
def update(self, preds: torch.Tensor,
preds_sizes: torch.Tensor,
targets: torch.Tensor,
target_sizes: torch.Tensor):
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
decoded_output, _ = self.decoder.decode(preds, preds_sizes)
target_strings = self.target_decoder.convert_to_strings(split_targets)
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
self.calculate_metric(
transcript=transcript,
reference=reference
)
class CharErrorRate(ErrorRate):
def __init__(self,
decoder: Decoder,
target_decoder: GreedyDecoder,
save_output: bool = False,
dist_sync_on_step: bool = False):
super().__init__(
decoder=decoder,
target_decoder=target_decoder,
save_output=save_output,
dist_sync_on_step=dist_sync_on_step
)
self.decoder = decoder
self.target_decoder = target_decoder
self.save_output = save_output
self.add_state("cer", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("n_chars", default=torch.tensor(0), dist_reduce_fx="sum")
def calculate_metric(self, transcript, reference):
cer_inst = self.cer_calc(transcript, reference)
self.cer += cer_inst
self.n_chars += len(reference.replace(' ', ''))
def compute(self):
cer = float(self.cer) / self.n_chars
return cer.item() * 100
def cer_calc(self, s1, s2):
s1, s2, = s1.replace(' ', ''), s2.replace(' ', '')
return Lev.distance(s1, s2)
class WordErrorRate(ErrorRate):
def __init__(self,
decoder: Decoder,
target_decoder: GreedyDecoder,
save_output: bool = False,
dist_sync_on_step: bool = False):
super().__init__(
decoder=decoder,
target_decoder=target_decoder,
save_output=save_output,
dist_sync_on_step=dist_sync_on_step
)
self.decoder = decoder
self.target_decoder = target_decoder
self.save_output = save_output
self.add_state("wer", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("n_tokens", default=torch.tensor(0), dist_reduce_fx="sum")
def calculate_metric(self, transcript, reference):
wer_inst = self.wer_calc(transcript, reference)
self.wer += wer_inst
self.n_tokens += len(reference.split())
def compute(self):
wer = float(self.wer) / self.n_tokens
return wer.item() * 100
def wer_calc(self, s1, s2):
b = set(s1.split() + s2.split())
word2char = dict(zip(b, range(len(b))))
w1 = [chr(word2char[w]) for w in s1.split()]
w2 = [chr(word2char[w]) for w in s2.split()]
return Lev.distance(''.join(w1), ''.join(w2))
@torch.no_grad()
def run_evaluation(test_loader,
model,
decoder: Decoder,
device: torch.device,
target_decoder: Decoder,
precision: int):
model.eval()
wer = WordErrorRate(
decoder=decoder,
target_decoder=target_decoder
)
cer = CharErrorRate(
decoder=decoder,
target_decoder=target_decoder
)
for i, (batch) in tqdm(enumerate(test_loader), total=len(test_loader)):
inputs, targets, input_percentages, target_sizes = batch
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
inputs = inputs.to(device)
with autocast(enabled=precision == 16):
out, output_sizes = model(inputs, input_sizes)
decoded_output, _ = decoder.decode(out, output_sizes)
wer.update(
preds=out,
preds_sizes=output_sizes,
targets=targets,
target_sizes=target_sizes
)
cer.update(
preds=out,
preds_sizes=output_sizes,
targets=targets,
target_sizes=target_sizes
)
return wer.compute(), cer.compute()
| true | true |
1c479507647de6ce6ea1f9c6b660694c87468544 | 4,167 | py | Python | polish/utils/host_call_fn.py | kinoute/google-research | 4a59cab927579ea9722e43252c695de5da4eb5e2 | [
"Apache-2.0"
] | 11 | 2020-01-29T07:25:04.000Z | 2022-03-05T16:01:21.000Z | polish/utils/host_call_fn.py | RubensZimbres/google-research | 562c7c6ef959cb3cb382b1b660ccc45e8f5289c4 | [
"Apache-2.0"
] | 13 | 2020-01-28T22:19:53.000Z | 2022-02-10T00:39:26.000Z | polish/utils/host_call_fn.py | RubensZimbres/google-research | 562c7c6ef959cb3cb382b1b660ccc45e8f5289c4 | [
"Apache-2.0"
] | 2 | 2020-02-27T11:09:49.000Z | 2021-08-25T07:32:15.000Z | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs for building host call function for TF estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf
from tensorflow.contrib import summary as contrib_summary
@gin.configurable
def build_host_call_fn_every_n_global_steps(
    params,
    names_and_tensors,
    n,
    summary_dir=None):
  """Wrapper to build `host_call` for `TPUEstimator`.

  This function records the summaries if global_step % n == 0

  Args:
    params: A `tf.contrib.train.HParams` object.
    names_and_tensors: List of elements such as `("loss", loss)`. These are the
      tensors' names and values.
    n: Defines the frequency of recording the summaries.
      Performance-wise on TPU, it is better to set n equal to
      the number of iterations per loop.
      In TPU, each training loop (each call to estimator.train)
      consists of multiple iterations. There is a communication overhead
      between host and TPU per training loop to send/receive data.
      As such, it is better not to interrupt the TPU loop for saving
      the summaries. You may also need to save the summaries
      after multiple training loops.
    summary_dir: Summary directory used to store TF summaries.

  Returns:
    A pair of `(host_call_fn, tensors)` for `TPUEstimatorSpec`.
  """
  del params  # Unused; present only to match the expected builder signature.
  assert summary_dir, 'Please specify a directory for summaries.'

  names, tensors = zip(*names_and_tensors)

  def host_call_fn(global_step, *tensors):
    """Training host call."""
    # Tensors arrive on the host with a leading batch dimension of 1;
    # index it away before use.
    global_step = global_step[0]
    with contrib_summary.create_file_writer(summary_dir +
                                            '/metrics').as_default():
      with contrib_summary.record_summaries_every_n_global_steps(
          n=n, global_step=global_step):
        for i, tensor in enumerate(tensors):
          contrib_summary.scalar(names[i], tensor[0], step=global_step)

        return contrib_summary.all_summary_ops()

  # Host-call tensors need a leading batch dimension, and every value is cast
  # to float32 before being shipped to the host.
  global_step = tf.reshape(tf.train.get_or_create_global_step(), [1])
  tensors = [
      tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0) for t in tensors
  ]
  return (host_call_fn, [global_step] + tensors)
@gin.configurable
def build_host_call_fn(
    params,
    names_and_tensors,
    summary_dir=None):
  """Wrapper to build `host_call` for `TPUEstimator`.

  Adopted from: experimental/users/hyhieu/patch_based_unsup/utils.py

  Args:
    params: A `tf.contrib.train.HParams` object.
    names_and_tensors: List of elemens such as `("loss", loss)`. These are the
      tensors' names and values.
    summary_dir: Summary directory used to store TF summaries.

  Returns:
    A pair of `(host_call_fn, tensors)` for `TPUEstimatorSpec`.
  """
  del params  # Unused; kept only for a compatible builder signature.
  assert summary_dir, 'Please specify a directory for summaries.'
  names, tensors = zip(*names_and_tensors)

  def host_call_fn(step, *host_tensors):
    """Records one scalar summary per tracked tensor on the host."""
    step = step[0]
    writer = contrib_summary.create_file_writer(summary_dir + '/metrics')
    with writer.as_default():
      with contrib_summary.always_record_summaries():
        for name, tensor in zip(names, host_tensors):
          contrib_summary.scalar(name, tensor[0], step=step)
        return contrib_summary.all_summary_ops()

  # Host-call tensors need a leading batch dimension; cast to float32 first.
  step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1])
  host_tensors = []
  for t in tensors:
    host_tensors.append(tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0))
  return (host_call_fn, [step_tensor] + host_tensors)
| 36.552632 | 78 | 0.708903 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf
from tensorflow.contrib import summary as contrib_summary
@gin.configurable
def build_host_call_fn_every_n_global_steps(
params,
names_and_tensors,
n,
summary_dir=None):
del params
assert summary_dir, 'Please specify a directory for summaries.'
names, tensors = zip(*names_and_tensors)
def host_call_fn(global_step, *tensors):
global_step = global_step[0]
with contrib_summary.create_file_writer(summary_dir +
'/metrics').as_default():
with contrib_summary.record_summaries_every_n_global_steps(
n=n, global_step=global_step):
for i, tensor in enumerate(tensors):
contrib_summary.scalar(names[i], tensor[0], step=global_step)
return contrib_summary.all_summary_ops()
global_step = tf.reshape(tf.train.get_or_create_global_step(), [1])
tensors = [
tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0) for t in tensors
]
return (host_call_fn, [global_step] + tensors)
@gin.configurable
def build_host_call_fn(
params,
names_and_tensors,
summary_dir=None):
del params
assert summary_dir, 'Please specify a directory for summaries.'
names, tensors = zip(*names_and_tensors)
def host_call_fn(global_step, *tensors):
global_step = global_step[0]
with contrib_summary.create_file_writer(summary_dir +
'/metrics').as_default():
with contrib_summary.always_record_summaries():
for i, tensor in enumerate(tensors):
contrib_summary.scalar(names[i], tensor[0], step=global_step)
return contrib_summary.all_summary_ops()
global_step = tf.reshape(tf.train.get_or_create_global_step(), [1])
tensors = [
tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0) for t in tensors
]
return (host_call_fn, [global_step] + tensors)
| true | true |
1c47967d6dc098c03dfcc9f615566eb99f55f87c | 78,681 | py | Python | src/transformers/modeling_tf_utils.py | holazzer/transformers | 53191d75ecca21c028077b3227f9ac47379e4690 | [
"Apache-2.0"
] | 9 | 2021-07-31T12:02:20.000Z | 2021-09-21T00:40:43.000Z | src/transformers/modeling_tf_utils.py | holazzer/transformers | 53191d75ecca21c028077b3227f9ac47379e4690 | [
"Apache-2.0"
] | null | null | null | src/transformers/modeling_tf_utils.py | holazzer/transformers | 53191d75ecca21c028077b3227f9ac47379e4690 | [
"Apache-2.0"
] | 1 | 2021-10-01T05:32:22.000Z | 2021-10-01T05:32:22.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""
import functools
import inspect
import os
import re
import warnings
from typing import Dict, List, Optional, Union
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
PushToHubMixin,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
)
from .generation_tf_utils import TFGenerationMixin
from .tokenization_utils_base import BatchEncoding
from .utils import logging
logger = logging.get_logger(__name__)
tf_logger = tf.get_logger()
# Convenience alias for the input formats TF models accept: a (list of)
# tf.Tensor / np.ndarray, or a dict mapping input names to either.
TFModelInputType = Union[
    List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor
]
class TFModelUtilsMixin:
    """
    A few utilities for :obj:`tf.keras.Model`, to be used as a mixin.
    """

    def num_parameters(self, only_trainable: bool = False) -> int:
        """
        Get the number of (optionally, trainable) parameters in the model.

        Args:
            only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of trainable parameters

        Returns:
            :obj:`int`: The number of parameters.
        """
        if not only_trainable:
            # Keras already knows how to count every parameter of the model.
            return self.count_params()
        # Otherwise accumulate the element counts of the trainable variables only.
        variable_sizes = (np.prod(variable.shape.as_list()) for variable in self.trainable_variables)
        return int(sum(variable_sizes))
def keras_serializable(cls):
    """
    Decorate a Keras Layer class to support Keras serialization.

    This is done by:

    1. Adding a :obj:`transformers_config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras at
       serialization time.
    2. Wrapping :obj:`__init__` to accept that :obj:`transformers_config` dict (passed by Keras at deserialization
       time) and convert it to a config object for the actual layer initializer.
    3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not
       need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`.

    Args:
        cls (a :obj:`tf.keras.layers.Layers subclass`):
            Typically a :obj:`TF.MainLayer` class in this project, in general must accept a :obj:`config` argument to
            its initializer.

    Returns:
        The same class object, with modifications for Keras deserialization.
    """
    initializer = cls.__init__

    # The config class is needed to rebuild a config object from the plain dict
    # that Keras passes back at deserialization time.
    config_class = getattr(cls, "config_class", None)
    if config_class is None:
        raise AttributeError("Must set `config_class` to use @keras_serializable")

    @functools.wraps(initializer)
    def wrapped_init(self, *args, **kwargs):
        # The config may arrive as the first positional argument or as a keyword.
        config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)

        if isinstance(config, dict):
            # Coming from Keras deserialization: rebuild the config object first.
            config = config_class.from_dict(config)
            initializer(self, config, *args, **kwargs)
        elif isinstance(config, PretrainedConfig):
            if len(args) > 0:
                # The config was positional, so it is still inside `args`.
                initializer(self, *args, **kwargs)
            else:
                initializer(self, config, *args, **kwargs)
        else:
            raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")

        # Keep what `get_config` needs to re-serialize the layer later.
        self._config = config
        self._kwargs = kwargs

    cls.__init__ = wrapped_init

    if not hasattr(cls, "get_config"):
        raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
    if hasattr(cls.get_config, "_is_default"):
        # Only install our `get_config` when the class has not defined its own.

        def get_config(self):
            cfg = super(cls, self).get_config()
            cfg["config"] = self._config.to_dict()
            cfg.update(self._kwargs)
            return cfg

        cls.get_config = get_config

    cls._keras_serializable = True
    if hasattr(tf.keras.utils, "register_keras_serializable"):
        cls = tf.keras.utils.register_keras_serializable()(cls)
    return cls
class TFCausalLanguageModelingLoss:
    """
    Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.

    .. note::

        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
    """

    def compute_loss(self, labels, logits):
        """Per-token sparse cross-entropy over the positions whose label is not -100."""
        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        flat_labels = tf.reshape(labels, (-1,))
        flat_logits = tf.reshape(logits, (-1, shape_list(logits)[2]))
        # Positions labelled -100 are ignored and must not contribute to the loss.
        loss_mask = tf.not_equal(flat_labels, -100)
        masked_logits = tf.boolean_mask(flat_logits, loss_mask)
        masked_labels = tf.boolean_mask(flat_labels, loss_mask)
        return cross_entropy(masked_labels, masked_logits)
class TFQuestionAnsweringLoss:
    """
    Loss function suitable for question answering.
    """

    def compute_loss(self, labels, logits):
        """Average the cross-entropy of the answer-start and answer-end predictions."""
        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        start_loss = cross_entropy(labels["start_position"], logits[0])
        end_loss = cross_entropy(labels["end_position"], logits[1])
        # Both span boundaries are weighted equally.
        return (start_loss + end_loss) / 2.0
class TFTokenClassificationLoss:
    """
    Loss function suitable for token classification.

    .. note::

        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
    """

    def compute_loss(self, labels, logits):
        """Sparse cross-entropy over the tokens whose label is not masked out."""
        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # Historically -1 was used as the ignore index; keep supporting it with a warning.
        if tf.math.reduce_any(labels == -1):
            warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
            ignore_index = -1
        else:
            ignore_index = -100
        flat_labels = tf.reshape(labels, (-1,))
        loss_mask = flat_labels != ignore_index
        masked_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), loss_mask)
        masked_labels = tf.boolean_mask(flat_labels, loss_mask)
        return cross_entropy(masked_labels, masked_logits)
class TFSequenceClassificationLoss:
    """
    Loss function suitable for sequence classification.
    """

    def compute_loss(self, labels, logits):
        """MSE when the head has a single output column (regression), sparse cross-entropy otherwise."""
        logits_shape = shape_list(logits)
        # A single output column means the head is doing regression, not classification.
        if len(logits_shape) == 1 or logits_shape[1] == 1:
            criterion = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
        else:
            criterion = tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True, reduction=tf.keras.losses.Reduction.NONE
            )
        return criterion(labels, logits)
class TFMultipleChoiceLoss:
    """Loss function suitable for multiple choice tasks."""

    def compute_loss(self, labels, logits):
        """Sparse categorical cross-entropy over the choice dimension."""
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        return loss_fn(labels, logits)
class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
    """
    Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.

    The loss computation is inherited unchanged from :class:`TFCausalLanguageModelingLoss`, since both tasks
    reduce to sparse cross-entropy over the non-ignored token positions.

    .. note::
        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
    """
class TFNextSentencePredictionLoss:
    """
    Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence.

    .. note::
        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
    """

    def compute_loss(self, labels, logits):
        """Two-way sparse cross-entropy for NSP, skipping labels equal to -100."""
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        flat_labels = tf.reshape(labels, (-1,))
        keep_mask = tf.not_equal(flat_labels, -100)
        # NSP is always a binary decision, hence the fixed logits width of 2.
        kept_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), keep_mask)
        kept_labels = tf.boolean_mask(flat_labels, keep_mask)
        return loss_fn(kept_labels, kept_logits)
def booleans_processing(config, **kwargs):
    """
    Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
    graph)

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config of the running model.
        **kwargs:
            The boolean parameters

    Returns:
        A dictionary with the proper values for each boolean
    """
    final_booleans = {}

    if tf.executing_eagerly():
        # In eager mode, per-call arguments may override the config values.
        final_booleans["output_attentions"] = (
            kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
        )
        final_booleans["output_hidden_states"] = (
            kwargs["output_hidden_states"]
            if kwargs["output_hidden_states"] is not None
            else config.output_hidden_states
        )
        final_booleans["return_dict"] = (
            kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
        )

        if "use_cache" in kwargs:
            final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache
    else:
        # In graph mode the booleans must come from the config: warn if the caller
        # tried to override them at call time, then fall back to the config values.
        if (
            kwargs["output_attentions"] is not None
            or kwargs["output_hidden_states"] is not None
            or ("use_cache" in kwargs and kwargs["use_cache"] is not None)
        ):
            # Bug fix: a separating space was missing between the two concatenated
            # string literals, producing "...calling a model.They have to...".
            tf_logger.warning(
                "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model. "
                "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
            )

        final_booleans["output_attentions"] = config.output_attentions
        final_booleans["output_hidden_states"] = config.output_hidden_states

        if kwargs["return_dict"] is not None:
            tf_logger.warning(
                "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
            )
        # Graph mode requires dict outputs regardless of what the caller asked for.
        final_booleans["return_dict"] = True

        if "use_cache" in kwargs:
            final_booleans["use_cache"] = config.use_cache

    return final_booleans
def input_processing(func, config, input_ids, **kwargs):
    """
    Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input
    has to be named accordingly to the parameters name, i.e. `input_ids = tf.keras.Input(shape=(128,), dtype='int32',
    name="input_ids")` otherwise the order of the tensors will not be guaranteed during the training.

    Args:
        func (:obj:`callable`):
            The callable function of the TensorFlow model.
        config (:class:`~transformers.PretrainedConfig`):
            The config of the running model.
        input_ids:
            The main model input: a tensor, a list/tuple of (possibly symbolic) inputs, or a dict/BatchEncoding
            mapping parameter names to values.
        **kwargs:
            The inputs of the model. Must contain a ``kwargs_call`` dict of extra call-time keyword arguments.

    Returns:
        A dictionary mapping each parameter name of ``func`` to its processed value, with the boolean flags
        normalized for the current execution mode.
    """
    # Collect the parameter names of `func`, dropping `self` and the catch-all `kwargs`.
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)
    # Remap deprecated keyword aliases before rejecting unknown extras.
    if "inputs" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
            FutureWarning,
        )
        output["input_ids"] = kwargs["kwargs_call"].pop("inputs")
    if "decoder_cached_states" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")
    # Any remaining extra keyword argument is unknown to the model: fail loudly.
    if len(kwargs["kwargs_call"]) > 0:
        raise ValueError(
            f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
        )
    kwargs.pop("kwargs_call")
    # Type-check every explicit keyword argument before accepting it.
    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
    if isinstance(input_ids, (tuple, list)):
        # Positional inputs: map each element to a parameter, by tensor name when
        # available, otherwise by position.
        for i, input in enumerate(input_ids):
            # EagerTensors don't allow to use the .name property so we check for a real Tensor
            if type(input) == tf.Tensor:
                # Tensor names have always the pattern `name:id` then we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]
                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
                )
    elif isinstance(input_ids, (dict, BatchEncoding)):
        # Dict-style inputs: honor the same deprecated aliases, then copy every
        # recognized entry after type validation.
        if "inputs" in input_ids:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
                FutureWarning,
            )
            output["input_ids"] = input_ids.pop("inputs")
        if "decoder_cached_states" in input_ids:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_ids.pop("decoder_cached_states")
        for k, v in dict(input_ids).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                # Unknown keys are dropped (with a warning) instead of raising.
                logger.warning(
                    f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
    else:
        # Single input: it must be a tensor (or None) and becomes the first parameter.
        if isinstance(input_ids, tf.Tensor) or input_ids is None:
            output[parameter_names[0]] = input_ids
        else:
            raise ValueError(
                f"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
            )
    # Fill every still-missing parameter with its signature default.
    for name in parameter_names:
        if name not in list(output.keys()) and name != "args":
            output[name] = kwargs.pop(name, signature[name].default)
    # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
    # So to respect the proper output we have to add this exception
    if "args" in output:
        if output["args"] is not None and type(output["args"]) == tf.Tensor:
            tensor_name = output["args"].name.split(":")[0]
            output[tensor_name] = output["args"]
        else:
            # `args` in this case is always the first parameter, then `input_ids`
            output["input_ids"] = output["args"]
        del output["args"]
    if "kwargs" in output:
        del output["kwargs"]
    # Normalize the boolean flags according to the execution mode (eager vs graph).
    boolean_dict = {
        k: v
        for k, v in output.items()
        if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
    }
    output.update(
        booleans_processing(
            config=config,
            **boolean_dict,
        )
    )
    return output
def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
    """
    Detect missing and unexpected layers and load the TF weights accordingly to their names and shapes.

    Args:
        model (:obj:`tf.keras.models.Model`):
            The model to load the weights into.
        resolved_archive_file (:obj:`str`):
            The location of the H5 file.
        ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to ignore weights with shapes that don't match between the checkpoint of the model.
        _prefix (:obj:`str`, `optional`):
            Extra prefix prepended to the saved weight names before matching them against the model's weight
            names (used when the model was saved under an additional name scope).

    Returns:
        Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
        mismatched layers.
    """
    missing_layers = []
    unexpected_layers = []
    mismatched_layers = []

    # Read the H5 file
    with h5py.File(resolved_archive_file, "r") as f:
        # Retrieve the name of each layer from the H5 file
        saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))

        # Find the missing layers from the high level list of layers
        missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)

        # Find the unexpected layers from the high level list of layers
        unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
        saved_weight_names_set = set()
        symbolic_weights_names = set()
        weight_value_tuples = []

        # Compute missing and unexpected sub layers
        # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
        for layer in model.layers:
            # if layer_name from the H5 file belongs to the layers from the instantiated model
            if layer.name in saved_h5_model_layers_name:
                # Get the H5 layer object from its name
                h5_layer_object = f[layer.name]
                # Get all the weights as a list from the layer object
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                saved_weights = {}

                # Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
                # And a set with only the names
                for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
                    # TF names always start with the model name so we ignore it
                    name = "/".join(weight_name.split("/")[1:])

                    if _prefix is not None:
                        name = _prefix + "/" + name

                    saved_weights[name] = np.asarray(h5_layer_object[weight_name])

                    # Add the updated name to the final list for computing missing/unexpected values
                    saved_weight_names_set.add(name)

                # Loop over each weights from the instantiated model and compare with the weights from the H5 file
                for symbolic_weight in symbolic_weights:
                    # TF names always start with the model name so we ignore it
                    if _prefix is not None:
                        # Keep the (possibly multi-segment) prefix and drop the
                        # model-name segment that immediately follows it.
                        delimeter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimeter]
                            + symbolic_weight.name.split("/")[delimeter + 1 :]
                        )
                    else:
                        symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])

                    # here we check if the current weight is among the weights from the H5 file
                    # If yes, get the weight_value of the corresponding weight from the H5 file
                    # If not, make the value to None
                    saved_weight_value = saved_weights.get(symbolic_weight_name, None)

                    # Add the updated name to the final list for computing missing/unexpected values
                    symbolic_weights_names.add(symbolic_weight_name)

                    # If the current weight is found
                    if saved_weight_value is not None:
                        # Check if the shape of the current weight and the one from the H5 file are different
                        if K.int_shape(symbolic_weight) != saved_weight_value.shape:
                            # If yes we reshape the weight from the H5 file accordingly to the current weight
                            # If the two shapes are not compatible we raise an issue
                            try:
                                array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
                            except ValueError as e:
                                if ignore_mismatched_sizes:
                                    # Record the mismatch and skip loading this weight.
                                    mismatched_layers.append(
                                        (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
                                    )
                                    continue
                                else:
                                    raise e
                        else:
                            array = saved_weight_value

                        # We create the tuple that will be loaded and add it to the final list
                        weight_value_tuples.append((symbolic_weight, array))

    # Load all the weights
    K.batch_set_value(weight_value_tuples)

    # Compute the missing and unexpected layers
    missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
    unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))

    return missing_layers, unexpected_layers, mismatched_layers
def init_copy_embeddings(old_embeddings, new_num_tokens):
    r"""
    This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
    new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
    kept or not. Example:

        - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]

            - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]

        - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]

            - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]

    Args:
        old_embeddings (:obj:`tf.Variable`): The 2D embedding matrix to copy from.
        new_num_tokens (:obj:`int`): The target vocabulary size.

    Returns:
        A ``(mask, current_weights)`` pair: ``mask`` has shape ``[new_num_tokens, 1]`` and marks which rows were
        copied from the old embeddings; ``current_weights`` has ``new_num_tokens`` rows, padded with -1 where new
        rows still have to be initialized.
    """
    old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
    size_diff = new_num_tokens - old_num_tokens

    # initialize new embeddings
    # Copy token embeddings from the previous ones
    if tf.math.greater(size_diff, 0):
        # if the new size is greater than the old one, we extend the current embeddings with a padding until getting new size
        # and we create a mask to properly identify the padded values and be replaced by the values of the newly created
        # embeddings
        current_weights = tf.pad(
            old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
        )
        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
        mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
    else:
        # if the new size is lower than the old one, we take the current embeddings until the new size
        current_weights = tf.slice(
            old_embeddings.value(),
            tf.convert_to_tensor([0, 0]),
            tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
        )
        mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)

    return mask, current_weights
class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
r"""
Base class for all TF models.
:class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
base_model_prefix = ""
# a list of re pattern of tensor names to ignore from the model when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_missing = None
# a list of re pattern of tensor names to ignore from the weights when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_unexpected = None
_requires_load_weight_prefix = False
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
"""
Dummy inputs to build the network.
Returns:
:obj:`Dict[str, tf.Tensor]`: The dummy inputs.
"""
return {
"input_ids": tf.constant(DUMMY_INPUTS),
}
    def __init__(self, config, *inputs, **kwargs):
        """
        Initialize the model and store its configuration.

        Args:
            config (:class:`~transformers.PretrainedConfig`): The model configuration; its type is validated here.
            *inputs: Positional arguments forwarded to :obj:`tf.keras.Model`.
            **kwargs: Keyword arguments forwarded to :obj:`tf.keras.Model`.

        Raises:
            ValueError: If ``config`` is not an instance of :class:`~transformers.PretrainedConfig`.
        """
        super().__init__(*inputs, **kwargs)
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path
    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.

        Args:
            config (:class:`~transformers.PretrainedConfig`): Configuration for the new instance.

        Returns:
            An instance of ``cls`` built from ``config``.
        """
        return cls(config, **kwargs)
    # The fixed input signature lets the exported SavedModel accept dynamic batch
    # and sequence lengths for the three standard int32 inputs.
    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.

        Args:
            inputs (:obj:`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
        """
        output = self.call(inputs)

        # Each concrete model post-processes its raw outputs in serving_output.
        return self.serving_output(output)
def serving_output(output):
"""
Prepare the output of the saved model. Each model must implement this function.
Args:
output (:obj:`~transformers.TFBaseModelOutput`):
The output returned by the model.
"""
raise NotImplementedError
def get_input_embeddings(self) -> tf.keras.layers.Layer:
"""
Returns the model's input embeddings layer.
Returns:
:obj:`tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
"""
main_layer = getattr(self, self.base_model_prefix, self)
if main_layer is not self:
return main_layer.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
"""
Set model's input embeddings
Args:
value (:obj:`tf.Variable`):
The new weights mapping hidden states to vocabulary.
"""
main_layer = getattr(self, self.base_model_prefix)
if main_layer is None:
raise NotImplementedError("The model does not implements the base_model_prefix attribute.")
try:
main_layer.set_input_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
main_layer.set_input_embeddings(value)
def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
"""
Returns the model's output embeddings
Returns:
:obj:`tf.Variable`: The new weights mapping vocabulary to hidden states.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_output_embeddings()
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
return lm_head().get_output_embeddings()
return None # Overwrite for models with output embeddings
def set_output_embeddings(self, value):
"""
Set model's output embeddings
Args:
value (:obj:`tf.Variable`):
The new weights mapping hidden states to vocabulary.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_output_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
lm_head.set_output_embeddings(value)
    def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
        """
        Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
        embeddings

        .. deprecated::
            Use :meth:`get_lm_head` instead.

        Return:
            :obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
        """
        warnings.warn(
            "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
        )
        # Backward-compatible alias for get_lm_head.
        return self.get_lm_head()
    def get_prefix_bias_name(self) -> Union[None, str]:
        """
        Get the concatenated _prefix name of the bias from the model name to the parent layer

        .. deprecated::
            Use :meth:`get_bias` instead; this method now always returns :obj:`None`.

        Return:
            :obj:`str`: The _prefix name of the bias.
        """
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return None
def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
"""
Dict of bias attached to an LM head. The key represents the name of the bias attribute.
Return:
:obj:`tf.Variable`: The weights representing the bias, None if not an LM model.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_bias()
except AttributeError:
self(self.dummy_inputs)
return lm_head.get_bias()
return None
def set_bias(self, value):
"""
Set all the bias in the LM head.
Args:
value (:obj:`Dict[tf.Variable]`):
All the new bias attached to an LM head.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_bias(value)
except AttributeError:
self(self.dummy_inputs)
lm_head.set_bias(value)
    def get_lm_head(self) -> tf.keras.layers.Layer:
        """
        The LM Head layer. This method must be overwritten by all the models that have a lm head.

        Return:
            :obj:`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not.
        """
        # Base implementation: no LM head. Subclasses with a head override this.
        return None
    def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
        """
        Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.

        Arguments:
            new_num_tokens (:obj:`int`, `optional`):
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
                just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing
                anything.

        Return:
            :obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model.
        """
        # Fast path: nothing to resize, return the current weights untouched.
        if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
            return self._get_word_embedding_weight(self.get_input_embeddings())

        model_embeds = self._resize_token_embeddings(new_num_tokens)

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens

        return model_embeds
def _get_word_embedding_weight(model, embedding_layer):
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
# The reason why the attributes don't exist might be
# because the model is not built, so retry getting
# the argument after building the model
model(model.dummy_inputs)
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
return None
    def _resize_token_embeddings(self, new_num_tokens):
        # Resize the input embedding matrix first, then keep any untied LM-head
        # bias and decoder weights in sync with the new vocabulary size.
        old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)

        # if word embeddings are not tied, make sure that lm head bias is resized as well
        if self.get_bias() is not None:
            old_lm_head_bias = self.get_bias()
            new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)

            self.set_bias(new_lm_head_bias)

        # if word embeddings are not tied, make sure that lm head decoder is resized as well
        if self.get_output_embeddings() is not None:
            old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
            new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)

            self.set_output_embeddings(new_lm_head_decoder)

        # Install the resized input embeddings last, then return the live layer.
        self.set_input_embeddings(new_embeddings)

        return self.get_input_embeddings()
    def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
        """
        Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.
        Reducing the size will remove vectors from the end

        Args:
            old_lm_head_bias (:obj:`tf.Variable`):
                Old lm head bias to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns None

        Return:
            :obj:`tf.Variable`: Pointer to the resized bias.
        """
        new_lm_head_bias = {}

        for attr, weight in old_lm_head_bias.items():
            # Bias variables may be rank-1 ([vocab]) or rank-2 ([first_dim, vocab]);
            # first_dim is None in the rank-1 case.
            first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
            size_diff = new_num_tokens - old_num_tokens
            final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]

            # initialize new bias
            if tf.math.greater(size_diff, 0):
                # Growing: pad the old values with -1 and mask the padded slots so they
                # are later replaced by freshly initialized values.
                padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
                current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
                num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
                mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
                bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
                bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
            else:
                # Shrinking: keep only the first new_num_tokens entries.
                slice_from = [0] if first_dim is None else [0, 0]
                current_bias = tf.slice(
                    weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
                )
                bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)

            new_bias = self.add_weight(
                shape=final_shape,
                initializer="zeros",
                trainable=True,
                name=weight.name.split(":")[0],
            )
            # Copy old values where the mask is True, keep the new initialization elsewhere.
            init_bias = tf.where(bias_mask, current_bias, new_bias.value())

            new_bias.assign(init_bias)
            new_lm_head_bias[attr] = new_bias

        return new_lm_head_bias
    def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
        """
        Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.
        Reducing the size will remove vectors from the end

        Args:
            old_lm_head_decoder (:obj:`tf.Variable`):
                Old lm head decoder to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns None

        Return:
            :obj:`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the
            input ones.
        """
        new_lm_head_decoder = old_lm_head_decoder
        # If the decoder shares its weights with the input embeddings, resizing the
        # input embeddings already covers it — return the decoder untouched.
        is_input_output_equals = tf.reduce_any(
            self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
        )

        if old_lm_head_decoder is not None and not is_input_output_equals:
            old_embedding_dim = shape_list(old_lm_head_decoder)[1]
            decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
            new_lm_head_decoder = self.add_weight(
                shape=(new_num_tokens, old_embedding_dim),
                initializer="zeros",
                trainable=True,
                name=old_lm_head_decoder.name.split(":")[0],
            )
            # Copy old rows where the mask is True, keep the new initialization elsewhere.
            init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())

            new_lm_head_decoder.assign(init_decoder)

        return new_lm_head_decoder
    def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
        """
        Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end

        Args:
            old_embeddings (:obj:`tf.Variable`):
                Old embeddings to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the embedding matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`tf.Variable`` module of the model without doing anything.

        Return:
            :obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        old_embedding_dim = shape_list(old_embeddings)[1]
        # Fall back to the conventional 0.02 when the config has no initializer_range.
        init_range = getattr(self.config, "initializer_range", 0.02)
        embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
        new_embeddings = self.add_weight(
            name=old_embeddings.name.split(":")[0],
            shape=[new_num_tokens, old_embedding_dim],
            initializer=get_initializer(init_range),
            dtype=tf.float32,
        )
        # Keep the old rows where the mask is True; new rows get the fresh initialization.
        init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())

        new_embeddings.assign(init_embeddings)

        return new_embeddings
    def prune_heads(self, heads_to_prune):
        """
        Prunes heads of the base model.

        Arguments:
            heads_to_prune (:obj:`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
                heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
                0 and 2 on layer 1 and heads 2 and 3 on layer 2.

        Raises:
            NotImplementedError: Always, in this base class; subclasses supporting pruning must override it.
        """
        raise NotImplementedError
    def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        :func:`~transformers.TFPreTrainedModel.from_pretrained` class method.

        Arguments:
            save_directory (:obj:`str`):
                Directory to which to save. Will be created if it doesn't exist.
            saved_model (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If the model has to be saved in saved model format as well or not.
            version (:obj:`int`, `optional`, defaults to 1):
                The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
                TensorFlow Serving as detailed in the official documentation
                https://www.tensorflow.org/tfx/serving/serving_basic
            push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to push your model to the Hugging Face model hub after saving it.

                .. warning::

                    Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
                    :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
                    pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
                    instead.

            kwargs:
                Additional key word arguments passed along to the
                :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        # The repo must be set up before anything is written so the directory is a clone.
        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo = self._create_or_get_repo(save_directory, **kwargs)

        os.makedirs(save_directory, exist_ok=True)

        if saved_model:
            # Export an additional TensorFlow SavedModel under saved_model/<version>/.
            saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
            self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
            logger.info(f"Saved model created in {saved_model_dir}")

        # Save configuration file
        # Record the concrete architecture name (class name without the "TF" prefix).
        self.config.architectures = [self.__class__.__name__[2:]]
        self.config.save_pretrained(save_directory)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
        self.save_weights(output_model_file)
        logger.info(f"Model weights saved in {output_model_file}")

        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Model pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
    r"""
    Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.

    The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
    pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
    task.

    The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
    weights are discarded.

    Parameters:
        pretrained_model_name_or_path (:obj:`str`, `optional`):
            Can be either:

            - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
              Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
              a user or organization name, like ``dbmdz/bert-base-german-cased``.
            - A path to a `directory` containing model weights saved using
              :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
            - A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
              this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
              as ``config`` argument. This loading path is slower than converting the PyTorch model in a
              TensorFlow model using the provided conversion scripts and loading the TensorFlow model
              afterwards.
            - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
              arguments ``config`` and ``state_dict``).
        model_args (sequence of positional arguments, `optional`):
            All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
        config (:obj:`Union[PretrainedConfig, str]`, `optional`):
            Can be either:

            - an instance of a class derived from :class:`~transformers.PretrainedConfig`,
            - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.

            Configuration for the model to use instead of an automatically loaded configuration. Configuration can
            be automatically loaded when:

            - The model is a model provided by the library (loaded with the `model id` string of a pretrained
              model).
            - The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is reloaded
              by supplying the save directory.
            - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
              configuration JSON file named `config.json` is found in the directory.
        from_pt: (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Load the model weights from a PyTorch state_dict save file (see docstring of
            ``pretrained_model_name_or_path`` argument).
        ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
            as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
            checkpoint with 3 labels).
        cache_dir (:obj:`str`, `optional`):
            Path to a directory in which a downloaded pretrained model configuration should be cached if the
            standard cache should not be used.
        force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to force the (re-)download of the model weights and configuration files, overriding the
            cached versions if they exist.
        resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to delete incompletely received files. Will attempt to resume the download if such a
            file exists.
        proxies: (:obj:`Dict[str, str], `optional`):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
        output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.
        local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to only look at local files (e.g., not try doanloading the model).
        use_auth_token (:obj:`str` or `bool`, `optional`):
            The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
            generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
        revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
            identifier allowed by git.
        mirror(:obj:`str`, `optional`):
            Mirror source to accelerate downloads in China. If you are from China and have an accessibility
            problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
            Please refer to the mirror site for more information.
        kwargs (remaining dictionary of keyword arguments, `optional`):
            Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
            :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
            automatically loaded:

            - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
              underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
              already been done)
            - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
              initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
              ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
              with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
              attribute will be passed to the underlying model's ``__init__`` function.

    .. note::

        Passing :obj:`use_auth_token=True` is required when you want to use a private model.

    Examples::

        >>> from transformers import BertConfig, TFBertModel
        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = TFBertModel.from_pretrained('bert-base-uncased')
        >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
        >>> model = TFBertModel.from_pretrained('./test/saved_model/')
        >>> # Update configuration during loading.
        >>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True)
        >>> assert model.config.output_attentions == True
        >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
        >>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json')
        >>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config)
    """
    # Pull the loading-related options out of `kwargs`; whatever remains is forwarded to the
    # config loader (and from there, unused keys reach the model's __init__).
    config = kwargs.pop("config", None)
    cache_dir = kwargs.pop("cache_dir", None)
    from_pt = kwargs.pop("from_pt", False)
    ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
    force_download = kwargs.pop("force_download", False)
    resume_download = kwargs.pop("resume_download", False)
    proxies = kwargs.pop("proxies", None)
    output_loading_info = kwargs.pop("output_loading_info", False)
    local_files_only = kwargs.pop("local_files_only", False)
    use_auth_token = kwargs.pop("use_auth_token", None)
    revision = kwargs.pop("revision", None)
    mirror = kwargs.pop("mirror", None)
    load_weight_prefix = kwargs.pop("load_weight_prefix", None)
    from_pipeline = kwargs.pop("_from_pipeline", None)
    from_auto_class = kwargs.pop("_from_auto", False)

    # Telemetry headers sent along with the download request.
    user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
    if from_pipeline is not None:
        user_agent["using_pipeline"] = from_pipeline

    if is_offline_mode() and not local_files_only:
        logger.info("Offline mode: forcing local_files_only=True")
        local_files_only = True

    # Load config if we don't provide a configuration
    if not isinstance(config, PretrainedConfig):
        config_path = config if config is not None else pretrained_model_name_or_path
        config, model_kwargs = cls.config_class.from_pretrained(
            config_path,
            *model_args,
            cache_dir=cache_dir,
            return_unused_kwargs=True,
            force_download=force_download,
            resume_download=resume_download,
            proxies=proxies,
            local_files_only=local_files_only,
            use_auth_token=use_auth_token,
            revision=revision,
            _from_auto=from_auto_class,
            _from_pipeline=from_pipeline,
            **kwargs,
        )
    else:
        model_kwargs = kwargs

    # Load model: resolve which archive file (local dir, local file, remote URL, or hub) to use.
    if pretrained_model_name_or_path is not None:
        if os.path.isdir(pretrained_model_name_or_path):
            if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                # Load from a PyTorch checkpoint in priority if from_pt
                archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                # Load from a TF 2.0 checkpoint
                archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
            else:
                raise EnvironmentError(
                    f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory "
                    f"{pretrained_model_name_or_path} or `from_pt` set to False"
                )
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            archive_file = pretrained_model_name_or_path
        elif os.path.isfile(pretrained_model_name_or_path + ".index"):
            # Bare TF checkpoint prefix (weights stored as <prefix>.index / <prefix>.data-*).
            archive_file = pretrained_model_name_or_path + ".index"
        else:
            archive_file = hf_bucket_url(
                pretrained_model_name_or_path,
                filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME),
                revision=revision,
                mirror=mirror,
            )

        try:
            # Load from URL or cache if already cached
            resolved_archive_file = cached_path(
                archive_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
            )
        except EnvironmentError as err:
            logger.error(err)
            msg = (
                f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
            )
            raise EnvironmentError(msg)

        if resolved_archive_file == archive_file:
            logger.info(f"loading weights file {archive_file}")
        else:
            logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
    else:
        resolved_archive_file = None

    config.name_or_path = pretrained_model_name_or_path

    # composed models, *e.g.* TFRag, require special treatment when it comes to loading
    # pre-trained weights.
    if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
        model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")

    # Instantiate model.
    model = cls(config, *model_args, **model_kwargs)

    if from_pt:
        from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model

        # Load from a PyTorch checkpoint
        return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)

    # we might need to extend the variable scope for composite models
    if load_weight_prefix is not None:
        with tf.compat.v1.variable_scope(load_weight_prefix):
            model(model.dummy_inputs)  # build the network with dummy inputs
    else:
        model(model.dummy_inputs)  # build the network with dummy inputs

    # NOTE(review): plain `assert` is stripped under `python -O`; relies on the file never
    # being run optimized.
    assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"
    # 'by_name' allow us to do transfer learning by skipping/adding layers
    # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
    try:
        missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
            model,
            resolved_archive_file,
            ignore_mismatched_sizes=ignore_mismatched_sizes,
            _prefix=load_weight_prefix,
        )
    except OSError as e:
        # An OSError from h5py usually means the file is not a real H5 archive; detect the
        # "git-lfs pointer file" case (text starting with "version") to give a better error.
        try:
            with open(resolved_archive_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please install "
                        "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
                        "you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise OSError(
                "Unable to load weights from h5 file. "
                "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
            )

    model(model.dummy_inputs)  # Make sure restore ops are run

    # Filter out keys the model class declares as expected-to-be-absent.
    if cls._keys_to_ignore_on_load_missing is not None:
        for pat in cls._keys_to_ignore_on_load_missing:
            missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

    if cls._keys_to_ignore_on_load_unexpected is not None:
        for pat in cls._keys_to_ignore_on_load_unexpected:
            unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

    if len(unexpected_keys) > 0:
        logger.warning(
            f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
            f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
            f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
            f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
            f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
            f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
            f"and are newly initialized: {missing_keys}\n"
            f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
        )
    elif len(mismatched_keys) == 0:
        logger.warning(
            f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
            f"If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {model.__class__.__name__} for predictions without further training."
        )
    if len(mismatched_keys) > 0:
        mismatched_warning = "\n".join(
            [
                f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                for key, shape1, shape2 in mismatched_keys
            ]
        )
        logger.warning(
            f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
            f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
            f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
        )

    if output_loading_info:
        loading_info = {
            "missing_keys": missing_keys,
            "unexpected_keys": unexpected_keys,
            "mismatched_keys": mismatched_keys,
        }

        return model, loading_info

    return model
# To update the docstring, we need to copy the method, otherwise we change the original docstring.
# (`copy_func` gives `push_to_hub` its own function object, so calling `.format(...)` on its
# `__doc__` template below does not mutate the docstring of the shared mixin method.)
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
    object="model", object_class="TFAutoModel", object_files="model checkpoint"
)
class TFConv1D(tf.keras.layers.Layer):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (:obj:`int`):
            The number of output features.
        nx (:obj:`int`):
            The number of input features.
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
    """

    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.nf = nf
        self.nx = nx
        self.initializer_range = initializer_range

    def build(self, input_shape):
        # Note the (input, output) weight layout — transposed w.r.t. a regular Dense kernel.
        self.weight = self.add_weight(
            "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
        )
        self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())

    def call(self, x):
        # Flatten everything but the feature axis, project, then restore (batch, seq) dims.
        batch_size, seq_length = shape_list(x)[:2]
        flat_input = tf.reshape(x, [-1, self.nx])
        projected = tf.matmul(flat_input, self.weight) + self.bias
        return tf.reshape(projected, [batch_size, seq_length, self.nf])
class TFSharedEmbeddings(tf.keras.layers.Layer):
    r"""
    Construct shared token embeddings.

    The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language
    modeling.

    Args:
        vocab_size (:obj:`int`):
            The size of the vocabulary, e.g., the number of unique tokens.
        hidden_size (:obj:`int`):
            The size of the embedding vectors.
        initializer_range (:obj:`float`, `optional`):
            The standard deviation to use when initializing the weights. If no value is provided, it will default to
            :math:`1/\sqrt{hidden\_size}`.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
    """

    def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # Default stddev follows the usual 1/sqrt(d) scaling when none is given.
        if initializer_range is None:
            initializer_range = hidden_size ** -0.5
        self.initializer_range = initializer_range

    def build(self, input_shape):
        """
        Build shared token embedding layer Shared weights logic adapted from
        https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        self.weight = self.add_weight(
            "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
        )
        super().build(input_shape)

    def get_config(self):
        # Merge the base layer config with this layer's constructor arguments.
        base_config = super().get_config()
        return {
            **base_config,
            "vocab_size": self.vocab_size,
            "hidden_size": self.hidden_size,
            "initializer_range": self.initializer_range,
        }

    def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
        """
        Get token embeddings of inputs or decode final hidden state.

        Args:
            inputs (:obj:`tf.Tensor`):
                In embedding mode, should be an int64 tensor with shape :obj:`[batch_size, length]`.

                In linear mode, should be a float tensor with shape :obj:`[batch_size, length, hidden_size]`.
            mode (:obj:`str`, defaults to :obj:`"embedding"`):
                A valid value is either :obj:`"embedding"` or :obj:`"linear"`, the first one indicates that the layer
                should be used as an embedding layer, the second one that the layer should be used as a linear decoder.

        Returns:
            :obj:`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape
            :obj:`[batch_size, length, embedding_size]`.

            In linear mode, the output is a float32 with shape :obj:`[batch_size, length, vocab_size]`.

        Raises:
            ValueError: if :obj:`mode` is not valid.
        """
        if mode == "embedding":
            return self._embedding(inputs)
        if mode == "linear":
            return self._linear(inputs)
        raise ValueError(f"mode {mode} is not valid.")

    def _embedding(self, input_ids):
        """Applies embedding based on inputs tensor."""
        # Plain row lookup in the shared table.
        return tf.gather(self.weight, input_ids)

    def _linear(self, inputs):
        """
        Computes logits by running inputs through a linear layer.

        Args:
            inputs: A float32 tensor with shape [..., hidden_size]

        Returns:
            float32 tensor with shape [..., vocab_size].
        """
        # Flatten to 2-D, multiply by the transposed table, then restore the leading dims.
        leading_dims = shape_list(inputs)[:-1]
        flat_inputs = tf.reshape(inputs, [-1, self.hidden_size])
        logits = tf.matmul(flat_inputs, self.weight, transpose_b=True)
        return tf.reshape(logits, leading_dims + [self.vocab_size])
class TFSequenceSummary(tf.keras.layers.Layer):
    """
    Compute a single vector summary of a sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:

                - :obj:`"last"` -- Take the last token hidden state (like XLNet)
                - :obj:`"first"` -- Take the first token hidden state (like Bert)
                - :obj:`"mean"` -- Take the mean of all tokens hidden states
                - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - :obj:`"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
              :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
            - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
              output, another string or :obj:`None` will add no activation.
            - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
              activation.
            - **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and
              activation.

        initializer_range (:obj:`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
    """

    def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
        super().__init__(**kwargs)

        # BUGFIX: this used to probe `hasattr(config, "summary_use_proj")` while *reading*
        # `config.summary_type` — a config defining `summary_use_proj` without `summary_type`
        # crashed, and one defining only `summary_type` had it silently ignored. Probe the
        # attribute we actually read (matches the PyTorch `SequenceSummary`).
        self.summary_type = getattr(config, "summary_type", "last")
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
        if self.has_summary:
            # Project either to the label space or back to the hidden size.
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = tf.keras.layers.Dense(
                num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
            )

        self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh"
        if self.has_activation:
            self.activation = tf.keras.activations.tanh

        self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
        if self.has_first_dropout:
            self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)

        self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
        if self.has_last_dropout:
            self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)

    def call(self, inputs, cls_index=None, training=False):
        """
        Summarize a sequence of hidden states into one vector per example.

        ``inputs`` may be the hidden states tensor itself, a ``(hidden_states, cls_index)``
        tuple/list, or a dict with ``"hidden_states"`` / ``"cls_index"`` keys.
        """
        if not isinstance(inputs, (dict, tuple, list)):
            hidden_states = inputs
        elif isinstance(inputs, (tuple, list)):
            hidden_states = inputs[0]
            cls_index = inputs[1] if len(inputs) > 1 else None
            assert len(inputs) <= 2, "Too many inputs."
        else:
            hidden_states = inputs.get("hidden_states")
            cls_index = inputs.get("cls_index", None)

        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = tf.reduce_mean(hidden_states, axis=1)
        elif self.summary_type == "cls_index":
            hidden_shape = shape_list(hidden_states)  # e.g. [batch, num choices, seq length, hidden dims]
            if cls_index is None:
                # Default to the last position of every sequence.
                cls_index = tf.fill(
                    hidden_shape[:-2], hidden_shape[-2] - 1
                )  # A tensor full of shape [batch] or [batch, num choices] full of sequence length
            cls_shape = shape_list(cls_index)
            if len(cls_shape) <= len(hidden_shape) - 2:
                cls_index = tf.expand_dims(cls_index, axis=-1)
            # Gather the hidden state at each requested position, then drop the singleton axis.
            output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
            output = tf.squeeze(
                output, axis=len(hidden_shape) - 2
            )  # shape of output: (batch, num choices, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        if self.has_first_dropout:
            output = self.first_dropout(output, training=training)
        if self.has_summary:
            output = self.summary(output)
        if self.has_activation:
            output = self.activation(output)
        if self.has_last_dropout:
            output = self.last_dropout(output, training=training)

        return output
def shape_list(tensor: tf.Tensor) -> List[int]:
    """
    Deal with dynamic shape in tensorflow cleanly.

    Args:
        tensor (:obj:`tf.Tensor`): The tensor we want the shape of.

    Returns:
        :obj:`List[int]`: The shape of the tensor as a list.
    """
    runtime_shape = tf.shape(tensor)

    # Fully unknown rank: nothing static to merge in, return the dynamic shape op.
    if tensor.shape == tf.TensorShape(None):
        return runtime_shape

    # Prefer the statically-known size per axis; fall back to the dynamic value.
    dims = []
    for axis, static_size in enumerate(tensor.shape.as_list()):
        dims.append(static_size if static_size is not None else runtime_shape[axis])
    return dims
def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
    """
    Creates a :obj:`tf.initializers.TruncatedNormal` with the given range.

    Args:
        initializer_range (`float`, defaults to 0.02): Standard deviation of the initializer range.

    Returns:
        :obj:`tf.initializers.TruncatedNormal`: The truncated normal initializer.
    """
    initializer = tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
    return initializer
class TFWrappedEmbeddings:
    """
    Wraps a shared-embeddings layer in a plain (non-Keras) Python object so weight restoring is
    not confused by an extra layer, while still making sure the wrapped layer is invoked inside
    the correct absolute variable scope so its weights are saved/stored under the right name.
    """

    def __init__(self, layer, abs_scope_name=None):
        self._layer = layer
        self._abs_scope_name = abs_scope_name

    def _invoke(self, fn, inputs, mode):
        # Shared dispatch: run `fn` directly, or inside the absolute scope when one was given.
        if self._abs_scope_name is None:
            return fn(inputs, mode)
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as scope:
            with tf.name_scope(scope.original_name_scope):
                return fn(inputs, mode)

    def call(self, inputs, mode="embedding"):
        # Mirrors the wrapped layer's `.call` (bypasses Keras `__call__` machinery).
        return self._invoke(self._layer.call, inputs, mode)

    def __call__(self, inputs, mode="embedding"):
        # Mirrors invoking the wrapped layer itself (goes through Keras `__call__`).
        return self._invoke(self._layer, inputs, mode)
| 45.192993 | 167 | 0.63202 |
import functools
import inspect
import os
import re
import warnings
from typing import Dict, List, Optional, Union
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
PushToHubMixin,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
)
from .generation_tf_utils import TFGenerationMixin
from .tokenization_utils_base import BatchEncoding
from .utils import logging
# Module-level logger (transformers' logging wrapper keyed on this module's name).
logger = logging.get_logger(__name__)
# TensorFlow's own logger, used where warnings should flow through TF's logging machinery.
tf_logger = tf.get_logger()

# Accepted types for the main input of a TF model call (tensors, numpy arrays, or
# lists/dicts thereof).
TFModelInputType = Union[
    List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor
]
class TFModelUtilsMixin:
    """A few utilities to be mixed into :obj:`tf.keras.Model` subclasses."""

    def num_parameters(self, only_trainable: bool = False) -> int:
        """
        Get the number of (optionally, trainable) parameters in the model.

        Args:
            only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of trainable parameters.

        Returns:
            :obj:`int`: The number of parameters.
        """
        if not only_trainable:
            return self.count_params()
        # Sum the element counts of every trainable variable.
        per_variable_counts = (np.prod(variable.shape.as_list()) for variable in self.trainable_variables)
        return int(sum(per_variable_counts))
def keras_serializable(cls):
    """
    Class decorator that makes a Keras layer holding a :class:`~transformers.PretrainedConfig`
    serializable through Keras' config machinery.

    It wraps ``cls.__init__`` so the layer accepts its config either positionally, as a
    ``config=`` keyword (as a :class:`PretrainedConfig` or a plain dict), and — when the class
    did not define its own ``get_config`` — installs one that embeds the config dict in the
    Keras layer config. The class must declare a ``config_class`` attribute.
    """
    initializer = cls.__init__
    config_class = getattr(cls, "config_class", None)
    if config_class is None:
        raise AttributeError("Must set `config_class` to use @keras_serializable")

    @functools.wraps(initializer)
    def wrapped_init(self, *args, **kwargs):
        # Accept the config as the first positional arg or as a `config=` keyword.
        config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)
        if isinstance(config, dict):
            # A raw dict (e.g. coming back from Keras deserialization) is rebuilt first.
            config = config_class.from_dict(config)
            initializer(self, config, *args, **kwargs)
        elif isinstance(config, PretrainedConfig):
            if len(args) > 0:
                # Config already sits in `args`; forward as-is.
                initializer(self, *args, **kwargs)
            else:
                initializer(self, config, *args, **kwargs)
        else:
            raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")
        # Kept for the generated `get_config` below.
        self._config = config
        self._kwargs = kwargs

    cls.__init__ = wrapped_init

    if not hasattr(cls, "get_config"):
        raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
    if hasattr(cls.get_config, "_is_default"):
        # The class inherited Keras' default get_config; install one that embeds our config.

        def get_config(self):
            cfg = super(cls, self).get_config()
            cfg["config"] = self._config.to_dict()
            cfg.update(self._kwargs)
            return cfg

        cls.get_config = get_config

    cls._keras_serializable = True
    if hasattr(tf.keras.utils, "register_keras_serializable"):
        cls = tf.keras.utils.register_keras_serializable()(cls)
    return cls
class TFCausalLanguageModelingLoss:
    """Loss mixin for causal language modeling; tokens labeled ``-100`` are excluded."""

    def compute_loss(self, labels, logits):
        # Only tokens whose label is not -100 contribute to the loss.
        flat_labels = tf.reshape(labels, (-1,))
        keep_mask = tf.not_equal(flat_labels, -100)
        vocab_size = shape_list(logits)[2]
        masked_logits = tf.boolean_mask(tf.reshape(logits, (-1, vocab_size)), keep_mask)
        masked_labels = tf.boolean_mask(flat_labels, keep_mask)
        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        return cross_entropy(masked_labels, masked_logits)
class TFQuestionAnsweringLoss:
    """Loss mixin for span question answering: mean of start- and end-position losses."""

    def compute_loss(self, labels, logits):
        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        loss_for_start = cross_entropy(labels["start_position"], logits[0])
        loss_for_end = cross_entropy(labels["end_position"], logits[1])
        return (loss_for_start + loss_for_end) / 2.0
class TFTokenClassificationLoss:
    """Loss mixin for token classification; labels of ``-100`` (legacy: ``-1``) are excluded."""

    def compute_loss(self, labels, logits):
        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        flat_labels = tf.reshape(labels, (-1,))
        if tf.math.reduce_any(labels == -1):
            # Legacy masking value; still accepted but deprecated in favor of -100.
            warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
            keep_mask = flat_labels != -1
        else:
            keep_mask = flat_labels != -100
        num_classes = shape_list(logits)[2]
        masked_logits = tf.boolean_mask(tf.reshape(logits, (-1, num_classes)), keep_mask)
        masked_labels = tf.boolean_mask(flat_labels, keep_mask)
        return cross_entropy(masked_labels, masked_logits)
class TFSequenceClassificationLoss:
    """Loss mixin for sequence classification; single-output logits are treated as regression."""

    def compute_loss(self, labels, logits):
        logits_shape = shape_list(logits)
        if len(logits_shape) == 1 or logits_shape[1] == 1:
            # One value per example -> regression, scored with MSE.
            loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
        else:
            loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True, reduction=tf.keras.losses.Reduction.NONE
            )
        return loss_fn(labels, logits)
class TFMultipleChoiceLoss:
    """Loss mixin for multiple-choice tasks: sparse categorical cross-entropy over the choices."""

    def compute_loss(self, labels, logits):
        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        return cross_entropy(labels, logits)
class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
    """Loss mixin for masked language modeling; identical to the causal LM loss it inherits."""
class TFNextSentencePredictionLoss:
    """Loss mixin for next-sentence prediction: 2-way classification, ignoring ``-100`` labels."""

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # Sentence pairs labeled -100 are masked out of the loss.
        next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
        next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
        next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)
        return loss_fn(next_sentence_label, next_sentence_reduced_logits)
def booleans_processing(config, **kwargs):
    """Resolve the boolean run-time options (`output_attentions`, `output_hidden_states`,
    `return_dict` and, when present, `use_cache`): in eager mode a per-call value wins
    over the config default; in graph mode the config values are forced, since booleans
    are baked into the traced graph.

    Args:
        config: model configuration holding the default boolean values.
        **kwargs: per-call boolean overrides (None means "use the config value").

    Returns:
        dict: the resolved boolean values.
    """
    final_booleans = {}
    if tf.executing_eagerly():
        # Eager mode: an explicit (non-None) per-call value takes precedence.
        final_booleans["output_attentions"] = (
            kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
        )
        final_booleans["output_hidden_states"] = (
            kwargs["output_hidden_states"]
            if kwargs["output_hidden_states"] is not None
            else config.output_hidden_states
        )
        final_booleans["return_dict"] = (
            kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
        )
        # `use_cache` only exists for some model classes.
        if "use_cache" in kwargs:
            final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache
    else:
        # Graph mode: per-call overrides cannot take effect, so warn when one was given.
        if (
            kwargs["output_attentions"] is not None
            or kwargs["output_hidden_states"] is not None
            or ("use_cache" in kwargs and kwargs["use_cache"] is not None)
        ):
            tf_logger.warning(
                "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model."
                "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
            )
        final_booleans["output_attentions"] = config.output_attentions
        final_booleans["output_hidden_states"] = config.output_hidden_states
        if kwargs["return_dict"] is not None:
            tf_logger.warning(
                "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
            )
        # Graph mode always returns a dict-like output.
        final_booleans["return_dict"] = True
        if "use_cache" in kwargs:
            final_booleans["use_cache"] = config.use_cache
    return final_booleans
def input_processing(func, config, input_ids, **kwargs):
    """Normalize the many ways a TF model can be called (positional tensor, list/tuple,
    dict/BatchEncoding, or keyword arguments) into one {parameter_name: value} dict
    matching the signature of `func`, with boolean options resolved via
    `booleans_processing`.

    Args:
        func: the model method being called; its signature drives the parameter names.
        config: the model configuration (used to resolve boolean options).
        input_ids: the first positional input (tensor, list/tuple, dict/BatchEncoding or None).
        **kwargs: remaining keyword arguments, including the raw `kwargs_call` dict.

    Returns:
        dict: one entry per parameter of `func`.

    Raises:
        ValueError: on unsupported keyword arguments or disallowed value types.
    """
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)
    # Remap deprecated argument names passed through the extra kwargs.
    if "inputs" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
            FutureWarning,
        )
        output["input_ids"] = kwargs["kwargs_call"].pop("inputs")
    if "decoder_cached_states" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")
    # Anything left in kwargs_call is not a parameter of `func`.
    if len(kwargs["kwargs_call"]) > 0:
        raise ValueError(
            f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
        )
    kwargs.pop("kwargs_call")
    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
    if isinstance(input_ids, (tuple, list)):
        # Positional inputs: map each element to a parameter, by tensor name when available.
        for i, input in enumerate(input_ids):
            if type(input) == tf.Tensor:
                # Tensor names have always the pattern `name:id` then we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]
                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
                )
    elif isinstance(input_ids, (dict, BatchEncoding)):
        # Dict-like inputs: handle deprecated keys, then copy recognized entries.
        if "inputs" in input_ids:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
                FutureWarning,
            )
            output["input_ids"] = input_ids.pop("inputs")
        if "decoder_cached_states" in input_ids:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_ids.pop("decoder_cached_states")
        for k, v in dict(input_ids).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                logger.warning(
                    f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
    else:
        # Single tensor (or None): it is the first parameter of `func`.
        if isinstance(input_ids, tf.Tensor) or input_ids is None:
            output[parameter_names[0]] = input_ids
        else:
            raise ValueError(
                f"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
            )
    # Fill in defaults for every parameter not supplied by the caller.
    for name in parameter_names:
        if name not in list(output.keys()) and name != "args":
            output[name] = kwargs.pop(name, signature[name].default)
    # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
    # So to respect the proper output we have to add this exception
    if "args" in output:
        if output["args"] is not None and type(output["args"]) == tf.Tensor:
            tensor_name = output["args"].name.split(":")[0]
            output[tensor_name] = output["args"]
        else:
            # `args` in this case is always the first parameter, then `input_ids`
            output["input_ids"] = output["args"]
        del output["args"]
    if "kwargs" in output:
        del output["kwargs"]
    # Resolve the boolean options against the config (eager vs. graph mode).
    boolean_dict = {
        k: v
        for k, v in output.items()
        if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
    }
    output.update(
        booleans_processing(
            config=config,
            **boolean_dict,
        )
    )
    return output
def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
    """Load weights from an H5 checkpoint file into `model`, layer by layer.

    Args:
        model: the instantiated (and already built) model to load weights into.
        resolved_archive_file: path to the H5 weights file.
        ignore_mismatched_sizes: if True, record shape-incompatible weights instead of raising.
        _prefix: optional name-scope prefix prepended to the saved weight names.

    Returns:
        tuple: `(missing_layers, unexpected_layers, mismatched_layers)` where the first two
        are lists of layer/weight names and the last is a list of
        `(name, checkpoint_shape, model_shape)` triples.
    """
    missing_layers = []
    unexpected_layers = []
    mismatched_layers = []
    # Read the H5 file
    with h5py.File(resolved_archive_file, "r") as f:
        # Retrieve the name of each layer from the H5 file
        saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))
        # Find the missing layers from the high level list of layers
        missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)
        # Find the unexpected layers from the high level list of layers
        unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
        saved_weight_names_set = set()
        symbolic_weights_names = set()
        weight_value_tuples = []
        # Compute missing and unexpected sub layers
        # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
        for layer in model.layers:
            # if layer_name from the H5 file belongs to the layers from the instantiated model
            if layer.name in saved_h5_model_layers_name:
                # Get the H5 layer object from its name
                h5_layer_object = f[layer.name]
                # Get all the weights as a list from the layer object
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                saved_weights = {}
                # Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
                # And a set with only the names
                for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
                    # TF names always start with the model name so we ignore it
                    name = "/".join(weight_name.split("/")[1:])
                    if _prefix is not None:
                        name = _prefix + "/" + name
                    saved_weights[name] = np.asarray(h5_layer_object[weight_name])
                    # Add the updated name to the final list for computing missing/unexpected values
                    saved_weight_names_set.add(name)
                # Loop over each weights from the instantiated model and compare with the weights from the H5 file
                for symbolic_weight in symbolic_weights:
                    # TF names always start with the model name so we ignore it
                    if _prefix is not None:
                        delimeter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimeter]
                            + symbolic_weight.name.split("/")[delimeter + 1 :]
                        )
                    else:
                        symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])
                    # here we check if the current weight is among the weights from the H5 file
                    # If yes, get the weight_value of the corresponding weight from the H5 file
                    # If not, make the value to None
                    saved_weight_value = saved_weights.get(symbolic_weight_name, None)
                    # Add the updated name to the final list for computing missing/unexpected values
                    symbolic_weights_names.add(symbolic_weight_name)
                    # If the current weight is found
                    if saved_weight_value is not None:
                        # Check if the shape of the current weight and the one from the H5 file are different
                        if K.int_shape(symbolic_weight) != saved_weight_value.shape:
                            # If yes we reshape the weight from the H5 file accordingly to the current weight
                            # If the two shapes are not compatible we raise an issue
                            try:
                                array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
                            except ValueError as e:
                                if ignore_mismatched_sizes:
                                    mismatched_layers.append(
                                        (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
                                    )
                                    continue
                                else:
                                    raise e
                        else:
                            array = saved_weight_value
                        # We create the tuple that will be loaded and add it to the final list
                        weight_value_tuples.append((symbolic_weight, array))
    # Load all the weights
    K.batch_set_value(weight_value_tuples)
    # Compute the missing and unexpected layers
    missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
    unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))
    return missing_layers, unexpected_layers, mismatched_layers
def init_copy_embeddings(old_embeddings, new_num_tokens):
    """Prepare the data needed to resize an embedding matrix to `new_num_tokens` rows.

    Returns:
        tuple: `(mask, current_weights)`, both with `new_num_tokens` rows.
        `current_weights` carries the old values (padded when growing, truncated when
        shrinking) and `mask` is True where a value was copied from the old embeddings
        and False where the row must come from the freshly initialized embeddings.
    """
    old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
    size_diff = new_num_tokens - old_num_tokens
    # initialize new embeddings
    # Copy token embeddings from the previous ones
    if tf.math.greater(size_diff, 0):
        # if the new size is greater than the old one, we extend the current embeddings with a padding until getting new size
        # and we create a mask to properly identify the padded values and be replaced by the values of the newly created
        # embeddings
        current_weights = tf.pad(
            old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
        )
        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
        mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
    else:
        # if the new size if lower than the old one, we take the current embeddings until the new size
        current_weights = tf.slice(
            old_embeddings.value(),
            tf.convert_to_tensor([0, 0]),
            tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
        )
        mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)
    return mask, current_weights
class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
    """Base class for all TF pretrained models.

    Holds the model configuration and provides the shared machinery for loading and
    saving weights (`from_pretrained` / `save_pretrained`), resizing the token
    embeddings, and SavedModel serving."""

    # Configuration class associated with this architecture; set by each subclass.
    config_class = None
    # Attribute name under which head models store their base (headless) model.
    base_model_prefix = ""
    # a list of re pattern of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re pattern of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None
    # Whether loading this model must happen under a name-scope prefix (see `from_pretrained`).
    _requires_load_weight_prefix = False

    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """Dummy inputs used to build the network (i.e. trigger variable creation)."""
        return {
            "input_ids": tf.constant(DUMMY_INPUTS),
        }
    def __init__(self, config, *inputs, **kwargs):
        """Store the configuration on the model.

        Args:
            config: the model configuration; must be a `PretrainedConfig` instance.
            *inputs: forwarded to `tf.keras.Model.__init__`.
            **kwargs: forwarded to `tf.keras.Model.__init__`.

        Raises:
            ValueError: if `config` is not a `PretrainedConfig`.
        """
        super().__init__(*inputs, **kwargs)
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path
    @classmethod
    def _from_config(cls, config, **kwargs):
        """Instantiate the model from a configuration only (no weight loading)."""
        return cls(config, **kwargs)
    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )
    def serving(self, inputs):
        """SavedModel serving entry point: run the model and post-process via `serving_output`."""
        output = self.call(inputs)
        return self.serving_output(output)
def serving_output(output):
raise NotImplementedError
def get_input_embeddings(self) -> tf.keras.layers.Layer:
main_layer = getattr(self, self.base_model_prefix, self)
if main_layer is not self:
return main_layer.get_input_embeddings()
else:
raise NotImplementedError
    def set_input_embeddings(self, value):
        """Set the input-token embeddings on the base model layer.

        Args:
            value: the new embedding weights.

        Raises:
            NotImplementedError: if the base model layer attribute is None.
        """
        main_layer = getattr(self, self.base_model_prefix)
        if main_layer is None:
            raise NotImplementedError("The model does not implements the base_model_prefix attribute.")
        try:
            main_layer.set_input_embeddings(value)
        except AttributeError:
            # Embeddings may not exist yet if the model was never built; build and retry.
            logger.info("Building the model")
            self(self.dummy_inputs)
            main_layer.set_input_embeddings(value)
def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_output_embeddings()
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
return lm_head().get_output_embeddings()
return None # Overwrite for models with output embeddings
    def set_output_embeddings(self, value):
        """Set the output (LM head) embeddings; no-op for models without an LM head.

        Args:
            value: the new output embedding weights.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                lm_head.set_output_embeddings(value)
            except AttributeError:
                # Head not built yet; build the model and retry.
                logger.info("Building the model")
                self(self.dummy_inputs)
                lm_head.set_output_embeddings(value)
    def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
        """Deprecated alias for `get_lm_head` (kept for backward compatibility)."""
        warnings.warn(
            "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
        )
        return self.get_lm_head()
    def get_prefix_bias_name(self) -> Union[None, str]:
        """Deprecated; always returns None. Use `get_bias` instead."""
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return None
    def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
        """Return the LM head bias variables keyed by attribute name, or None without a head."""
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                return lm_head.get_bias()
            except AttributeError:
                # Head not built yet; build the model with dummy inputs and retry.
                self(self.dummy_inputs)
                return lm_head.get_bias()
        return None
    def set_bias(self, value):
        """Set the LM head bias variables; no-op for models without an LM head.

        Args:
            value: dict of bias variables keyed by attribute name (see `get_bias`).
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                lm_head.set_bias(value)
            except AttributeError:
                # Head not built yet; build the model and retry.
                self(self.dummy_inputs)
                lm_head.set_bias(value)
    def get_lm_head(self) -> tf.keras.layers.Layer:
        """Return the LM head layer; base implementation returns None (no head).
        Subclasses with a language-modeling head override this."""
        return None
    def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
        """Resize the input token embeddings (and tied output head, if any) to
        `new_num_tokens` rows, updating `config.vocab_size` accordingly.

        Args:
            new_num_tokens: target vocabulary size; None (or the current size) is a no-op.

        Returns:
            The (possibly resized) input embedding weights.
        """
        if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
            return self._get_word_embedding_weight(self.get_input_embeddings())
        model_embeds = self._resize_token_embeddings(new_num_tokens)
        # Update base model and current model config
        self.config.vocab_size = new_num_tokens
        return model_embeds
    def _get_word_embedding_weight(model, embedding_layer):
        """Extract the actual weight variable from an embedding layer.

        NOTE: the first parameter is named `model` but plays the role of `self`.

        Looks for a `weight` attribute first, then a `decoder` attribute; if neither
        exists, the model is built once and the lookup retried.
        """
        embeds = getattr(embedding_layer, "weight", None)
        if embeds is not None:
            return embeds
        embeds = getattr(embedding_layer, "decoder", None)
        if embeds is not None:
            return embeds
        # The reason why the attributes don't exist might be
        # that the model is not built yet, so build it with dummy inputs and retry.
        model(model.dummy_inputs)
        embeds = getattr(embedding_layer, "weight", None)
        if embeds is not None:
            return embeds
        embeds = getattr(embedding_layer, "decoder", None)
        if embeds is not None:
            return embeds
        return None
    def _resize_token_embeddings(self, new_num_tokens):
        """Resize the input embeddings and, when present, the LM head bias and decoder.

        Returns:
            The new input embedding layer.
        """
        old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        # Resize the head bias if the model has one.
        if self.get_bias() is not None:
            old_lm_head_bias = self.get_bias()
            new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)
            self.set_bias(new_lm_head_bias)
        # Resize the output decoder weights if the model has output embeddings.
        if self.get_output_embeddings() is not None:
            old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
            new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)
            self.set_output_embeddings(new_lm_head_decoder)
        self.set_input_embeddings(new_embeddings)
        return self.get_input_embeddings()
    def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
        """Build resized LM head bias variables, copying the old values.

        Args:
            old_lm_head_bias: dict of bias variables keyed by attribute name
                (rank-1 `[num_tokens]` or rank-2 `[first_dim, num_tokens]`).
            new_num_tokens: target vocabulary size.

        Returns:
            dict: new zero-initialized bias variables with the old values copied in.
        """
        new_lm_head_bias = {}
        for attr, weight in old_lm_head_bias.items():
            # Normalize rank-1 and rank-2 biases: the token axis is the last one.
            first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
            size_diff = new_num_tokens - old_num_tokens
            final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]
            if tf.math.greater(size_diff, 0):
                # Growing: pad the old bias and build a mask marking the copied entries.
                padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
                current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
                num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
                mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
                bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
                bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
            else:
                # Shrinking: keep the leading slice of the old bias; everything is copied.
                slice_from = [0] if first_dim is None else [0, 0]
                current_bias = tf.slice(
                    weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
                )
                bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)
            new_bias = self.add_weight(
                shape=final_shape,
                initializer="zeros",
                trainable=True,
                name=weight.name.split(":")[0],
            )
            # Take old values where the mask is True, fresh zeros elsewhere.
            init_bias = tf.where(bias_mask, current_bias, new_bias.value())
            new_bias.assign(init_bias)
            new_lm_head_bias[attr] = new_bias
        return new_lm_head_bias
    def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
        """Build a resized LM head decoder weight, unless the decoder is tied to the
        input embeddings (in which case the old variable is returned unchanged —
        resizing the input embeddings already covers it).
        """
        new_lm_head_decoder = old_lm_head_decoder
        is_input_output_equals = tf.reduce_any(
            self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
        )
        if old_lm_head_decoder is not None and not is_input_output_equals:
            old_embedding_dim = shape_list(old_lm_head_decoder)[1]
            decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
            new_lm_head_decoder = self.add_weight(
                shape=(new_num_tokens, old_embedding_dim),
                initializer="zeros",
                trainable=True,
                name=old_lm_head_decoder.name.split(":")[0],
            )
            # Copy the old rows; rows beyond the old size keep the fresh initialization.
            init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())
            new_lm_head_decoder.assign(init_decoder)
        return new_lm_head_decoder
    def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
        """Build a resized input embedding variable, copying the old rows and drawing
        new rows from a truncated-normal initializer (std `config.initializer_range`,
        defaulting to 0.02)."""
        old_embedding_dim = shape_list(old_embeddings)[1]
        init_range = getattr(self.config, "initializer_range", 0.02)
        embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
        new_embeddings = self.add_weight(
            name=old_embeddings.name.split(":")[0],
            shape=[new_num_tokens, old_embedding_dim],
            initializer=get_initializer(init_range),
            dtype=tf.float32,
        )
        # Keep old values where the mask is True; new rows keep the fresh initialization.
        init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())
        new_embeddings.assign(init_embeddings)
        return new_embeddings
    def prune_heads(self, heads_to_prune):
        """Prune attention heads; not implemented for TF models in this base class."""
        raise NotImplementedError
    def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
        """Save the model configuration and weights to `save_directory` so they can be
        reloaded with `from_pretrained`.

        Args:
            save_directory: target directory (created if needed; must not be a file).
            saved_model: also export a TF SavedModel under `saved_model/<version>`.
            version: SavedModel version subdirectory.
            push_to_hub: push the saved files to the model hub after saving.
            **kwargs: forwarded to the repo creation/push helpers.
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo = self._create_or_get_repo(save_directory, **kwargs)
        os.makedirs(save_directory, exist_ok=True)
        if saved_model:
            saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
            self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
            logger.info(f"Saved model created in {saved_model_dir}")
        # Record the architecture name, dropping the leading "TF" prefix.
        self.config.architectures = [self.__class__.__name__[2:]]
        self.config.save_pretrained(save_directory)
        output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
        self.save_weights(output_model_file)
        logger.info(f"Model weights saved in {output_model_file}")
        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Model pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop("config", None)
cache_dir = kwargs.pop("cache_dir", None)
from_pt = kwargs.pop("from_pt", False)
ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
load_weight_prefix = kwargs.pop("load_weight_prefix", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
_from_auto=from_auto_class,
_from_pipeline=from_pipeline,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint in priority if from_pt
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
else:
raise EnvironmentError(
f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory "
f"{pretrained_model_name_or_path} or `from_pt` set to False"
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME),
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info(f"loading weights file {archive_file}")
else:
logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")
model = cls(config, *model_args, **model_kwargs)
if from_pt:
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)
if load_weight_prefix is not None:
with tf.compat.v1.variable_scope(load_weight_prefix):
model(model.dummy_inputs)
else:
model(model.dummy_inputs)
assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"
:
missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
model,
resolved_archive_file,
ignore_mismatched_sizes=ignore_mismatched_sizes,
_prefix=load_weight_prefix,
)
except OSError as e:
try:
with open(resolved_archive_file) as f:
if f.read().startswith("version"):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please install "
"git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
"you cloned."
)
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise OSError(
"Unable to load weights from h5 file. "
"If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
)
model(model.dummy_inputs)
if cls._keys_to_ignore_on_load_missing is not None:
for pat in cls._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls._keys_to_ignore_on_load_unexpected is not None:
for pat in cls._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
elif len(mismatched_keys) == 0:
logger.warning(
f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(mismatched_keys) > 0:
mismatched_warning = "\n".join(
[
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
for key, shape1, shape2 in mismatched_keys
]
)
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"mismatched_keys": mismatched_keys,
}
return model, loading_info
return model
# Rebind `push_to_hub` to a copy so its docstring can be specialized for models
# without mutating the shared implementation inherited from PushToHubMixin.
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
    object="model", object_class="TFAutoModel", object_files="model checkpoint"
)
class TFConv1D(tf.keras.layers.Layer):
    """1D "convolution" layer: behaves like a dense layer but stores its weight
    transposed, with shape (nx, nf) = (input features, output features)."""

    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
        """Args:
            nf: number of output features.
            nx: number of input features.
            initializer_range: std of the truncated-normal weight initializer.
        """
        super().__init__(**kwargs)
        self.nf = nf
        self.nx = nx
        self.initializer_range = initializer_range

    def build(self, input_shape):
        # Weight is (nx, nf); bias is broadcast over the flattened token axis.
        self.weight = self.add_weight(
            "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
        )
        self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())

    def call(self, x):
        """Project (batch, seq, nx) -> (batch, seq, nf)."""
        batch_size, seq_len = shape_list(x)[:2]
        flattened = tf.reshape(x, [-1, self.nx])
        projected = tf.matmul(flattened, self.weight) + self.bias
        return tf.reshape(projected, [batch_size, seq_len, self.nf])
class TFSharedEmbeddings(tf.keras.layers.Layer):
    """Token-embedding layer whose weight matrix is shared between the embedding
    lookup ("embedding" mode) and the output projection onto the vocabulary
    ("linear" mode)."""

    def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
        """Args:
            vocab_size: number of rows in the embedding matrix.
            hidden_size: embedding dimension.
            initializer_range: std of the weight initializer; defaults to hidden_size**-0.5.
        """
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range

    def build(self, input_shape):
        # One (vocab_size, hidden_size) matrix reused by both modes.
        self.weight = self.add_weight(
            "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
        )
        super().build(input_shape)

    def get_config(self):
        parent_config = super().get_config()
        own_config = {
            "vocab_size": self.vocab_size,
            "hidden_size": self.hidden_size,
            "initializer_range": self.initializer_range,
        }
        return dict(list(parent_config.items()) + list(own_config.items()))

    def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
        """Dispatch on `mode`: "embedding" looks tokens up, "linear" projects onto the vocab."""
        if mode == "embedding":
            return self._embedding(inputs)
        if mode == "linear":
            return self._linear(inputs)
        raise ValueError(f"mode {mode} is not valid.")

    def _embedding(self, input_ids):
        # Gather the rows of the shared matrix for the given token ids.
        return tf.gather(self.weight, input_ids)

    def _linear(self, inputs):
        # (..., hidden_size) -> (..., vocab_size) via x @ W^T.
        leading_dims = shape_list(inputs)[:-1]
        flattened = tf.reshape(inputs, [-1, self.hidden_size])
        logits = tf.matmul(flattened, self.weight, transpose_b=True)
        return tf.reshape(logits, leading_dims + [self.vocab_size])
class TFSequenceSummary(tf.keras.layers.Layer):
    """Summarize a sequence of hidden states into one vector per example, following the
    `summary_*` options of the configuration (summary type, optional projection,
    optional tanh activation, optional dropouts)."""

    def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
        super().__init__(**kwargs)
        # NOTE(review): the guard checks `summary_use_proj` but reads `summary_type` —
        # looks like it should be hasattr(config, "summary_type"); confirm before changing.
        self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last"
        if self.summary_type == "attn":
            raise NotImplementedError
        self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
        if self.has_summary:
            # Project either onto the label space or back onto the hidden size.
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = tf.keras.layers.Dense(
                num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
            )
        self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh"
        if self.has_activation:
            self.activation = tf.keras.activations.tanh
        self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
        if self.has_first_dropout:
            self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)
        self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
        if self.has_last_dropout:
            self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)

    def call(self, inputs, cls_index=None, training=False):
        """Compute the summary.

        Args:
            inputs: hidden states tensor, or (hidden_states, cls_index) tuple/list,
                or a dict with keys "hidden_states" and optionally "cls_index".
            cls_index: token index per example for the "cls_index" summary type;
                defaults to the last token.
            training: passed to the dropout layers.
        """
        if not isinstance(inputs, (dict, tuple, list)):
            hidden_states = inputs
        elif isinstance(inputs, (tuple, list)):
            hidden_states = inputs[0]
            cls_index = inputs[1] if len(inputs) > 1 else None
            assert len(inputs) <= 2, "Too many inputs."
        else:
            hidden_states = inputs.get("hidden_states")
            cls_index = inputs.get("cls_index", None)
        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = tf.reduce_mean(hidden_states, axis=1)
        elif self.summary_type == "cls_index":
            hidden_shape = shape_list(hidden_states)
            if cls_index is None:
                # Default to the last token of each sequence.
                cls_index = tf.fill(
                    hidden_shape[:-2], hidden_shape[-2] - 1
                )
            cls_shape = shape_list(cls_index)
            if len(cls_shape) <= len(hidden_shape) - 2:
                cls_index = tf.expand_dims(cls_index, axis=-1)
            output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
            # Drop the gathered singleton sequence axis.
            output = tf.squeeze(
                output, axis=len(hidden_shape) - 2
            )
        elif self.summary_type == "attn":
            raise NotImplementedError
        if self.has_first_dropout:
            output = self.first_dropout(output, training=training)
        if self.has_summary:
            output = self.summary(output)
        if self.has_activation:
            output = self.activation(output)
        if self.has_last_dropout:
            output = self.last_dropout(output, training=training)
        return output
def shape_list(tensor: tf.Tensor) -> List[int]:
    """Return the shape of *tensor*, preferring statically known dimensions.

    Each entry is the static Python int when TensorFlow knows it; otherwise
    the corresponding element of the dynamic ``tf.shape`` tensor is used.
    """
    dynamic_shape = tf.shape(tensor)
    # Fully unknown rank: nothing static to merge in.
    if tensor.shape == tf.TensorShape(None):
        return dynamic_shape
    merged = []
    for axis, static_dim in enumerate(tensor.shape.as_list()):
        merged.append(dynamic_shape[axis] if static_dim is None else static_dim)
    return merged
def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
    """Build a truncated-normal weight initializer.

    Parameters
    ----------
    initializer_range : float
        Standard deviation of the truncated normal distribution.
    """
    truncated_normal = tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
    return truncated_normal
class TFWrappedEmbeddings:
    """
    Wrap a Keras embedding-like layer so it can be called under a fixed
    absolute variable scope.

    When *abs_scope_name* is given, calls re-enter that ``tf.compat.v1``
    variable scope (without adding an auxiliary name scope), so the wrapped
    layer's variables live at a stable path regardless of the caller's
    scope -- presumably to support weight sharing; confirm against callers.
    """
    def __init__(self, layer, abs_scope_name=None):
        # layer: wrapped layer, expected to accept (inputs, mode).
        self._layer = layer
        self._abs_scope_name = abs_scope_name
    def call(self, inputs, mode="embedding"):
        # Explicit .call() bypasses the wrapped layer's Keras __call__
        # machinery.
        if self._abs_scope_name is None:
            return self._layer.call(inputs, mode)
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer.call(inputs, mode)
    def __call__(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer(inputs, mode)
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer(inputs, mode)
| true | true |
1c47973c175cf48b3b9eebccc97189614023378a | 3,319 | py | Python | zerver/lib/sessions.py | DD2480-group7-2020/zulip | 9a1e18bcf383c38c35da168563a7345768c6d784 | [
"Apache-2.0"
] | 1 | 2020-03-17T14:58:50.000Z | 2020-03-17T14:58:50.000Z | zerver/lib/sessions.py | DD2480-group7-2020/zulip | 9a1e18bcf383c38c35da168563a7345768c6d784 | [
"Apache-2.0"
] | null | null | null | zerver/lib/sessions.py | DD2480-group7-2020/zulip | 9a1e18bcf383c38c35da168563a7345768c6d784 | [
"Apache-2.0"
] | null | null | null | import logging
from datetime import timedelta
from django.conf import settings
from django.contrib.auth import SESSION_KEY, get_user_model
from django.contrib.sessions.models import Session
from django.utils.timezone import now as timezone_now
from importlib import import_module
from typing import Any, List, Mapping, Optional
from zerver.models import Realm, UserProfile, get_user_profile_by_id
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
session_engine = import_module(settings.SESSION_ENGINE)
def get_session_dict_user(session_dict: Mapping[str, int]) -> Optional[int]:
    """Extract the authenticated user's id from a decoded session dict.

    Mirrors django.contrib.auth._get_user_session_key, but returns None
    (instead of raising) when the session has no logged-in user.
    """
    user_model = get_user_model()
    try:
        return user_model._meta.pk.to_python(session_dict[SESSION_KEY])
    except KeyError:
        return None
def get_session_user(session: Session) -> Optional[int]:
    """Return the id of the user who owns *session*, or None if anonymous."""
    decoded_session = session.get_decoded()
    return get_session_dict_user(decoded_session)
def user_sessions(user_profile: UserProfile) -> List[Session]:
    """Return every session in the database belonging to *user_profile*."""
    matching = (session for session in Session.objects.all()
                if get_session_user(session) == user_profile.id)
    return list(matching)
def delete_session(session: Session) -> None:
    """Delete *session* through the configured session-engine backend.

    Going through the engine (rather than deleting the DB row directly)
    lets backend-specific storage (cache entries, files, ...) be cleaned up.
    """
    session_engine.SessionStore(session.session_key).delete()  # type: ignore # import_module
def delete_user_sessions(user_profile: UserProfile) -> None:
    """Log *user_profile* out everywhere by deleting all of their sessions."""
    for session in user_sessions(user_profile):
        delete_session(session)
def delete_realm_user_sessions(realm: Realm) -> None:
    """Delete the sessions of every user belonging to *realm*.

    Only unexpired sessions are scanned, since expired ones are already
    dead.
    """
    # Use a set for O(1) membership tests; a list would make this scan
    # O(len(sessions) * len(realm users)).
    realm_user_ids = {user_profile.id for user_profile in
                      UserProfile.objects.filter(realm=realm)}
    for session in Session.objects.filter(expire_date__gte=timezone_now()):
        if get_session_user(session) in realm_user_ids:
            delete_session(session)
def delete_all_user_sessions() -> None:
    """Wipe every session in the database, logging out all users."""
    all_sessions = Session.objects.all()
    for session in all_sessions:
        delete_session(session)
def delete_all_deactivated_user_sessions() -> None:
    """Delete sessions of users who are deactivated or whose realm is."""
    for session in Session.objects.all():
        user_profile_id = get_session_user(session)
        if user_profile_id is None:  # nocoverage # TODO: Investigate why we lost coverage on this
            continue
        user_profile = get_user_profile_by_id(user_profile_id)
        if not user_profile.is_active or user_profile.realm.deactivated:
            # Lazy %-style args: formatting happens only if the record is
            # actually emitted (and never raises inside string building).
            logging.info("Deactivating session for deactivated user %s",
                         user_profile.id)
            delete_session(session)
def set_expirable_session_var(session: Session, var_name: str, var_value: Any, expiry_seconds: int) -> None:
    """Store *var_value* under *var_name* in *session*, expiring after *expiry_seconds*."""
    expiry_time = timezone_now() + timedelta(seconds=expiry_seconds)
    session[var_name] = {'value': var_value,
                        'expire_at': datetime_to_timestamp(expiry_time)}
def get_expirable_session_var(session: Session, var_name: str, default_value: Any=None,
                              delete: bool=False) -> Any:
    """Retrieve a variable stored by `set_expirable_session_var`.

    Returns *default_value* when the variable is missing, malformed, or has
    expired.  Expired entries are removed; a live entry is also removed when
    *delete* is True (a one-shot read).
    """
    if var_name not in session:
        return default_value
    try:
        value, expire_at = (session[var_name]['value'], session[var_name]['expire_at'])
    except (KeyError, TypeError) as e:
        # The entry doesn't have the expected {'value', 'expire_at'} shape;
        # treat it as absent.  Lazy %-args defer formatting until emission.
        logging.warning("get_expirable_session_var: Variable %s: %s",
                        var_name, e)
        return default_value
    if timestamp_to_datetime(expire_at) < timezone_now():
        del session[var_name]
        return default_value
    if delete:
        del session[var_name]
    return value
| 40.975309 | 108 | 0.730642 | import logging
from datetime import timedelta
from django.conf import settings
from django.contrib.auth import SESSION_KEY, get_user_model
from django.contrib.sessions.models import Session
from django.utils.timezone import now as timezone_now
from importlib import import_module
from typing import Any, List, Mapping, Optional
from zerver.models import Realm, UserProfile, get_user_profile_by_id
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
session_engine = import_module(settings.SESSION_ENGINE)
def get_session_dict_user(session_dict: Mapping[str, int]) -> Optional[int]:
try:
return get_user_model()._meta.pk.to_python(session_dict[SESSION_KEY])
except KeyError:
return None
def get_session_user(session: Session) -> Optional[int]:
return get_session_dict_user(session.get_decoded())
def user_sessions(user_profile: UserProfile) -> List[Session]:
return [s for s in Session.objects.all()
if get_session_user(s) == user_profile.id]
def delete_session(session: Session) -> None:
session_engine.SessionStore(session.session_key).delete() er_sessions(user_profile: UserProfile) -> None:
for session in Session.objects.all():
if get_session_user(session) == user_profile.id:
delete_session(session)
def delete_realm_user_sessions(realm: Realm) -> None:
realm_user_ids = [user_profile.id for user_profile in
UserProfile.objects.filter(realm=realm)]
for session in Session.objects.filter(expire_date__gte=timezone_now()):
if get_session_user(session) in realm_user_ids:
delete_session(session)
def delete_all_user_sessions() -> None:
for session in Session.objects.all():
delete_session(session)
def delete_all_deactivated_user_sessions() -> None:
for session in Session.objects.all():
user_profile_id = get_session_user(session)
if user_profile_id is None: _user_profile_by_id(user_profile_id)
if not user_profile.is_active or user_profile.realm.deactivated:
logging.info("Deactivating session for deactivated user %s" % (user_profile.id,))
delete_session(session)
def set_expirable_session_var(session: Session, var_name: str, var_value: Any, expiry_seconds: int) -> None:
expire_at = datetime_to_timestamp(timezone_now() + timedelta(seconds=expiry_seconds))
session[var_name] = {'value': var_value, 'expire_at': expire_at}
def get_expirable_session_var(session: Session, var_name: str, default_value: Any=None,
delete: bool=False) -> Any:
if var_name not in session:
return default_value
try:
value, expire_at = (session[var_name]['value'], session[var_name]['expire_at'])
except (KeyError, TypeError) as e:
logging.warning("get_expirable_session_var: Variable {}: {}".format(var_name, e))
return default_value
if timestamp_to_datetime(expire_at) < timezone_now():
del session[var_name]
return default_value
if delete:
del session[var_name]
return value
| true | true |
1c47973f15053c421fd0ceb6b824666a3ce5fbc4 | 50,742 | py | Python | Funções Analíticas/Virtualenv/Lib/site-packages/matplotlib/__init__.py | Leonardo-Maciel/PSO_Maciel | 3939448da45716260f3ac7811afdd13be670f346 | [
"MIT"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | Funções Analíticas/Virtualenv/Lib/site-packages/matplotlib/__init__.py | Leonardo-Maciel/PSO_Maciel | 3939448da45716260f3ac7811afdd13be670f346 | [
"MIT"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | Funções Analíticas/Virtualenv/Lib/site-packages/matplotlib/__init__.py | Leonardo-Maciel/PSO_Maciel | 3939448da45716260f3ac7811afdd13be670f346 | [
"MIT"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | """
An object-oriented plotting library.
A procedural interface is provided by the companion pyplot module,
which may be imported directly, e.g.::
import matplotlib.pyplot as plt
or using ipython::
ipython
at your terminal, followed by::
In [1]: %matplotlib
In [2]: import matplotlib.pyplot as plt
at the ipython shell prompt.
For the most part, direct use of the object-oriented library is encouraged when
programming; pyplot is primarily for working interactively. The exceptions are
the pyplot functions `.pyplot.figure`, `.pyplot.subplot`, `.pyplot.subplots`,
and `.pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
The `~.axes.Axes` class. Most pyplot functions are wrappers for
`~.axes.Axes` methods. The axes module is the highest level of OO
access to the library.
:mod:`matplotlib.figure`
The `.Figure` class.
:mod:`matplotlib.artist`
The `.Artist` base class for all classes that draw things.
:mod:`matplotlib.lines`
The `.Line2D` class for drawing lines and markers.
:mod:`matplotlib.patches`
Classes for drawing polygons.
:mod:`matplotlib.text`
The `.Text` and `.Annotation` classes.
:mod:`matplotlib.image`
The `.AxesImage` and `.FigureImage` classes.
:mod:`matplotlib.collections`
Classes for efficient drawing of groups of lines or polygons.
:mod:`matplotlib.colors`
Color specifications and making colormaps.
:mod:`matplotlib.cm`
Colormaps, and the `.ScalarMappable` mixin class for providing color
mapping functionality to other classes.
:mod:`matplotlib.ticker`
Calculation of tick mark locations and formatting of tick labels.
:mod:`matplotlib.backends`
A subpackage with modules for various GUI libraries and output formats.
The base matplotlib namespace includes:
`~matplotlib.rcParams`
Default configuration settings; their defaults may be overridden using
a :file:`matplotlibrc` file.
`~matplotlib.use`
Setting the Matplotlib backend. This should be called before any
figure is created, because it is not possible to switch between
different GUI backends after that.
Matplotlib was initially written by John D. Hunter (1968-2012) and is now
developed and maintained by a host of others.
Occasionally the internal documentation (python docstrings) will refer
to MATLAB®, a registered trademark of The MathWorks, Inc.
"""
import atexit
from collections import namedtuple
from collections.abc import MutableMapping
import contextlib
from distutils.version import LooseVersion
import functools
import importlib
import inspect
from inspect import Parameter
import locale
import logging
import os
from pathlib import Path
import pprint
import re
import shutil
import subprocess
import sys
import tempfile
import warnings
# cbook must import matplotlib only within function
# definitions, so it is safe to import from it here.
from . import cbook, rcsetup
from matplotlib.cbook import MatplotlibDeprecationWarning, sanitize_sequence
from matplotlib.cbook import mplDeprecation # deprecated
from matplotlib.rcsetup import validate_backend, cycler
import numpy
# Get the version from the _version.py versioneer file. For a git checkout,
# this is computed based on the number of commits since the last tag.
from ._version import get_versions
__version__ = str(get_versions()['version'])
del get_versions
_log = logging.getLogger(__name__)
__bibtex__ = r"""@Article{Hunter:2007,
Author = {Hunter, J. D.},
Title = {Matplotlib: A 2D graphics environment},
Journal = {Computing in Science \& Engineering},
Volume = {9},
Number = {3},
Pages = {90--95},
abstract = {Matplotlib is a 2D graphics package used for Python
for application development, interactive scripting, and
publication-quality image generation across user
interfaces and operating systems.},
publisher = {IEEE COMPUTER SOC},
year = 2007
}"""
@cbook.deprecated("3.2")
def compare_versions(a, b):
    """Return whether version *a* is greater than or equal to version *b*."""
    if isinstance(a, bytes):
        cbook.warn_deprecated(
            "3.0", message="compare_versions arguments should be strs.")
        a = a.decode('ascii')
    if isinstance(b, bytes):
        cbook.warn_deprecated(
            "3.0", message="compare_versions arguments should be strs.")
        b = b.decode('ascii')
    # An empty or None *a* never compares as new enough.
    if not a:
        return False
    return LooseVersion(a) >= LooseVersion(b)
def _check_versions():
    """Raise ImportError if any hard runtime dependency is missing or too old."""
    # Quickfix to ensure Microsoft Visual C++ redistributable
    # DLLs are loaded before importing kiwisolver
    from . import ft2font
    for modname, minver in [
            ("cycler", "0.10"),
            ("dateutil", "2.1"),
            ("kiwisolver", "1.0.1"),
            ("numpy", "1.15"),
            ("pyparsing", "2.0.1"),
    ]:
        module = importlib.import_module(modname)
        if LooseVersion(module.__version__) < minver:
            raise ImportError("Matplotlib requires {}>={}; you have {}"
                              .format(modname, minver, module.__version__))
# Run the check at import time, before anything else can fail confusingly.
_check_versions()
# The decorator ensures this always returns the same handler (and it is only
# attached once).
@functools.lru_cache()
def _ensure_handler():
    """
    Attach a `StreamHandler` (formatted like `logging.basicConfig`) to the
    Matplotlib root logger on the first call, and -- thanks to the
    ``lru_cache`` -- return that very same handler on every later call.
    """
    stream_handler = logging.StreamHandler()
    formatter = logging.Formatter(logging.BASIC_FORMAT)
    stream_handler.setFormatter(formatter)
    _log.addHandler(stream_handler)
    return stream_handler
def set_loglevel(level):
    """
    Set the level of Matplotlib's root logger and of its handler, creating
    the handler first if it does not exist yet.

    Call ``set_loglevel("info")`` or ``set_loglevel("debug")`` to get extra
    debugging output.

    Parameters
    ----------
    level : {"notset", "debug", "info", "warning", "error", "critical"}
        The log level of the handler.

    Notes
    -----
    The first call attaches an additional handler to Matplotlib's root
    logger; that handler is reused afterwards, and this function simply
    adjusts the logger's and handler's levels.
    """
    level_name = level.upper()
    _log.setLevel(level_name)
    handler = _ensure_handler()
    handler.setLevel(level_name)
def _logged_cached(fmt, func=None):
    """
    Decorator that logs a function's return value at DEBUG level (using the
    %-format string *fmt*) on the first call, and memoizes that value so
    later calls return it directly.

    Usable both as ``@_logged_cached(fmt)`` and as a direct call
    ``_logged_cached(fmt, func)``.
    """
    if func is None:
        # Invoked as ``@_logged_cached(fmt)``: return the real decorator.
        return functools.partial(_logged_cached, fmt)

    cache = []  # Empty until the first successful call; then [result].

    @functools.wraps(func)
    def wrapper(**kwargs):
        if not cache:
            result = func(**kwargs)
            _log.debug(fmt, result)
            cache.append(result)
        return cache[0]

    return wrapper
# (executable, version) pair describing an optional external tool; *version*
# is a LooseVersion (or None if it could not be determined).
_ExecInfo = namedtuple("_ExecInfo", "executable version")
class ExecutableNotFoundError(FileNotFoundError):
    """
    Raised when an external executable that Matplotlib optionally depends
    on (e.g. ghostscript, dvipng, inkscape) cannot be found, or is older
    than the minimum version Matplotlib supports.
    """
@functools.lru_cache()
def _get_executable_info(name):
    """
    Get the version of some executable that Matplotlib optionally depends on.

    .. warning:
        The list of executables that this function supports is set according
        to Matplotlib's internal needs, and may change without notice.

    Parameters
    ----------
    name : str
        The executable to query.  The following values are currently
        supported: "dvipng", "gs", "inkscape", "magick", "pdftops".  This
        list is subject to change without notice.

    Returns
    -------
    _ExecInfo
        A namedtuple with fields ``executable`` (`str`) and ``version``
        (`distutils.version.LooseVersion`, or ``None`` if the version cannot
        be determined).

    Raises
    ------
    ExecutableNotFoundError
        If the executable is not found or older than the oldest version
        supported by Matplotlib.
    ValueError
        If the executable is not one that we know how to query.
    """
    def impl(args, regex, min_ver=None, ignore_exit_code=False):
        # Execute the subprocess specified by args; capture stdout and stderr.
        # Search for a regex match in the output; if the match succeeds, the
        # first group of the match is the version.
        # Return an _ExecInfo if the executable exists, and has a version of
        # at least min_ver (if set); else, raise ExecutableNotFoundError.
        try:
            output = subprocess.check_output(
                args, stderr=subprocess.STDOUT,
                universal_newlines=True, errors="replace")
        except subprocess.CalledProcessError as _cpe:
            if ignore_exit_code:
                # Some tools (e.g. pdftops -v) exit nonzero even on success.
                output = _cpe.output
            else:
                raise ExecutableNotFoundError(str(_cpe)) from _cpe
        except OSError as _ose:
            raise ExecutableNotFoundError(str(_ose)) from _ose
        match = re.search(regex, output)
        if match:
            version = LooseVersion(match.group(1))
            if min_ver is not None and version < min_ver:
                raise ExecutableNotFoundError(
                    f"You have {args[0]} version {version} but the minimum "
                    f"version supported by Matplotlib is {min_ver}")
            return _ExecInfo(args[0], version)
        else:
            raise ExecutableNotFoundError(
                f"Failed to determine the version of {args[0]} from "
                f"{' '.join(args)}, which output {output}")
    if name == "dvipng":
        return impl(["dvipng", "-version"], "(?m)^dvipng(?: .*)? (.+)", "1.6")
    elif name == "gs":
        execs = (["gswin32c", "gswin64c", "mgs", "gs"]  # "mgs" for miktex.
                 if sys.platform == "win32" else
                 ["gs"])
        # Try each candidate name in turn; first one that works wins.
        for e in execs:
            try:
                return impl([e, "--version"], "(.*)", "9")
            except ExecutableNotFoundError:
                pass
        message = "Failed to find a Ghostscript installation"
        raise ExecutableNotFoundError(message)
    elif name == "inkscape":
        try:
            # Try headless option first (needed for Inkscape version < 1.0):
            return impl(["inkscape", "--without-gui", "-V"],
                        "Inkscape ([^ ]*)")
        except ExecutableNotFoundError:
            pass  # Suppress exception chaining.
        # If --without-gui is not accepted, we may be using Inkscape >= 1.0 so
        # try without it:
        return impl(["inkscape", "-V"], "Inkscape ([^ ]*)")
    elif name == "magick":
        path = None
        if sys.platform == "win32":
            # Check the registry to avoid confusing ImageMagick's convert with
            # Windows's builtin convert.exe.
            import winreg
            binpath = ""
            for flag in [0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY]:
                try:
                    with winreg.OpenKeyEx(
                            winreg.HKEY_LOCAL_MACHINE,
                            r"Software\Imagemagick\Current",
                            0, winreg.KEY_QUERY_VALUE | flag) as hkey:
                        binpath = winreg.QueryValueEx(hkey, "BinPath")[0]
                except OSError:
                    pass
            if binpath:
                for name in ["convert.exe", "magick.exe"]:
                    candidate = Path(binpath, name)
                    if candidate.exists():
                        path = str(candidate)
                        break
        else:
            path = "convert"
        if path is None:
            raise ExecutableNotFoundError(
                "Failed to find an ImageMagick installation")
        return impl([path, "--version"], r"^Version: ImageMagick (\S*)")
    elif name == "pdftops":
        info = impl(["pdftops", "-v"], "^pdftops version (.*)",
                    ignore_exit_code=True)
        if info and not ("3.0" <= info.version
                         # poppler version numbers.
                         or "0.9" <= info.version <= "1.0"):
            raise ExecutableNotFoundError(
                f"You have pdftops version {info.version} but the minimum "
                f"version supported by Matplotlib is 3.0")
        return info
    else:
        raise ValueError("Unknown executable: {!r}".format(name))
@cbook.deprecated("3.2")
def checkdep_ps_distiller(s):
    """Validate the 'ps.usedistiller' rcParam value *s*.

    Return *s* unchanged when the required external tools are available
    (ghostscript always; pdftops additionally for "xpdf"), else False.
    """
    if not s:
        return False
    # Any distiller setting requires ghostscript.
    try:
        _get_executable_info("gs")
    except ExecutableNotFoundError:
        _log.warning(
            "Setting rcParams['ps.usedistiller'] requires ghostscript.")
        return False
    # The "xpdf" distiller additionally requires pdftops.
    if s == "xpdf":
        try:
            _get_executable_info("pdftops")
        except ExecutableNotFoundError:
            _log.warning(
                "Setting rcParams['ps.usedistiller'] to 'xpdf' requires xpdf.")
            return False
    return s
def checkdep_usetex(s):
    """Gate the usetex setting *s* on the availability of TeX tooling.

    Return False (disabling usetex) when *s* is falsy or when TeX, dvipng,
    or ghostscript is missing; return True otherwise.
    """
    if not s:
        return False
    if not shutil.which("tex"):
        _log.warning("usetex mode requires TeX.")
        return False
    # Both helpers below are probed the same way; loop over them.
    for exec_name, display_name in [("dvipng", "dvipng"),
                                    ("gs", "ghostscript")]:
        try:
            _get_executable_info(exec_name)
        except ExecutableNotFoundError:
            _log.warning("usetex mode requires %s.", display_name)
            return False
    return True
@cbook.deprecated("3.2", alternative="os.path.expanduser('~')")
@_logged_cached('$HOME=%s')
def get_home():
    """
    Return the user's home directory.

    If the user's home directory cannot be found, return None.
    """
    try:
        return str(Path.home())
    except Exception:
        # Path.home() raises if the home directory cannot be resolved; any
        # failure here is treated as "no home directory".
        return None
def _get_xdg_config_dir():
    """
    Return the XDG configuration directory, according to the XDG base
    directory spec:
    https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
    """
    config_home = os.environ.get('XDG_CONFIG_HOME')
    if config_home:
        return config_home
    # Unset or empty: fall back to the spec's default location.
    return str(Path.home() / ".config")
def _get_xdg_cache_dir():
    """
    Return the XDG cache directory, according to the XDG base directory spec:
    https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
    """
    cache_home = os.environ.get('XDG_CACHE_HOME')
    if cache_home:
        return cache_home
    # Unset or empty: fall back to the spec's default location.
    return str(Path.home() / ".cache")
def _get_config_or_cache_dir(xdg_base):
    """Return a writable config/cache directory, creating it if needed.

    Resolution order: $MPLCONFIGDIR, then *xdg_base*/matplotlib (Linux and
    FreeBSD only), then ~/.matplotlib.  If the chosen directory cannot be
    created as a writable directory, fall back to a per-process temporary
    directory (removed at interpreter exit).
    """
    configdir = os.environ.get('MPLCONFIGDIR')
    if configdir:
        configdir = Path(configdir).resolve()
    elif sys.platform.startswith(('linux', 'freebsd')) and xdg_base:
        configdir = Path(xdg_base, "matplotlib")
    else:
        configdir = Path.home() / ".matplotlib"
    try:
        configdir.mkdir(parents=True, exist_ok=True)
    except OSError:
        pass
    else:
        if os.access(str(configdir), os.W_OK) and configdir.is_dir():
            return str(configdir)
    # If the config or cache directory cannot be created or is not a writable
    # directory, create a temporary one.
    tmpdir = os.environ["MPLCONFIGDIR"] = \
        tempfile.mkdtemp(prefix="matplotlib-")
    atexit.register(shutil.rmtree, tmpdir)
    _log.warning(
        "Matplotlib created a temporary config/cache directory at %s because "
        "the default path (%s) is not a writable directory; it is highly "
        "recommended to set the MPLCONFIGDIR environment variable to a "
        "writable directory, in particular to speed up the import of "
        "Matplotlib and to better support multiprocessing.",
        tmpdir, configdir)
    return tmpdir
@_logged_cached('CONFIGDIR=%s')
def get_configdir():
    """
    Return the string path of the configuration directory.

    The directory is chosen as follows:

    1. If the MPLCONFIGDIR environment variable is supplied, choose that.
    2. On Linux, follow the XDG specification and look first in
       ``$XDG_CONFIG_HOME``, if defined, or ``$HOME/.config``.  On other
       platforms, choose ``$HOME/.matplotlib``.
    3. If the chosen directory exists and is writable, use that as the
       configuration directory.
    4. Else, create a temporary directory, and use it as the configuration
       directory.
    """
    return _get_config_or_cache_dir(_get_xdg_config_dir())
@_logged_cached('CACHEDIR=%s')
def get_cachedir():
    """
    Return the string path of the cache directory.

    The procedure used to find the directory is the same as for
    _get_config_dir, except using ``$XDG_CACHE_HOME``/``$HOME/.cache``
    instead.
    """
    return _get_config_or_cache_dir(_get_xdg_cache_dir())
@_logged_cached('matplotlib data path: %s')
def get_data_path(*, _from_rc=None):
    """Return the path to Matplotlib data.

    Parameters
    ----------
    _from_rc : str, optional
        Deprecated: a ``datapath`` value coming from a matplotlibrc file.
        If it names an existing directory it is returned as-is; otherwise we
        warn and fall back to the standard search in `_get_data_path`.
    """
    if _from_rc is not None:
        cbook.warn_deprecated(
            "3.2",
            message=("Setting the datapath via matplotlibrc is deprecated "
                     "%(since)s and will be removed %(removal)s."),
            removal='3.4')
        path = Path(_from_rc)
        if path.is_dir():
            return str(path)
        else:
            # Fix of a user-facing typo: the message used to say
            # "matplotribrc".
            warnings.warn(f"You passed datapath: {_from_rc!r} in your "
                          f"matplotlibrc file ({matplotlib_fname()}). "
                          "However this path does not exist, falling back "
                          "to standard paths.")
    return _get_data_path()
@_logged_cached('(private) matplotlib data path: %s')
def _get_data_path():
    """Return the path to the bundled mpl-data directory."""
    path = Path(__file__).with_name("mpl-data")
    if path.is_dir():
        return str(path)
    # Legacy fallbacks for installs where mpl-data is not next to this file.
    cbook.warn_deprecated(
        "3.2", message="Matplotlib installs where the data is not in the "
        "mpl-data subdirectory of the package are deprecated since %(since)s "
        "and support for them will be removed %(removal)s.")
    def get_candidate_paths():
        # setuptools' namespace_packages may hijack this init file
        # so need to try something known to be in Matplotlib, not basemap.
        import matplotlib.afm
        yield Path(matplotlib.afm.__file__).with_name('mpl-data')
        # py2exe zips pure python, so still need special check.
        if getattr(sys, 'frozen', None):
            yield Path(sys.executable).with_name('mpl-data')
            # Try again assuming we need to step up one more directory.
            yield Path(sys.executable).parent.with_name('mpl-data')
        # Try again assuming sys.path[0] is a dir not a exe.
        yield Path(sys.path[0]) / 'mpl-data'
    for path in get_candidate_paths():
        if path.is_dir():
            # NOTE(review): ``defaultParams`` is not defined anywhere in this
            # module view; presumably provided elsewhere -- confirm.
            defaultParams['datapath'][0] = str(path)
            return str(path)
    raise RuntimeError('Could not find the matplotlib data files')
def matplotlib_fname():
    """
    Get the location of the config file.

    The file location is determined in the following order

    - ``$PWD/matplotlibrc``
    - ``$MATPLOTLIBRC`` if it is not a directory
    - ``$MATPLOTLIBRC/matplotlibrc``
    - ``$MPLCONFIGDIR/matplotlibrc``
    - On Linux,
      - ``$XDG_CONFIG_HOME/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME``
        is defined)
      - or ``$HOME/.config/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME``
        is not defined)
    - On other platforms,
      - ``$HOME/.matplotlib/matplotlibrc`` if ``$HOME`` is defined
    - Lastly, it looks in ``$MATPLOTLIBDATA/matplotlibrc``, which should
      always exist.
    """
    def _candidates():
        yield os.path.join(os.getcwd(), 'matplotlibrc')
        matplotlibrc = os.environ.get('MATPLOTLIBRC')
        if matplotlibrc is not None:
            yield matplotlibrc
            yield os.path.join(matplotlibrc, 'matplotlibrc')
        yield os.path.join(get_configdir(), 'matplotlibrc')
        yield os.path.join(_get_data_path(), 'matplotlibrc')

    for candidate in _candidates():
        # Accept the first candidate that is an existing non-directory path.
        if os.path.exists(candidate) and not os.path.isdir(candidate):
            return candidate
    raise RuntimeError("Could not find matplotlibrc file; your Matplotlib "
                       "install is broken")
# rcParams deprecated and automatically mapped to another key.
# Values are tuples of (version, new_name, f_old2new, f_new2old).
_deprecated_map = {}
# rcParams deprecated; some can manually be mapped to another key.
# Values are tuples of (version, new_name_or_None).
_deprecated_ignore_map = {
}
# rcParams deprecated; can use None to suppress warnings; remain actually
# listed in the rcParams (not included in _all_deprecated).
# Values are tuples of (version,)
_deprecated_remain_as_none = {
    'datapath': ('3.2.1',),
    'animation.avconv_path': ('3.3',),
    'animation.avconv_args': ('3.3',),
    'animation.html_args': ('3.3',),
    'mathtext.fallback_to_cm': ('3.3',),
    'keymap.all_axes': ('3.3',),
    'savefig.jpeg_quality': ('3.3',),
    'text.latex.preview': ('3.3',),
}
# Keys that are fully deprecated (mapped away or ignored entirely).
_all_deprecated = {*_deprecated_map, *_deprecated_ignore_map}
class RcParams(MutableMapping, dict):
    """
    A dictionary object including validation.

    Validating functions are defined and associated with rc parameters in
    :mod:`matplotlib.rcsetup`.

    See Also
    --------
    :ref:`customizing-with-matplotlibrc-files`
    """
    # Mapping of rc key -> validator callable, shared by all instances.
    validate = rcsetup._validators
    # validate values on the way in
    def __init__(self, *args, **kwargs):
        self.update(*args, **kwargs)
    def __setitem__(self, key, val):
        # Translate deprecated keys (or drop them), then validate and store.
        try:
            if key in _deprecated_map:
                version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
                cbook.warn_deprecated(
                    version, name=key, obj_type="rcparam", alternative=alt_key)
                key = alt_key
                val = alt_val(val)
            elif key in _deprecated_remain_as_none and val is not None:
                version, = _deprecated_remain_as_none[key]
                cbook.warn_deprecated(
                    version, name=key, obj_type="rcparam")
            elif key in _deprecated_ignore_map:
                version, alt_key = _deprecated_ignore_map[key]
                cbook.warn_deprecated(
                    version, name=key, obj_type="rcparam", alternative=alt_key)
                # Fully-deprecated keys are warned about and dropped.
                return
            elif key == 'backend':
                if val is rcsetup._auto_backend_sentinel:
                    # Don't replace an explicitly chosen backend with the
                    # auto-resolution sentinel.
                    if 'backend' in self:
                        return
            try:
                cval = self.validate[key](val)
            except ValueError as ve:
                raise ValueError(f"Key {key}: {ve}") from None
            dict.__setitem__(self, key, cval)
        except KeyError as err:
            raise KeyError(
                f"{key} is not a valid rc parameter (see rcParams.keys() for "
                f"a list of valid parameters)") from err
    def __getitem__(self, key):
        if key in _deprecated_map:
            version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
            cbook.warn_deprecated(
                version, name=key, obj_type="rcparam", alternative=alt_key)
            return inverse_alt(dict.__getitem__(self, alt_key))
        elif key in _deprecated_ignore_map:
            version, alt_key = _deprecated_ignore_map[key]
            cbook.warn_deprecated(
                version, name=key, obj_type="rcparam", alternative=alt_key)
            return dict.__getitem__(self, alt_key) if alt_key else None
        elif key == "backend":
            val = dict.__getitem__(self, key)
            if val is rcsetup._auto_backend_sentinel:
                # Resolve the sentinel by actually selecting a backend;
                # switch_backend updates the stored value as a side effect.
                from matplotlib import pyplot as plt
                plt.switch_backend(rcsetup._auto_backend_sentinel)
        elif key == "datapath":
            return get_data_path()
        return dict.__getitem__(self, key)
    def __repr__(self):
        class_name = self.__class__.__name__
        indent = len(class_name) + 1
        with cbook._suppress_matplotlib_deprecation_warning():
            repr_split = pprint.pformat(dict(self), indent=1,
                                        width=80 - indent).split('\n')
        repr_indented = ('\n' + ' ' * indent).join(repr_split)
        return '{}({})'.format(class_name, repr_indented)
    def __str__(self):
        return '\n'.join(map('{0[0]}: {0[1]}'.format, sorted(self.items())))
    def __iter__(self):
        """Yield sorted list of keys."""
        with cbook._suppress_matplotlib_deprecation_warning():
            yield from sorted(dict.__iter__(self))
    def __len__(self):
        return dict.__len__(self)
    def find_all(self, pattern):
        """
        Return the subset of this RcParams dictionary whose keys match,
        using :func:`re.search`, the given ``pattern``.

        .. note::

            Changes to the returned dictionary are *not* propagated to
            the parent RcParams dictionary.
        """
        pattern_re = re.compile(pattern)
        return RcParams((key, value)
                        for key, value in self.items()
                        if pattern_re.search(key))
    def copy(self):
        # Bypass __getitem__ so deprecated keys and the backend sentinel are
        # copied as-is, without warnings or backend resolution.
        return {k: dict.__getitem__(self, k) for k in self}
def rc_params(fail_on_error=False):
    """Construct a `RcParams` instance from the default Matplotlib rc file.

    If *fail_on_error* is True, invalid entries in the rc file raise instead
    of merely being warned about and skipped.
    """
    return rc_params_from_file(matplotlib_fname(), fail_on_error)
# Matches the URL schemes for which _open_file_or_url goes through urllib.
URL_REGEX = re.compile(r'^http://|^https://|^ftp://|^file:')
def is_url(filename):
    """Return True if *filename* is an http, https, ftp, or file URL path."""
    match = URL_REGEX.match(filename)
    return match is not None
@functools.lru_cache()
def _get_ssl_context():
    """Return an SSL context using certifi's CA bundle, or None if certifi
    is not installed (memoized)."""
    try:
        import certifi
    except ImportError:
        _log.debug("Could not import certifi.")
        return None
    import ssl
    return ssl.create_default_context(cafile=certifi.where())
@contextlib.contextmanager
def _open_file_or_url(fname):
    """Context manager yielding an iterable of lines from a path or URL.

    For URLs, yields a generator of utf-8-decoded lines; for local paths
    (including `Path` objects), yields the open text-mode file object.
    """
    if not isinstance(fname, Path) and is_url(fname):
        import urllib.request
        ssl_ctx = _get_ssl_context()
        if ssl_ctx is None:
            _log.debug(
                "Could not get certifi ssl context, https may not work."
            )
        with urllib.request.urlopen(fname, context=ssl_ctx) as f:
            yield (line.decode('utf-8') for line in f)
    else:
        fname = os.path.expanduser(fname)
        # Fall back to utf-8 when the locale's preferred encoding is unknown.
        encoding = locale.getpreferredencoding(do_setlocale=False)
        if encoding is None:
            encoding = "utf-8"
        with open(fname, encoding=encoding) as f:
            yield f
def _rc_params_in_file(fname, transform=lambda x: x, fail_on_error=False):
    """
    Construct a `RcParams` instance from file *fname*.

    Unlike `rc_params_from_file`, the configuration class only contains the
    parameters specified in the file (i.e. default values are not filled in).

    Parameters
    ----------
    fname : path-like
        The loaded file.
    transform : callable, default: the identity function
        A function called on each individual line of the file to transform it,
        before further parsing.
    fail_on_error : bool, default: False
        Whether invalid entries should result in an exception or a warning.
    """
    # First pass: collect raw "key: value" pairs, remembering the line for
    # error reporting.
    rc_temp = {}
    with _open_file_or_url(fname) as fd:
        try:
            for line_no, line in enumerate(fd, 1):
                line = transform(line)
                strippedline = line.split('#', 1)[0].strip()
                if not strippedline:
                    continue
                tup = strippedline.split(':', 1)
                if len(tup) != 2:
                    _log.warning('Missing colon in file %r, line %d (%r)',
                                 fname, line_no, line.rstrip('\n'))
                    continue
                key, val = tup
                key = key.strip()
                val = val.strip()
                if key in rc_temp:
                    _log.warning('Duplicate key in file %r, line %d (%r)',
                                 fname, line_no, line.rstrip('\n'))
                rc_temp[key] = (val, line, line_no)
        except UnicodeDecodeError:
            _log.warning('Cannot decode configuration file %s with encoding '
                         '%s, check LANG and LC_* variables.',
                         fname,
                         locale.getpreferredencoding(do_setlocale=False)
                         or 'utf-8 (default)')
            raise
    # Second pass: validate and store the collected pairs.
    config = RcParams()
    for key, (val, line, line_no) in rc_temp.items():
        if key in rcsetup._validators:
            if fail_on_error:
                config[key] = val  # try to convert to proper type or raise
            else:
                try:
                    config[key] = val  # try to convert to proper type or skip
                except Exception as msg:
                    _log.warning('Bad value in file %r, line %d (%r): %s',
                                 fname, line_no, line.rstrip('\n'), msg)
        elif key in _deprecated_ignore_map:
            version, alt_key = _deprecated_ignore_map[key]
            cbook.warn_deprecated(
                version, name=key, alternative=alt_key,
                addendum="Please update your matplotlibrc.")
        else:
            version = 'master' if '.post' in __version__ else f'v{__version__}'
            _log.warning("""
Bad key %(key)s in file %(fname)s, line %(line_no)s (%(line)r)
You probably need to get an updated matplotlibrc file from
https://github.com/matplotlib/matplotlib/blob/%(version)s/matplotlibrc.template
or from the matplotlib source distribution""",
                         dict(key=key, fname=fname, line_no=line_no,
                              line=line.rstrip('\n'), version=version))
    return config
def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
    """
    Construct a `RcParams` from file *fname*.

    Parameters
    ----------
    fname : str or path-like
        A file with Matplotlib rc settings.
    fail_on_error : bool
        If True, raise an error when the parser fails to convert a parameter.
    use_default_template : bool
        If True, initialize with default parameters before updating with those
        in the given file. If False, the configuration class only contains the
        parameters specified in the file. (Useful for updating dicts.)
    """
    file_params = _rc_params_in_file(fname, fail_on_error=fail_on_error)
    if not use_default_template:
        # Caller wants only what the file itself specifies.
        return file_params
    with cbook._suppress_matplotlib_deprecation_warning():
        # Layer the file's settings on top of the built-in defaults.
        config = RcParams({**rcParamsDefault, **file_params})
    with cbook._suppress_matplotlib_deprecation_warning():
        # Resolve the (deprecated) datapath setting to a concrete directory.
        if config['datapath'] is None:
            config['datapath'] = _get_data_path()
        else:
            config['datapath'] = get_data_path(_from_rc=config['datapath'])
    if "".join(config['text.latex.preamble']):
        _log.info("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
""", '\n'.join(config['text.latex.preamble']))
    _log.debug('loaded rc file %s', fname)
    return config
# When constructing the global instances, we need to perform certain updates
# by explicitly calling the superclass (dict.update, dict.items) to avoid
# triggering resolution of _auto_backend_sentinel.
rcParamsDefault = _rc_params_in_file(
    cbook._get_data_path("matplotlibrc"),
    # Strip leading comment.
    transform=lambda line: line[1:] if line.startswith("#") else line,
    fail_on_error=True)
dict.update(rcParamsDefault, rcsetup._hardcoded_defaults)
rcParams = RcParams()  # The global instance.
dict.update(rcParams, dict.items(rcParamsDefault))
# The user's rc file (if any) overrides the shipped defaults.
dict.update(rcParams, _rc_params_in_file(matplotlib_fname()))
with cbook._suppress_matplotlib_deprecation_warning():
    # Snapshot of the params as loaded at import time; rc_file_defaults()
    # restores from this.
    rcParamsOrig = RcParams(rcParams.copy())
    # This also checks that all rcParams are indeed listed in the template.
    # Assigning to rcsetup.defaultParams is left only for backcompat.
    defaultParams = rcsetup.defaultParams = {
        # We want to resolve deprecated rcParams, but not backend...
        key: [(rcsetup._auto_backend_sentinel if key == "backend" else
               rcParamsDefault[key]),
              validator]
        for key, validator in rcsetup._validators.items()}
if rcParams['axes.formatter.use_locale']:
    locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
    """
    Set the current `.rcParams`.  *group* is the grouping for the rc, e.g.,
    for ``lines.linewidth`` the group is ``lines``, for ``axes.facecolor``
    the group is ``axes``, and so on.  *group* may also be a list or tuple
    of group names, e.g., (*xtick*, *ytick*).  *kwargs* holds attribute
    name/value pairs, e.g.,::

        rc('lines', linewidth=2, color='r')

    is equivalent to::

        rcParams['lines.linewidth'] = 2
        rcParams['lines.color'] = 'r'

    The following aliases save typing for interactive users: 'lw'
    (linewidth), 'ls' (linestyle), 'c' (color), 'fc' (facecolor), 'ec'
    (edgecolor), 'mew' (markeredgewidth), 'aa' (antialiased), so the call
    above can be abbreviated as ``rc('lines', lw=2, c='r')``.

    You can also use Python's kwargs facility to store dictionaries of
    default parameters, e.g. ``rc('font', **font_dict)``, and restore the
    defaults afterwards with ``matplotlib.style.use('default')`` or
    `~matplotlib.rcdefaults`.

    Notes
    -----
    Similar functionality is available via the normal dict interface, i.e.
    ``rcParams.update({"lines.linewidth": 2, ...})`` (but ``rcParams.update``
    does not support abbreviations or grouping).
    """
    # Short interactive aliases -> canonical property names.
    aliases = {
        'lw': 'linewidth',
        'ls': 'linestyle',
        'c': 'color',
        'fc': 'facecolor',
        'ec': 'edgecolor',
        'mew': 'markeredgewidth',
        'aa': 'antialiased',
    }
    if isinstance(group, str):
        group = (group,)
    for grp in group:
        for kwarg_name, value in kwargs.items():
            prop = aliases.get(kwarg_name, kwarg_name)
            full_key = f'{grp}.{prop}'
            try:
                rcParams[full_key] = value
            except KeyError as err:
                raise KeyError(
                    f'Unrecognized key "{full_key}" for group "{grp}" and '
                    f'name "{prop}"') from err
def rcdefaults():
    """
    Restore the `.rcParams` from Matplotlib's internal default style.

    Style-blacklisted `.rcParams` (defined in
    `matplotlib.style.core.STYLE_BLACKLIST`) are not updated.

    See Also
    --------
    matplotlib.rc_file_defaults
        Restore the `.rcParams` from the rc file originally loaded by
        Matplotlib.
    matplotlib.style.use
        Use a specific style file.  Call ``style.use('default')`` to restore
        the default style.
    """
    # rcParamsDefault was built with deprecation warnings already emitted;
    # suppress re-emission while copying it back.
    with cbook._suppress_matplotlib_deprecation_warning():
        from .style.core import STYLE_BLACKLIST
        defaults = {key: value
                    for key, value in rcParamsDefault.items()
                    if key not in STYLE_BLACKLIST}
        rcParams.clear()
        rcParams.update(defaults)
def rc_file_defaults():
    """
    Restore the `.rcParams` from the original rc file loaded by Matplotlib.

    Style-blacklisted `.rcParams` (defined in
    `matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
    """
    # rcParamsOrig was created with deprecation warnings already emitted;
    # suppress re-emission while copying values back.
    with cbook._suppress_matplotlib_deprecation_warning():
        from .style.core import STYLE_BLACKLIST
        restored = {key: rcParamsOrig[key]
                    for key in rcParamsOrig if key not in STYLE_BLACKLIST}
        rcParams.update(restored)
def rc_file(fname, *, use_default_template=True):
    """
    Update `.rcParams` from file.

    Style-blacklisted `.rcParams` (defined in
    `matplotlib.style.core.STYLE_BLACKLIST`) are not updated.

    Parameters
    ----------
    fname : str or path-like
        A file with Matplotlib rc settings.
    use_default_template : bool
        If True, initialize with default parameters before updating with those
        in the given file.  If False, the current configuration persists
        and only the parameters specified in the file are updated.
    """
    # rc_params_from_file already emitted any deprecation warnings; suppress
    # re-emission while applying the result.
    with cbook._suppress_matplotlib_deprecation_warning():
        from .style.core import STYLE_BLACKLIST
        loaded = rc_params_from_file(
            fname, use_default_template=use_default_template)
        updates = {key: loaded[key]
                   for key in loaded if key not in STYLE_BLACKLIST}
        rcParams.update(updates)
@contextlib.contextmanager
def rc_context(rc=None, fname=None):
    """
    Return a context manager for temporarily changing rcParams.

    Parameters
    ----------
    rc : dict
        The rcParams to temporarily set.
    fname : str or path-like
        A file with Matplotlib rc settings.  If both *fname* and *rc* are
        given, settings from *rc* take precedence.

    See Also
    --------
    :ref:`customizing-with-matplotlibrc-files`

    Examples
    --------
    Passing explicit values via a dict::

        with mpl.rc_context({'interactive': False}):
            fig, ax = plt.subplots()
            ax.plot(range(3), range(3))
            fig.savefig('example.png')
            plt.close(fig)

    Loading settings from a file::

        with mpl.rc_context(fname='print.rc'):
            plt.plot(x, y)  # uses 'print.rc'
    """
    saved = rcParams.copy()
    try:
        # Apply the file first so that explicit *rc* entries win.
        if fname:
            rc_file(fname)
        if rc:
            rcParams.update(rc)
        yield
    finally:
        # Restore via dict.update to bypass validation/deprecation handling.
        dict.update(rcParams, saved)
def use(backend, *, force=True):
    """
    Select the backend used for rendering and GUI integration.

    Parameters
    ----------
    backend : str
        The backend to switch to.  This can either be one of the standard
        backend names, which are case-insensitive:

        - interactive backends:
          GTK3Agg, GTK3Cairo, MacOSX, nbAgg,
          Qt4Agg, Qt4Cairo, Qt5Agg, Qt5Cairo,
          TkAgg, TkCairo, WebAgg, WX, WXAgg, WXCairo

        - non-interactive backends:
          agg, cairo, pdf, pgf, ps, svg, template

        or a string of the form: ``module://my.module.name``.
    force : bool, default: True
        If True (the default), raise an `ImportError` if the backend cannot be
        set up (either because it fails to import, or because an incompatible
        GUI interactive framework is already running); if False, ignore the
        failure.

    See Also
    --------
    :ref:`backends`
    matplotlib.get_backend
    """
    name = validate_backend(backend)
    # we need to use the base-class method here to avoid (prematurely)
    # resolving the "auto" backend setting
    if dict.__getitem__(rcParams, 'backend') == name:
        # Nothing to do if the requested backend is already set
        pass
    else:
        # if pyplot is not already imported, do not import it.  Doing
        # so may trigger a `plt.switch_backend` to the _default_ backend
        # before we get a chance to change to the one the user just requested
        plt = sys.modules.get('matplotlib.pyplot')
        # if pyplot is imported, then try to change backends
        if plt is not None:
            try:
                # we need this import check here to re-raise if the
                # user does not have the libraries to support their
                # chosen backend installed.
                plt.switch_backend(name)
            except ImportError:
                if force:
                    raise
        # if we have not imported pyplot, then we can set the rcParam
        # value which will be respected when the user finally imports
        # pyplot
        else:
            rcParams['backend'] = backend
    # if the user has asked for a given backend, do not helpfully
    # fallback
    rcParams['backend_fallback'] = False
# The MPLBACKEND environment variable, if set (and non-empty), overrides the
# backend chosen by the rc file at import time.
if os.environ.get('MPLBACKEND'):
    rcParams['backend'] = os.environ.get('MPLBACKEND')
def get_backend():
    """
    Return the name of the current backend.

    See Also
    --------
    matplotlib.use
    """
    return rcParams['backend']
def interactive(b):
    """
    Set whether to redraw after every plotting command (e.g. `.pyplot.xlabel`).
    """
    rcParams['interactive'] = b
def is_interactive():
    """Return whether to redraw after every plotting command."""
    return rcParams['interactive']
# Test packages run by `matplotlib.test()` when no explicit paths are given.
default_test_modules = [
    'matplotlib.tests',
    'mpl_toolkits.tests',
]
def _init_tests():
    # Sanity-check the FreeType build before running the image-comparison
    # test suite; a mismatched FreeType produces spurious failures.
    # The version of FreeType to install locally for running the
    # tests. This must match the value in `setupext.py`
    LOCAL_FREETYPE_VERSION = '2.6.1'
    from matplotlib import ft2font
    if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or
            ft2font.__freetype_build_type__ != 'local'):
        _log.warning(
            f"Matplotlib is not built with the correct FreeType version to "
            f"run tests. Rebuild without setting system_freetype=1 in "
            f"setup.cfg. Expect many image comparison failures below. "
            f"Expected freetype version {LOCAL_FREETYPE_VERSION}. "
            f"Found freetype version {ft2font.__freetype_version__}. "
            "Freetype build type is {}local".format(
                "" if ft2font.__freetype_build_type__ == 'local' else "not "))
@cbook._delete_parameter("3.2", "switch_backend_warn")
@cbook._delete_parameter("3.3", "recursionlimit")
def test(verbosity=None, coverage=False, switch_backend_warn=True,
         recursionlimit=0, **kwargs):
    """Run the matplotlib test suite."""
    try:
        import pytest
    except ImportError:
        print("matplotlib.test requires pytest to run.")
        return -1
    if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')):
        print("Matplotlib test data is not installed")
        return -1
    # Remember state so the finally-block can restore it after the run.
    old_backend = get_backend()
    old_recursionlimit = sys.getrecursionlimit()
    try:
        use('agg')
        if recursionlimit:
            sys.setrecursionlimit(recursionlimit)
        args = kwargs.pop('argv', [])
        provide_default_modules = True
        use_pyargs = True
        for arg in args:
            # An explicit module path or filesystem path disables the
            # default module list; a filesystem path also disables --pyargs.
            if any(arg.startswith(module_path)
                   for module_path in default_test_modules):
                provide_default_modules = False
                break
            if os.path.exists(arg):
                provide_default_modules = False
                use_pyargs = False
                break
        if use_pyargs:
            args += ['--pyargs']
        if provide_default_modules:
            args += default_test_modules
        if coverage:
            args += ['--cov']
        if verbosity:
            args += ['-' + 'v' * verbosity]
        retcode = pytest.main(args, **kwargs)
    finally:
        if old_backend.lower() != 'agg':
            use(old_backend)
        if recursionlimit:
            sys.setrecursionlimit(old_recursionlimit)
    return retcode
test.__test__ = False  # pytest: this function is not a test
def _replacer(data, value):
    """
    Either returns ``data[value]`` or passes ``data`` back, converts either to
    a sequence.
    """
    # Only string values are treated as keys into *data*; the isinstance
    # test itself cannot raise, so only the lookup is guarded.
    if isinstance(value, str):
        try:
            value = data[value]
        except Exception:
            # key does not exist, silently fall back to the string itself
            pass
    return sanitize_sequence(value)
def _label_from_arg(y, default_name):
try:
return y.name
except AttributeError:
if isinstance(default_name, str):
return default_name
return None
_DATA_DOC_TITLE = """
Notes
-----
"""
_DATA_DOC_APPENDIX = """
.. note::
In addition to the above described arguments, this function can take
a *data* keyword argument. If such a *data* argument is given,
{replaced}
Objects passed as **data** must support item access (``data[s]``) and
membership test (``s in data``).
"""
def _add_data_doc(docstring, replace_names):
"""
Add documentation for a *data* field to the given docstring.
Parameters
----------
docstring : str
The input docstring.
replace_names : list of str or None
The list of parameter names which arguments should be replaced by
``data[name]`` (if ``data[name]`` does not throw an exception). If
None, replacement is attempted for all arguments.
Returns
-------
str
The augmented docstring.
"""
if (docstring is None
or replace_names is not None and len(replace_names) == 0):
return docstring
docstring = inspect.cleandoc(docstring)
repl = (
(" every other argument can also be string ``s``, which is\n"
" interpreted as ``data[s]`` (unless this raises an exception).")
if replace_names is None else
(" the following arguments can also be string ``s``, which is\n"
" interpreted as ``data[s]`` (unless this raises an exception):\n"
" " + ", ".join(map("*{}*".format, replace_names))) + ".")
addendum = _DATA_DOC_APPENDIX.format(replaced=repl)
if _DATA_DOC_TITLE not in docstring:
addendum = _DATA_DOC_TITLE + addendum
return docstring + addendum
def _preprocess_data(func=None, *, replace_names=None, label_namer=None):
    """
    A decorator to add a 'data' kwarg to a function.

    When applied::

        @_preprocess_data()
        def func(ax, *args, **kwargs): ...

    the signature is modified to ``decorated(ax, *args, data=None, **kwargs)``
    with the following behavior:

    - if called with ``data=None``, forward the other arguments to ``func``;
    - otherwise, *data* must be a mapping; for any argument passed in as a
      string ``name``, replace the argument by ``data[name]`` (if this does not
      throw an exception), then forward the arguments to ``func``.

    In either case, any argument that is a `MappingView` is also converted to a
    list.

    Parameters
    ----------
    replace_names : list of str or None, default: None
        The list of parameter names for which lookup into *data* should be
        attempted. If None, replacement is attempted for all arguments.
    label_namer : str, default: None
        If set e.g. to "namer" (which must be a kwarg in the function's
        signature -- not as ``**kwargs``), if the *namer* argument passed in is
        a (string) key of *data* and no *label* kwarg is passed, then use the
        (string) value of the *namer* as *label*. ::

            @_preprocess_data(label_namer="foo")
            def func(foo, label=None): ...
            func("key", data={"key": value})
            # is equivalent to
            func.__wrapped__(value, label="key")
    """
    if func is None:  # Return the actual decorator.
        return functools.partial(
            _preprocess_data,
            replace_names=replace_names, label_namer=label_namer)
    # Classify the wrapped function's parameters so we know where to insert
    # the synthetic keyword-only "data" parameter.
    sig = inspect.signature(func)
    varargs_name = None
    varkwargs_name = None
    arg_names = []
    params = list(sig.parameters.values())
    for p in params:
        if p.kind is Parameter.VAR_POSITIONAL:
            varargs_name = p.name
        elif p.kind is Parameter.VAR_KEYWORD:
            varkwargs_name = p.name
        else:
            arg_names.append(p.name)
    data_param = Parameter("data", Parameter.KEYWORD_ONLY, default=None)
    if varkwargs_name:
        # "data" must come before the **kwargs parameter.
        params.insert(-1, data_param)
    else:
        params.append(data_param)
    new_sig = sig.replace(parameters=params)
    arg_names = arg_names[1:]  # remove the first "ax" / self arg
    assert {*arg_names}.issuperset(replace_names or []) or varkwargs_name, (
        "Matplotlib internal error: invalid replace_names ({!r}) for {!r}"
        .format(replace_names, func.__name__))
    assert label_namer is None or label_namer in arg_names, (
        "Matplotlib internal error: invalid label_namer ({!r}) for {!r}"
        .format(label_namer, func.__name__))
    @functools.wraps(func)
    def inner(ax, *args, data=None, **kwargs):
        # Fast path: no data mapping given, just sanitize and forward.
        if data is None:
            return func(ax, *map(sanitize_sequence, args), **kwargs)
        bound = new_sig.bind(ax, *args, **kwargs)
        auto_label = (bound.arguments.get(label_namer)
                      or bound.kwargs.get(label_namer))
        # Replace string arguments by data[...] lookups where requested.
        for k, v in bound.arguments.items():
            if k == varkwargs_name:
                for k1, v1 in v.items():
                    if replace_names is None or k1 in replace_names:
                        v[k1] = _replacer(data, v1)
            elif k == varargs_name:
                if replace_names is None:
                    bound.arguments[k] = tuple(_replacer(data, v1) for v1 in v)
            else:
                if replace_names is None or k in replace_names:
                    bound.arguments[k] = _replacer(data, v)
        new_args = bound.args
        new_kwargs = bound.kwargs
        args_and_kwargs = {**bound.arguments, **bound.kwargs}
        # Auto-fill *label* from the namer argument unless explicitly given.
        if label_namer and "label" not in args_and_kwargs:
            new_kwargs["label"] = _label_from_arg(
                args_and_kwargs.get(label_namer), auto_label)
        return func(*new_args, **new_kwargs)
    inner.__doc__ = _add_data_doc(inner.__doc__, replace_names)
    inner.__signature__ = new_sig
    return inner
# Import-time diagnostics, visible when debug logging is enabled.
_log.debug('matplotlib version %s', __version__)
_log.debug('interactive is %s', is_interactive())
_log.debug('platform is %s', sys.platform)
_log.debug('loaded modules: %s', list(sys.modules))
| 34.192722 | 79 | 0.619566 |
import atexit
from collections import namedtuple
from collections.abc import MutableMapping
import contextlib
from distutils.version import LooseVersion
import functools
import importlib
import inspect
from inspect import Parameter
import locale
import logging
import os
from pathlib import Path
import pprint
import re
import shutil
import subprocess
import sys
import tempfile
import warnings
from . import cbook, rcsetup
from matplotlib.cbook import MatplotlibDeprecationWarning, sanitize_sequence
from matplotlib.cbook import mplDeprecation
from matplotlib.rcsetup import validate_backend, cycler
import numpy
from ._version import get_versions
# Versioneer-generated version string; delete the helper afterwards so it
# does not linger in the public namespace.
__version__ = str(get_versions()['version'])
del get_versions
# Module-level logger shared across this file.
_log = logging.getLogger(__name__)
__bibtex__ = r"""@Article{Hunter:2007,
Author = {Hunter, J. D.},
Title = {Matplotlib: A 2D graphics environment},
Journal = {Computing in Science \& Engineering},
Volume = {9},
Number = {3},
Pages = {90--95},
abstract = {Matplotlib is a 2D graphics package used for Python
for application development, interactive scripting, and
publication-quality image generation across user
interfaces and operating systems.},
publisher = {IEEE COMPUTER SOC},
year = 2007
}"""
@cbook.deprecated("3.2")
def compare_versions(a, b):
    """Return whether version string *a* is greater than or equal to *b*."""
    if isinstance(a, bytes):
        cbook.warn_deprecated(
            "3.0", message="compare_versions arguments should be strs.")
        a = a.decode('ascii')
    if isinstance(b, bytes):
        cbook.warn_deprecated(
            "3.0", message="compare_versions arguments should be strs.")
        b = b.decode('ascii')
    if a:
        return LooseVersion(a) >= LooseVersion(b)
    else:
        # An empty/None *a* never satisfies the comparison.
        return False
def _check_versions():
    """Raise ImportError if a required dependency is older than we support."""
    # Quickly fail if ft2font (the compiled extension) is unimportable.
    from . import ft2font
    for modname, minver in [
            ("cycler", "0.10"),
            ("dateutil", "2.1"),
            ("kiwisolver", "1.0.1"),
            ("numpy", "1.15"),
            ("pyparsing", "2.0.1"),
    ]:
        module = importlib.import_module(modname)
        if LooseVersion(module.__version__) < minver:
            raise ImportError("Matplotlib requires {}>={}; you have {}"
                              .format(modname, minver, module.__version__))
_check_versions()
# The decorator ensures this is only called once (per process).
@functools.lru_cache()
def _ensure_handler():
    """
    Attach a StreamHandler (with the basic log format) to Matplotlib's root
    logger and return it.
    """
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
    _log.addHandler(handler)
    return handler
def set_loglevel(level):
    """
    Set Matplotlib's root logger and its handler to the given *level*
    (a `logging` level name, case-insensitive).
    """
    _log.setLevel(level.upper())
    _ensure_handler().setLevel(level.upper())
def _logged_cached(fmt, func=None):
    """
    Decorator that caches the result of a no-positional-args function and
    logs it (via *fmt*, a %-style format) at debug level on every call.
    """
    if func is None:
        # Called as @_logged_cached(fmt): return the parameterized decorator.
        return functools.partial(_logged_cached, fmt)
    called = False
    ret = None
    @functools.wraps(func)
    def wrapper(**kwargs):
        nonlocal called, ret
        if not called:
            # Compute once; later calls reuse the cached value.
            ret = func(**kwargs)
            called = True
        _log.debug(fmt, ret)
        return ret
    return wrapper
# (executable, version) pair returned by _get_executable_info.
_ExecInfo = namedtuple("_ExecInfo", "executable version")
class ExecutableNotFoundError(FileNotFoundError):
    """
    Raised when an executable that Matplotlib optionally depends on can't be
    found, or is found but is too old.
    """
    pass
@functools.lru_cache()
def _get_executable_info(name):
    """
    Get the version of some external executable that Matplotlib optionally
    depends on.

    *name* must be one of "dvipng", "gs", "inkscape", "magick", or "pdftops";
    a `ValueError` is raised otherwise.  Returns an `_ExecInfo` of
    (executable, version).  Raises `ExecutableNotFoundError` if the tool is
    missing or older than the minimum supported version.  Results are cached
    by `functools.lru_cache`.
    """
    def impl(args, regex, min_ver=None, ignore_exit_code=False):
        # Run *args*, extract the version via *regex* (group 1), and enforce
        # *min_ver* if given.  ignore_exit_code tolerates nonzero exits
        # (pdftops -v exits nonzero while still printing its version).
        try:
            output = subprocess.check_output(
                args, stderr=subprocess.STDOUT,
                universal_newlines=True, errors="replace")
        except subprocess.CalledProcessError as _cpe:
            if ignore_exit_code:
                output = _cpe.output
            else:
                raise ExecutableNotFoundError(str(_cpe)) from _cpe
        except OSError as _ose:
            raise ExecutableNotFoundError(str(_ose)) from _ose
        match = re.search(regex, output)
        if match:
            version = LooseVersion(match.group(1))
            if min_ver is not None and version < min_ver:
                raise ExecutableNotFoundError(
                    f"You have {args[0]} version {version} but the minimum "
                    f"version supported by Matplotlib is {min_ver}")
            return _ExecInfo(args[0], version)
        else:
            raise ExecutableNotFoundError(
                f"Failed to determine the version of {args[0]} from "
                f"{' '.join(args)}, which output {output}")
    if name == "dvipng":
        return impl(["dvipng", "-version"], "(?m)^dvipng(?: .*)? (.+)", "1.6")
    elif name == "gs":
        # Windows ships several differently-named Ghostscript binaries.
        execs = (["gswin32c", "gswin64c", "mgs", "gs"]
                 if sys.platform == "win32" else
                 ["gs"])
        for e in execs:
            try:
                return impl([e, "--version"], "(.*)", "9")
            except ExecutableNotFoundError:
                pass
        message = "Failed to find a Ghostscript installation"
        raise ExecutableNotFoundError(message)
    elif name == "inkscape":
        # Try the pre-1.0 CLI first, then the newer one.
        try:
            return impl(["inkscape", "--without-gui", "-V"],
                        "Inkscape ([^ ]*)")
        except ExecutableNotFoundError:
            pass
        return impl(["inkscape", "-V"], "Inkscape ([^ ]*)")
    elif name == "magick":
        path = None
        if sys.platform == "win32":
            # Check the registry to avoid confusing ImageMagick's convert with
            # Windows's builtin convert.exe.
            import winreg
            binpath = ""
            for flag in [0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY]:
                try:
                    with winreg.OpenKeyEx(
                            winreg.HKEY_LOCAL_MACHINE,
                            r"Software\Imagemagick\Current",
                            0, winreg.KEY_QUERY_VALUE | flag) as hkey:
                        binpath = winreg.QueryValueEx(hkey, "BinPath")[0]
                except OSError:
                    pass
            if binpath:
                for name in ["convert.exe", "magick.exe"]:
                    candidate = Path(binpath, name)
                    if candidate.exists():
                        path = str(candidate)
                        break
        else:
            path = "convert"
        if path is None:
            raise ExecutableNotFoundError(
                "Failed to find an ImageMagick installation")
        return impl([path, "--version"], r"^Version: ImageMagick (\S*)")
    elif name == "pdftops":
        info = impl(["pdftops", "-v"], "^pdftops version (.*)",
                    ignore_exit_code=True)
        # Only Xpdf (>=3.0) and Poppler (0.9-1.0) pdftops are supported.
        if info and not ("3.0" <= info.version
                         or "0.9" <= info.version <= "1.0"):
            raise ExecutableNotFoundError(
                f"You have pdftops version {info.version} but the minimum "
                f"version supported by Matplotlib is 3.0")
        return info
    else:
        raise ValueError("Unknown executable: {!r}".format(name))
@cbook.deprecated("3.2")
def checkdep_ps_distiller(s):
    """
    Validate the rc 'ps.usedistiller' setting *s*: return False if the
    required tools (ghostscript; additionally pdftops for 'xpdf') are
    missing, otherwise return *s* unchanged.
    """
    if not s:
        return False
    try:
        _get_executable_info("gs")
    except ExecutableNotFoundError:
        _log.warning(
            "Setting rcParams['ps.usedistiller'] requires ghostscript.")
        return False
    if s == "xpdf":
        try:
            _get_executable_info("pdftops")
        except ExecutableNotFoundError:
            _log.warning(
                "Setting rcParams['ps.usedistiller'] to 'xpdf' requires xpdf.")
            return False
    return s
def checkdep_usetex(s):
    """
    Return whether usetex mode can be enabled: requires *s* to be truthy and
    the ``tex``, ``dvipng`` and ghostscript executables to be available.
    """
    if not s:
        return False
    if not shutil.which("tex"):
        _log.warning("usetex mode requires TeX.")
        return False
    try:
        _get_executable_info("dvipng")
    except ExecutableNotFoundError:
        _log.warning("usetex mode requires dvipng.")
        return False
    try:
        _get_executable_info("gs")
    except ExecutableNotFoundError:
        _log.warning("usetex mode requires ghostscript.")
        return False
    return True
@cbook.deprecated("3.2", alternative="os.path.expanduser('~')")
@_logged_cached('$HOME=%s')
def get_home():
    """Return the user's home directory as a str, or None if undeterminable."""
    try:
        return str(Path.home())
    except Exception:
        return None
def _get_xdg_config_dir():
return os.environ.get('XDG_CONFIG_HOME') or str(Path.home() / ".config")
def _get_xdg_cache_dir():
return os.environ.get('XDG_CACHE_HOME') or str(Path.home() / ".cache")
def _get_config_or_cache_dir(xdg_base):
    """
    Return a writable config/cache directory for Matplotlib as a str.

    Preference order: $MPLCONFIGDIR; *xdg_base*/matplotlib on linux/freebsd;
    ~/.matplotlib elsewhere.  If the chosen directory cannot be created or is
    not writable, fall back to a process-lifetime temporary directory (also
    exported via $MPLCONFIGDIR) with a warning.
    """
    configdir = os.environ.get('MPLCONFIGDIR')
    if configdir:
        configdir = Path(configdir).resolve()
    elif sys.platform.startswith(('linux', 'freebsd')) and xdg_base:
        configdir = Path(xdg_base, "matplotlib")
    else:
        configdir = Path.home() / ".matplotlib"
    try:
        configdir.mkdir(parents=True, exist_ok=True)
    except OSError:
        pass
    else:
        if os.access(str(configdir), os.W_OK) and configdir.is_dir():
            return str(configdir)
    # Fallback: a temp dir, removed at interpreter exit.
    tmpdir = os.environ["MPLCONFIGDIR"] = \
        tempfile.mkdtemp(prefix="matplotlib-")
    atexit.register(shutil.rmtree, tmpdir)
    _log.warning(
        "Matplotlib created a temporary config/cache directory at %s because "
        "the default path (%s) is not a writable directory; it is highly "
        "recommended to set the MPLCONFIGDIR environment variable to a "
        "writable directory, in particular to speed up the import of "
        "Matplotlib and to better support multiprocessing.",
        tmpdir, configdir)
    return tmpdir
@_logged_cached('CONFIGDIR=%s')
def get_configdir():
    """Return the string path of the configuration directory (cached)."""
    return _get_config_or_cache_dir(_get_xdg_config_dir())
@_logged_cached('CACHEDIR=%s')
def get_cachedir():
    """Return the string path of the cache directory (cached)."""
    return _get_config_or_cache_dir(_get_xdg_cache_dir())
@_logged_cached('matplotlib data path: %s')
def get_data_path(*, _from_rc=None):
    """
    Return the path to Matplotlib data (the mpl-data directory).

    *_from_rc* supports the deprecated ``datapath`` rc entry: if given and it
    is an existing directory it is used, otherwise we warn and fall back to
    the standard location.
    """
    if _from_rc is not None:
        cbook.warn_deprecated(
            "3.2",
            message=("Setting the datapath via matplotlibrc is deprecated "
                     "%(since)s and will be removed %(removal)s."),
            removal='3.4')
        path = Path(_from_rc)
        if path.is_dir():
            return str(path)
        else:
            warnings.warn(f"You passed datapath: {_from_rc!r} in your "
                          f"matplotribrc file ({matplotlib_fname()}). "
                          "However this path does not exist, falling back "
                          "to standard paths.")
    return _get_data_path()
@_logged_cached('(private) matplotlib data path: %s')
def _get_data_path():
    """
    Return the path to the mpl-data directory, normally located next to this
    module; deprecated fallback locations are probed otherwise.
    """
    path = Path(__file__).with_name("mpl-data")
    if path.is_dir():
        return str(path)
    cbook.warn_deprecated(
        "3.2", message="Matplotlib installs where the data is not in the "
        "mpl-data subdirectory of the package are deprecated since %(since)s "
        "and support for them will be removed %(removal)s.")
    def get_candidate_paths():
        # so need to try something known to be in Matplotlib, not basemap.
        import matplotlib.afm
        yield Path(matplotlib.afm.__file__).with_name('mpl-data')
        # py2exe zips pure python, so still need special check.
        if getattr(sys, 'frozen', None):
            yield Path(sys.executable).with_name('mpl-data')
            # Try again assuming we need to step up one more directory.
            yield Path(sys.executable).parent.with_name('mpl-data')
            # Try again assuming sys.path[0] is a dir not a exe.
            yield Path(sys.path[0]) / 'mpl-data'
    for path in get_candidate_paths():
        if path.is_dir():
            # Record the discovered path for backcompat consumers.
            defaultParams['datapath'][0] = str(path)
            return str(path)
    raise RuntimeError('Could not find the matplotlib data files')
def matplotlib_fname():
    """
    Get the location of the matplotlibrc config file.

    The file is looked for, in order, in: the current working directory;
    $MATPLOTLIBRC (either a file, or a directory containing ``matplotlibrc``);
    the user configuration directory; and finally Matplotlib's data path
    (which always contains the shipped template, so the search cannot come up
    empty on a healthy install).
    """
    def gen_candidates():
        yield os.path.join(os.getcwd(), 'matplotlibrc')
        try:
            matplotlibrc = os.environ['MATPLOTLIBRC']
        except KeyError:
            pass
        else:
            yield matplotlibrc
            yield os.path.join(matplotlibrc, 'matplotlibrc')
        yield os.path.join(get_configdir(), 'matplotlibrc')
        yield os.path.join(_get_data_path(), 'matplotlibrc')
    for fname in gen_candidates():
        if os.path.exists(fname) and not os.path.isdir(fname):
            return fname
    raise RuntimeError("Could not find matplotlibrc file; your Matplotlib "
                       "install is broken")
# rcParams deprecated and automatically mapped to another key.
# Values are tuples of (version, new_name, f_old2new, f_new2old).
_deprecated_map = {}
# rcParams deprecated; some can manually be mapped to another key.
# Values are tuples of (version, new_name_or_None).
_deprecated_ignore_map = {
}
# rcParams deprecated; can use None to suppress warnings; remain actually
# listed in the rcParams (not included in _all_deprecated).
# Values are tuples of (version,)
_deprecated_remain_as_none = {
    'datapath': ('3.2.1',),
    'animation.avconv_path': ('3.3',),
    'animation.avconv_args': ('3.3',),
    'animation.html_args': ('3.3',),
    'mathtext.fallback_to_cm': ('3.3',),
    'keymap.all_axes': ('3.3',),
    'savefig.jpeg_quality': ('3.3',),
    'text.latex.preview': ('3.3',),
}
# Union of keys fully removed from the live rcParams.
_all_deprecated = {*_deprecated_map, *_deprecated_ignore_map}
class RcParams(MutableMapping, dict):
    """
    A dict-like key-value store for config parameters, with validation.

    Validating functions are looked up by key in ``rcsetup._validators``;
    keys are kept sorted for iteration, and deprecated keys are transparently
    remapped/warned about on access.
    """
    validate = rcsetup._validators
    # validate values on the way in
    def __init__(self, *args, **kwargs):
        # MutableMapping.update funnels everything through __setitem__,
        # so all entries get validated.
        self.update(*args, **kwargs)
    def __setitem__(self, key, val):
        """Validate *val* and store it, handling deprecated keys."""
        try:
            if key in _deprecated_map:
                version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
                cbook.warn_deprecated(
                    version, name=key, obj_type="rcparam", alternative=alt_key)
                key = alt_key
                val = alt_val(val)
            elif key in _deprecated_remain_as_none and val is not None:
                version, = _deprecated_remain_as_none[key]
                cbook.warn_deprecated(
                    version, name=key, obj_type="rcparam")
            elif key in _deprecated_ignore_map:
                version, alt_key = _deprecated_ignore_map[key]
                cbook.warn_deprecated(
                    version, name=key, obj_type="rcparam", alternative=alt_key)
                return
            elif key == 'backend':
                if val is rcsetup._auto_backend_sentinel:
                    # Don't clobber an explicitly-set backend with "auto".
                    if 'backend' in self:
                        return
            try:
                cval = self.validate[key](val)
            except ValueError as ve:
                raise ValueError(f"Key {key}: {ve}") from None
            dict.__setitem__(self, key, cval)
        except KeyError as err:
            raise KeyError(
                f"{key} is not a valid rc parameter (see rcParams.keys() for "
                f"a list of valid parameters)") from err
    def __getitem__(self, key):
        """Return the value for *key*, resolving deprecated keys and "auto"."""
        if key in _deprecated_map:
            version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
            cbook.warn_deprecated(
                version, name=key, obj_type="rcparam", alternative=alt_key)
            return inverse_alt(dict.__getitem__(self, alt_key))
        elif key in _deprecated_ignore_map:
            version, alt_key = _deprecated_ignore_map[key]
            cbook.warn_deprecated(
                version, name=key, obj_type="rcparam", alternative=alt_key)
            return dict.__getitem__(self, alt_key) if alt_key else None
        elif key == "backend":
            val = dict.__getitem__(self, key)
            if val is rcsetup._auto_backend_sentinel:
                # Resolving "auto" imports pyplot and picks a real backend.
                from matplotlib import pyplot as plt
                plt.switch_backend(rcsetup._auto_backend_sentinel)
        elif key == "datapath":
            return get_data_path()
        return dict.__getitem__(self, key)
    def __repr__(self):
        class_name = self.__class__.__name__
        indent = len(class_name) + 1
        with cbook._suppress_matplotlib_deprecation_warning():
            repr_split = pprint.pformat(dict(self), indent=1,
                                        width=80 - indent).split('\n')
        repr_indented = ('\n' + ' ' * indent).join(repr_split)
        return '{}({})'.format(class_name, repr_indented)
    def __str__(self):
        return '\n'.join(map('{0[0]}: {0[1]}'.format, sorted(self.items())))
    def __iter__(self):
        """Yield sorted list of keys."""
        with cbook._suppress_matplotlib_deprecation_warning():
            yield from sorted(dict.__iter__(self))
    def __len__(self):
        return dict.__len__(self)
    def find_all(self, pattern):
        """
        Return a new `RcParams` of the key/value pairs whose keys match
        *pattern* (an uncompiled regexp, searched anywhere in the key).
        """
        pattern_re = re.compile(pattern)
        return RcParams((key, value)
                        for key, value in self.items()
                        if pattern_re.search(key))
    def copy(self):
        # Plain dict copy; bypasses deprecation/auto-backend resolution.
        return {k: dict.__getitem__(self, k) for k in self}
def rc_params(fail_on_error=False):
    """Construct a `RcParams` instance from the default Matplotlib rc file."""
    return rc_params_from_file(matplotlib_fname(), fail_on_error)
# Recognized URL schemes for rc-file sources.
URL_REGEX = re.compile(r'^http://|^https://|^ftp://|^file:')
def is_url(filename):
    """Return whether *filename* is an http, https, ftp, or file URL path."""
    return bool(URL_REGEX.match(filename))
@functools.lru_cache()
def _get_ssl_context():
    """
    Return an `ssl.SSLContext` backed by certifi's CA bundle, or None if
    certifi is not installed (cached after the first call).
    """
    try:
        import certifi
    except ImportError:
        _log.debug("Could not import certifi.")
        return None
    import ssl
    return ssl.create_default_context(cafile=certifi.where())
@contextlib.contextmanager
def _open_file_or_url(fname):
    """
    Context manager yielding an iterable of text lines from *fname*, which
    may be a local path (str or Path) or a URL string.
    """
    if not isinstance(fname, Path) and is_url(fname):
        import urllib.request
        ssl_ctx = _get_ssl_context()
        if ssl_ctx is None:
            _log.debug(
                "Could not get certifi ssl context, https may not work."
            )
        with urllib.request.urlopen(fname, context=ssl_ctx) as f:
            # URLs yield bytes; decode each line as UTF-8.
            yield (line.decode('utf-8') for line in f)
    else:
        fname = os.path.expanduser(fname)
        encoding = locale.getpreferredencoding(do_setlocale=False)
        if encoding is None:
            encoding = "utf-8"
        with open(fname, encoding=encoding) as f:
            yield f
def _rc_params_in_file(fname, transform=lambda x: x, fail_on_error=False):
    """Construct an RcParams from the entries in file *fname*.

    Only the parameters actually specified in the file are returned; no
    defaults are filled in.

    Parameters
    ----------
    fname : path-like
        The file to parse.
    transform : callable, default: the identity function
        Applied to each raw line before parsing.
    fail_on_error : bool, default: False
        Whether an invalid value raises (True) or merely warns (False).
    """
    rc_temp = {}
    with _open_file_or_url(fname) as fd:
        try:
            for line_no, line in enumerate(fd, 1):
                line = transform(line)
                # Drop trailing '#' comments and surrounding whitespace.
                # (Fix: this expression had been truncated to the
                # syntactically invalid "line.split('".)
                strippedline = line.split('#', 1)[0].strip()
                if not strippedline:
                    continue
                tup = strippedline.split(':', 1)
                if len(tup) != 2:
                    _log.warning('Missing colon in file %r, line %d (%r)',
                                 fname, line_no, line.rstrip('\n'))
                    continue
                key, val = tup
                key = key.strip()
                val = val.strip()
                if key in rc_temp:
                    _log.warning('Duplicate key in file %r, line %d (%r)',
                                 fname, line_no, line.rstrip('\n'))
                rc_temp[key] = (val, line, line_no)
        except UnicodeDecodeError:
            _log.warning('Cannot decode configuration file %s with encoding '
                         '%s, check LANG and LC_* variables.',
                         fname,
                         locale.getpreferredencoding(do_setlocale=False)
                         or 'utf-8 (default)')
            raise
    config = RcParams()
    for key, (val, line, line_no) in rc_temp.items():
        if key in rcsetup._validators:
            if fail_on_error:
                config[key] = val  # try to convert to proper type or raise
            else:
                try:
                    config[key] = val  # try to convert to proper type or skip
                except Exception as msg:
                    _log.warning('Bad value in file %r, line %d (%r): %s',
                                 fname, line_no, line.rstrip('\n'), msg)
        elif key in _deprecated_ignore_map:
            version, alt_key = _deprecated_ignore_map[key]
            cbook.warn_deprecated(
                version, name=key, alternative=alt_key,
                addendum="Please update your matplotlibrc.")
        else:
            version = 'master' if '.post' in __version__ else f'v{__version__}'
            _log.warning("""
Bad key %(key)s in file %(fname)s, line %(line_no)s (%(line)r)
You probably need to get an updated matplotlibrc file from
https://github.com/matplotlib/matplotlib/blob/%(version)s/matplotlibrc.template
or from the matplotlib source distribution""",
                         dict(key=key, fname=fname, line_no=line_no,
                              line=line.rstrip('\n'), version=version))
    return config
def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
    """Construct an RcParams from file *fname*.

    Parameters
    ----------
    fname : str or path-like
        A file with Matplotlib rc settings.
    fail_on_error : bool
        If True, raise an error when the parser fails to convert a parameter.
    use_default_template : bool
        If True, initialize with default parameters before updating with
        those in the given file; if False, only the parameters specified in
        the file are present in the result.
    """
    config_from_file = _rc_params_in_file(fname, fail_on_error=fail_on_error)
    if not use_default_template:
        return config_from_file
    with cbook._suppress_matplotlib_deprecation_warning():
        config = RcParams({**rcParamsDefault, **config_from_file})
    # Resolve the deprecated 'datapath' entry against the real data path.
    with cbook._suppress_matplotlib_deprecation_warning():
        if config['datapath'] is None:
            config['datapath'] = _get_data_path()
        else:
            config['datapath'] = get_data_path(_from_rc=config['datapath'])
    # A non-empty LaTeX preamble is unsupported; warn loudly but proceed.
    if "".join(config['text.latex.preamble']):
        _log.info("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
""", '\n'.join(config['text.latex.preamble']))
    _log.debug('loaded rc file %s', fname)
    return config
# When constructing the global instances, we need to perform certain updates
# by explicitly calling the superclass (dict.update, dict.items) to avoid
# triggering resolution of _auto_backend_sentinel.
rcParamsDefault = _rc_params_in_file(
    cbook._get_data_path("matplotlibrc"),
    # Strip leading comment.
    transform=lambda line: line[1:] if line.startswith("#") else line,
    fail_on_error=True)
dict.update(rcParamsDefault, rcsetup._hardcoded_defaults)
rcParams = RcParams()  # The global instance.
dict.update(rcParams, dict.items(rcParamsDefault))
# Layer the user's matplotlibrc on top of the defaults.
dict.update(rcParams, _rc_params_in_file(matplotlib_fname()))
with cbook._suppress_matplotlib_deprecation_warning():
    rcParamsOrig = RcParams(rcParams.copy())
    # This also checks that all rcParams are indeed listed in the template.
    # Assigning to rcsetup.defaultParams is left only for backcompat.
    defaultParams = rcsetup.defaultParams = {
        # We want to resolve deprecated rcParams, but not backend...
        key: [(rcsetup._auto_backend_sentinel if key == "backend" else
               rcParamsDefault[key]),
              validator]
        for key, validator in rcsetup._validators.items()}
# Respect the user's locale in tick formatting when requested.
if rcParams['axes.formatter.use_locale']:
    locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
    """Set the current `.rcParams`.

    Each keyword in *kwargs* is applied as ``<g>.<name>`` for every grouping
    *g* in *group* (a string or a sequence of strings), after expanding the
    common property abbreviations below.  Raises KeyError on unknown keys.
    """
    aliases = {
        'lw': 'linewidth',
        'ls': 'linestyle',
        'c': 'color',
        'fc': 'facecolor',
        'ec': 'edgecolor',
        'mew': 'markeredgewidth',
        'aa': 'antialiased',
    }
    groups = (group,) if isinstance(group, str) else group
    for g in groups:
        for abbrev, value in kwargs.items():
            name = aliases.get(abbrev) or abbrev
            key = '%s.%s' % (g, name)
            try:
                rcParams[key] = value
            except KeyError as err:
                raise KeyError(('Unrecognized key "%s" for group "%s" and '
                                'name "%s"') % (key, g, name)) from err
def rcdefaults():
    """Restore `.rcParams` from Matplotlib's internal default style,
    leaving style-blacklisted entries (e.g. backend) untouched."""
    # Deprecation warnings were already handled when creating rcParamsDefault,
    # no need to reemit them here.
    with cbook._suppress_matplotlib_deprecation_warning():
        from .style.core import STYLE_BLACKLIST
        defaults = {key: val for key, val in rcParamsDefault.items()
                    if key not in STYLE_BLACKLIST}
        rcParams.clear()
        rcParams.update(defaults)
def rc_file_defaults():
    """Restore `.rcParams` from the rc state captured at import time,
    skipping style-blacklisted entries."""
    # Deprecation warnings were already handled when creating rcParamsOrig, no
    # need to reemit them here.
    with cbook._suppress_matplotlib_deprecation_warning():
        from .style.core import STYLE_BLACKLIST
        restored = {key: rcParamsOrig[key] for key in rcParamsOrig
                    if key not in STYLE_BLACKLIST}
        rcParams.update(restored)
def rc_file(fname, *, use_default_template=True):
    """Update `.rcParams` from the settings in file *fname*.

    Style-blacklisted entries are never applied; *use_default_template*
    is forwarded to `rc_params_from_file`.
    """
    # Deprecation warnings were already handled in rc_params_from_file, no need
    # to reemit them here.
    with cbook._suppress_matplotlib_deprecation_warning():
        from .style.core import STYLE_BLACKLIST
        loaded = rc_params_from_file(
            fname, use_default_template=use_default_template)
        rcParams.update({key: loaded[key] for key in loaded
                         if key not in STYLE_BLACKLIST})
@contextlib.contextmanager
def rc_context(rc=None, fname=None):
    """Temporarily override `.rcParams` (from file *fname*, then the dict
    *rc*), restoring the previous state on exit even if an error occurs."""
    saved = rcParams.copy()
    try:
        if fname:
            rc_file(fname)
        if rc:
            rcParams.update(rc)
        yield
    finally:
        # Revert to the original rcs without triggering __setitem__ logic.
        dict.update(rcParams, saved)
def use(backend, *, force=True):
    """Select the Matplotlib backend.

    Parameters
    ----------
    backend : str
        The backend name (validated before being applied).
    force : bool, default: True
        If switching an already-imported pyplot fails with ImportError,
        re-raise (True) or silently keep the current backend (False).
    """
    name = validate_backend(backend)
    # we need to use the base-class method here to avoid (prematurely)
    # resolving the "auto" backend setting
    if dict.__getitem__(rcParams, 'backend') == name:
        # Nothing to do if the requested backend is already set
        pass
    else:
        # if pyplot is not already imported, do not import it. Doing
        # so may trigger a `plt.switch_backend` to the _default_ backend
        # before we get a chance to change to the one the user just requested
        plt = sys.modules.get('matplotlib.pyplot')
        # if pyplot is imported, then try to change backends
        if plt is not None:
            try:
                # we need this import check here to re-raise if the
                # user does not have the libraries to support their
                # chosen backend installed.
                plt.switch_backend(name)
            except ImportError:
                if force:
                    raise
        # if we have not imported pyplot, then we can set the rcParam
        # value which will be respected when the user finally imports
        # pyplot
        else:
            rcParams['backend'] = backend
        # if the user has asked for a given backend, do not helpfully
        # fallback
        rcParams['backend_fallback'] = False
# An MPLBACKEND environment variable overrides the configured backend.
if os.environ.get('MPLBACKEND'):
    rcParams['backend'] = os.environ.get('MPLBACKEND')
def get_backend():
    """Return the name of the current backend."""
    return rcParams['backend']
def interactive(b):
    """Set whether to redraw after every plotting command (bool *b*)."""
    rcParams['interactive'] = b
def is_interactive():
    """Return whether to redraw after every plotting command."""
    return rcParams['interactive']
# Test modules collected by `test()` when none are explicitly requested.
default_test_modules = [
    'matplotlib.tests',
    'mpl_toolkits.tests',
]
def _init_tests():
    """Warn if the local FreeType build needed by image tests is missing."""
    # The version of FreeType to install locally for running the
    # tests. This must match the value in `setupext.py`
    LOCAL_FREETYPE_VERSION = '2.6.1'
    from matplotlib import ft2font
    if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or
            ft2font.__freetype_build_type__ != 'local'):
        _log.warning(
            f"Matplotlib is not built with the correct FreeType version to "
            f"run tests. Rebuild without setting system_freetype=1 in "
            f"setup.cfg. Expect many image comparison failures below. "
            f"Expected freetype version {LOCAL_FREETYPE_VERSION}. "
            f"Found freetype version {ft2font.__freetype_version__}. "
            "Freetype build type is {}local".format(
                "" if ft2font.__freetype_build_type__ == 'local' else "not "))
@cbook._delete_parameter("3.2", "switch_backend_warn")
@cbook._delete_parameter("3.3", "recursionlimit")
def test(verbosity=None, coverage=False, switch_backend_warn=True,
         recursionlimit=0, **kwargs):
    """Run the matplotlib test suite via pytest.

    Returns the pytest exit code, or -1 when pytest or the installed test
    data is missing.  The 'agg' backend (and, optionally, a raised
    recursion limit) is forced for the duration and restored afterwards.
    """
    try:
        import pytest
    except ImportError:
        print("matplotlib.test requires pytest to run.")
        return -1
    if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')):
        print("Matplotlib test data is not installed")
        return -1
    old_backend = get_backend()
    old_recursionlimit = sys.getrecursionlimit()
    try:
        use('agg')
        if recursionlimit:
            sys.setrecursionlimit(recursionlimit)
        args = kwargs.pop('argv', [])
        provide_default_modules = True
        use_pyargs = True
        # Explicit module paths or filesystem paths disable the defaults;
        # filesystem paths additionally disable --pyargs.
        for arg in args:
            if any(arg.startswith(module_path)
                   for module_path in default_test_modules):
                provide_default_modules = False
                break
            if os.path.exists(arg):
                provide_default_modules = False
                use_pyargs = False
                break
        if use_pyargs:
            args += ['--pyargs']
        if provide_default_modules:
            args += default_test_modules
        if coverage:
            args += ['--cov']
        if verbosity:
            args += ['-' + 'v' * verbosity]
        retcode = pytest.main(args, **kwargs)
    finally:
        if old_backend.lower() != 'agg':
            use(old_backend)
        if recursionlimit:
            sys.setrecursionlimit(old_recursionlimit)
    return retcode
test.__test__ = False  # pytest: this function is not a test
def _replacer(data, value):
    """If *value* is a string key into *data*, substitute ``data[value]``;
    otherwise pass *value* through.  The result is sanitized either way."""
    if isinstance(value, str):
        try:
            value = data[value]
        except Exception:
            # Not a usable key: silently keep the original value.
            pass
    return sanitize_sequence(value)
def _label_from_arg(y, default_name):
try:
return y.name
except AttributeError:
if isinstance(default_name, str):
return default_name
return None
# Boilerplate spliced into the docstrings of functions wrapped by
# _preprocess_data (see _add_data_doc below).
_DATA_DOC_TITLE = """
Notes
-----
"""
_DATA_DOC_APPENDIX = """
.. note::
In addition to the above described arguments, this function can take
a *data* keyword argument. If such a *data* argument is given,
{replaced}
Objects passed as **data** must support item access (``data[s]``) and
membership test (``s in data``).
"""
def _add_data_doc(docstring, replace_names):
if (docstring is None
or replace_names is not None and len(replace_names) == 0):
return docstring
docstring = inspect.cleandoc(docstring)
repl = (
(" every other argument can also be string ``s``, which is\n"
" interpreted as ``data[s]`` (unless this raises an exception).")
if replace_names is None else
(" the following arguments can also be string ``s``, which is\n"
" interpreted as ``data[s]`` (unless this raises an exception):\n"
" " + ", ".join(map("*{}*".format, replace_names))) + ".")
addendum = _DATA_DOC_APPENDIX.format(replaced=repl)
if _DATA_DOC_TITLE not in docstring:
addendum = _DATA_DOC_TITLE + addendum
return docstring + addendum
def _preprocess_data(func=None, *, replace_names=None, label_namer=None):
    """Decorator adding a *data* keyword to an Axes plotting function.

    String arguments named in *replace_names* (or all arguments, when it is
    None) are looked up in the *data* mapping before the call; *label_namer*
    names the argument whose value supplies an automatic ``label`` when the
    caller gives none.
    """
    if func is None:  # Return the actual decorator.
        return functools.partial(
            _preprocess_data,
            replace_names=replace_names, label_namer=label_namer)
    sig = inspect.signature(func)
    varargs_name = None
    varkwargs_name = None
    arg_names = []
    params = list(sig.parameters.values())
    # Classify parameters so we know where to insert the new "data" kwarg.
    for p in params:
        if p.kind is Parameter.VAR_POSITIONAL:
            varargs_name = p.name
        elif p.kind is Parameter.VAR_KEYWORD:
            varkwargs_name = p.name
        else:
            arg_names.append(p.name)
    data_param = Parameter("data", Parameter.KEYWORD_ONLY, default=None)
    if varkwargs_name:
        # "data" must come before the **kwargs catch-all.
        params.insert(-1, data_param)
    else:
        params.append(data_param)
    new_sig = sig.replace(parameters=params)
    arg_names = arg_names[1:]  # remove the first "ax" / self arg
    assert {*arg_names}.issuperset(replace_names or []) or varkwargs_name, (
        "Matplotlib internal error: invalid replace_names ({!r}) for {!r}"
        .format(replace_names, func.__name__))
    assert label_namer is None or label_namer in arg_names, (
        "Matplotlib internal error: invalid label_namer ({!r}) for {!r}"
        .format(label_namer, func.__name__))
    @functools.wraps(func)
    def inner(ax, *args, data=None, **kwargs):
        # Fast path: without *data*, only sanitize positional arguments.
        if data is None:
            return func(ax, *map(sanitize_sequence, args), **kwargs)
        bound = new_sig.bind(ax, *args, **kwargs)
        auto_label = (bound.arguments.get(label_namer)
                      or bound.kwargs.get(label_namer))
        # Replace string arguments by data[...] lookups where allowed.
        for k, v in bound.arguments.items():
            if k == varkwargs_name:
                for k1, v1 in v.items():
                    if replace_names is None or k1 in replace_names:
                        v[k1] = _replacer(data, v1)
            elif k == varargs_name:
                if replace_names is None:
                    bound.arguments[k] = tuple(_replacer(data, v1) for v1 in v)
            else:
                if replace_names is None or k in replace_names:
                    bound.arguments[k] = _replacer(data, v)
        new_args = bound.args
        new_kwargs = bound.kwargs
        args_and_kwargs = {**bound.arguments, **bound.kwargs}
        # Auto-generate a label from the label_namer argument if needed.
        if label_namer and "label" not in args_and_kwargs:
            new_kwargs["label"] = _label_from_arg(
                args_and_kwargs.get(label_namer), auto_label)
        return func(*new_args, **new_kwargs)
    inner.__doc__ = _add_data_doc(inner.__doc__, replace_names)
    inner.__signature__ = new_sig
    return inner
# Startup diagnostics, emitted only when debug logging is enabled.
_log.debug('matplotlib version %s', __version__)
_log.debug('interactive is %s', is_interactive())
_log.debug('platform is %s', sys.platform)
_log.debug('loaded modules: %s', list(sys.modules))
| true | true |
1c4797802e5895313ae0514ecdda3acd949bd084 | 6,958 | py | Python | Lib/objc/CoreTelephony.py | snazari/Pyto | bcea7bbef35cab21ce73087b1a0c00a07d07ec72 | [
"MIT"
] | 701 | 2018-10-22T11:54:09.000Z | 2022-03-31T14:39:30.000Z | Lib/objc/CoreTelephony.py | snazari/Pyto | bcea7bbef35cab21ce73087b1a0c00a07d07ec72 | [
"MIT"
] | 229 | 2018-10-24T09:15:31.000Z | 2021-12-24T16:51:37.000Z | Lib/objc/CoreTelephony.py | snazari/Pyto | bcea7bbef35cab21ce73087b1a0c00a07d07ec72 | [
"MIT"
] | 131 | 2018-11-25T18:33:03.000Z | 2022-03-24T03:18:07.000Z | """
Classes from the 'CoreTelephony' framework.
"""
try:
    from rubicon.objc import ObjCClass
except (ImportError, ValueError):
    # rubicon-objc may be missing entirely (ImportError) or refuse to load
    # outside an Objective-C runtime (ValueError).  Previously only
    # ValueError was caught, so a plain ImportError broke this module on
    # systems without rubicon installed.  Fall back to a stub so every
    # class lookup below resolves to None.
    def ObjCClass(name):
        return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
# Lazily-resolved proxies for the public classes of the CoreTelephony
# framework; each name is None when the Objective-C runtime is unavailable.
MuxNotificationSink = _Class("MuxNotificationSink")
CoreTelephonyClientRemoteAsyncProxy = _Class("CoreTelephonyClientRemoteAsyncProxy")
CoreTelephonyClientDelegateProxy = _Class("CoreTelephonyClientDelegateProxy")
CTDisplayPlanList = _Class("CTDisplayPlanList")
CTDisplayPlan = _Class("CTDisplayPlan")
CTEmergencyModeResult = _Class("CTEmergencyModeResult")
CTDeviceDataUsage = _Class("CTDeviceDataUsage")
CTPerAppDataUsage = _Class("CTPerAppDataUsage")
CTAppDataUsage = _Class("CTAppDataUsage")
CTDataUsed = _Class("CTDataUsed")
CTDataUsage = _Class("CTDataUsage")
CTXPCContexts = _Class("CTXPCContexts")
CTXPCContextInfo = _Class("CTXPCContextInfo")
CTXPCSimLessContexts = _Class("CTXPCSimLessContexts")
CTXPCSimLessContextInfo = _Class("CTXPCSimLessContextInfo")
CTXPCServiceSubscriptionInfo = _Class("CTXPCServiceSubscriptionInfo")
CTXPCServiceSubscriptionContext = _Class("CTXPCServiceSubscriptionContext")
CTBandInfo = _Class("CTBandInfo")
CTRadioAccessTechnology = _Class("CTRadioAccessTechnology")
CTSweetgumUsageAccountMetrics = _Class("CTSweetgumUsageAccountMetrics")
CTLocalDevice = _Class("CTLocalDevice")
CTSubscriber = _Class("CTSubscriber")
CTBundle = _Class("CTBundle")
CTCellularData = _Class("CTCellularData")
CTSubscriberInfo = _Class("CTSubscriberInfo")
CTCallForwardingValue = _Class("CTCallForwardingValue")
CTVoicemailInfoType = _Class("CTVoicemailInfoType")
CTSweetgumDataPlanMetrics = _Class("CTSweetgumDataPlanMetrics")
CTBinarySMS = _Class("CTBinarySMS")
CTSMSDataType = _Class("CTSMSDataType")
CTPlanList = _Class("CTPlanList")
CTSuppServicesNotificationData = _Class("CTSuppServicesNotificationData")
CTInstalledPlan = _Class("CTInstalledPlan")
CTRemoteDeviceList = _Class("CTRemoteDeviceList")
CTSubscriberAuthDataHolder = _Class("CTSubscriberAuthDataHolder")
CTDataSettings = _Class("CTDataSettings")
CTSweetgumCapabilities = _Class("CTSweetgumCapabilities")
CTPhoneBookEntry = _Class("CTPhoneBookEntry")
CTSweetgumUsagePlanItemMessages = _Class("CTSweetgumUsagePlanItemMessages")
CTIMSRegistrationTransportInfo = _Class("CTIMSRegistrationTransportInfo")
CTCallCapabilities = _Class("CTCallCapabilities")
CTDeviceIdentifier = _Class("CTDeviceIdentifier")
CTActivationPolicyState = _Class("CTActivationPolicyState")
CTSweetgumAppsInfo = _Class("CTSweetgumAppsInfo")
CTEmergencyMode = _Class("CTEmergencyMode")
CTPhoneNumberInfo = _Class("CTPhoneNumberInfo")
CTCellInfo = _Class("CTCellInfo")
CTSubscriberAuthResult = _Class("CTSubscriberAuthResult")
CTSubscriberAuthRequest = _Class("CTSubscriberAuthRequest")
CTSubscriberAlgorithm = _Class("CTSubscriberAlgorithm")
CTSubscriberAlgorithmEAPAKA = _Class("CTSubscriberAlgorithmEAPAKA")
CTSubscriberAlgorithmEAPSIM = _Class("CTSubscriberAlgorithmEAPSIM")
CTRemoteDevice = _Class("CTRemoteDevice")
CTSweetgumPlan = _Class("CTSweetgumPlan")
CTNetworkList = _Class("CTNetworkList")
CTSweetgumPlansInfo = _Class("CTSweetgumPlansInfo")
CTSIMToolkitMenu = _Class("CTSIMToolkitMenu")
CoreTelephonyClient = _Class("CoreTelephonyClient")
CTSignalStrengthMeasurements = _Class("CTSignalStrengthMeasurements")
CTSignalStrengthInfo = _Class("CTSignalStrengthInfo")
CTCall = _Class("CTCall")
CTCallCenter = _Class("CTCallCenter")
CoreTelephonyClientMux = _Class("CoreTelephonyClientMux")
CTRadioFrequencyFrontEndScanData = _Class("CTRadioFrequencyFrontEndScanData")
CTNetworkSelectionInfo = _Class("CTNetworkSelectionInfo")
CTEncryptionStatusInfo = _Class("CTEncryptionStatusInfo")
CTRemotePlanIdentifierList = _Class("CTRemotePlanIdentifierList")
CTPlanIdentifier = _Class("CTPlanIdentifier")
CTRemotePlanIdentifier = _Class("CTRemotePlanIdentifier")
CTXPCError = _Class("CTXPCError")
CTTelephonyNetworkInfo = _Class("CTTelephonyNetworkInfo")
CTPhoneNumber = _Class("CTPhoneNumber")
CTCarrier = _Class("CTCarrier")
CTCellularPlanProvisioningRequest = _Class("CTCellularPlanProvisioningRequest")
CTMobileEquipmentInfoList = _Class("CTMobileEquipmentInfoList")
CTMobileEquipmentInfo = _Class("CTMobileEquipmentInfo")
CTDataStatus = _Class("CTDataStatus")
CTEnhancedLinkQualityMetric = _Class("CTEnhancedLinkQualityMetric")
CTEnhancedDataLinkQualityMetric = _Class("CTEnhancedDataLinkQualityMetric")
CTVoiceLinkQualityMetric = _Class("CTVoiceLinkQualityMetric")
CTCellularPlanManagerCameraScanAction = _Class("CTCellularPlanManagerCameraScanAction")
CTCellularPlanProvisioning = _Class("CTCellularPlanProvisioning")
CTIMSRegistrationStatus = _Class("CTIMSRegistrationStatus")
CTServiceDescriptorContainer = _Class("CTServiceDescriptorContainer")
CTServiceDescriptor = _Class("CTServiceDescriptor")
CTEmailAddress = _Class("CTEmailAddress")
CTSIMToolkitItemList = _Class("CTSIMToolkitItemList")
CTSIMToolkitItem = _Class("CTSIMToolkitItem")
CTMessageStatus = _Class("CTMessageStatus")
CTCellularPlanProvisioningOnDeviceActivationRequest = _Class(
    "CTCellularPlanProvisioningOnDeviceActivationRequest"
)
CTPNRContextInfo = _Class("CTPNRContextInfo")
CTPNRRequestSentInfo = _Class("CTPNRRequestSentInfo")
CTPNRRequestType = _Class("CTPNRRequestType")
CTPNRDataType = _Class("CTPNRDataType")
CTDataConnectionStatus = _Class("CTDataConnectionStatus")
CTAudioCodecInfo = _Class("CTAudioCodecInfo")
CTSimLabel = _Class("CTSimLabel")
CTMessagePart = _Class("CTMessagePart")
CTMmsEncoder = _Class("CTMmsEncoder")
CTCellIdInfo = _Class("CTCellIdInfo")
CTMmsRegistrationFailureInfoType = _Class("CTMmsRegistrationFailureInfoType")
CTMessageCenter = _Class("CTMessageCenter")
CTPlan = _Class("CTPlan")
CTRemotePlan = _Class("CTRemotePlan")
CTRemoteBlacklistPlan = _Class("CTRemoteBlacklistPlan")
CTPendingPlan = _Class("CTPendingPlan")
CTSweetgumUsagePlanItemData = _Class("CTSweetgumUsagePlanItemData")
CTSweetgumUserConsentFlowInfo = _Class("CTSweetgumUserConsentFlowInfo")
CTNetwork = _Class("CTNetwork")
CTSweetgumDataPlanMetricsItem = _Class("CTSweetgumDataPlanMetricsItem")
CTRegistrationDisplayStatus = _Class("CTRegistrationDisplayStatus")
CTRatSelection = _Class("CTRatSelection")
CTAsciiAddress = _Class("CTAsciiAddress")
CTSweetgumPlanGroup = _Class("CTSweetgumPlanGroup")
CTDataConnectionAvailabilityStatus = _Class("CTDataConnectionAvailabilityStatus")
CTSweetgumUsageInfo = _Class("CTSweetgumUsageInfo")
CTSupportedMaxDataRates = _Class("CTSupportedMaxDataRates")
CTMessage = _Class("CTMessage")
CTSweetgumUsagePlanMetrics = _Class("CTSweetgumUsagePlanMetrics")
CTServiceDisconnectionStatus = _Class("CTServiceDisconnectionStatus")
CTPlanTransferAttributes = _Class("CTPlanTransferAttributes")
CTTetheringStatus = _Class("CTTetheringStatus")
CTPriVersion = _Class("CTPriVersion")
CTSweetgumUsagePlanItemVoice = _Class("CTSweetgumUsagePlanItemVoice")
CTSweetgumDataPlanMetricsError = _Class("CTSweetgumDataPlanMetricsError")
| 47.333333 | 87 | 0.841765 |
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
MuxNotificationSink = _Class("MuxNotificationSink")
CoreTelephonyClientRemoteAsyncProxy = _Class("CoreTelephonyClientRemoteAsyncProxy")
CoreTelephonyClientDelegateProxy = _Class("CoreTelephonyClientDelegateProxy")
CTDisplayPlanList = _Class("CTDisplayPlanList")
CTDisplayPlan = _Class("CTDisplayPlan")
CTEmergencyModeResult = _Class("CTEmergencyModeResult")
CTDeviceDataUsage = _Class("CTDeviceDataUsage")
CTPerAppDataUsage = _Class("CTPerAppDataUsage")
CTAppDataUsage = _Class("CTAppDataUsage")
CTDataUsed = _Class("CTDataUsed")
CTDataUsage = _Class("CTDataUsage")
CTXPCContexts = _Class("CTXPCContexts")
CTXPCContextInfo = _Class("CTXPCContextInfo")
CTXPCSimLessContexts = _Class("CTXPCSimLessContexts")
CTXPCSimLessContextInfo = _Class("CTXPCSimLessContextInfo")
CTXPCServiceSubscriptionInfo = _Class("CTXPCServiceSubscriptionInfo")
CTXPCServiceSubscriptionContext = _Class("CTXPCServiceSubscriptionContext")
CTBandInfo = _Class("CTBandInfo")
CTRadioAccessTechnology = _Class("CTRadioAccessTechnology")
CTSweetgumUsageAccountMetrics = _Class("CTSweetgumUsageAccountMetrics")
CTLocalDevice = _Class("CTLocalDevice")
CTSubscriber = _Class("CTSubscriber")
CTBundle = _Class("CTBundle")
CTCellularData = _Class("CTCellularData")
CTSubscriberInfo = _Class("CTSubscriberInfo")
CTCallForwardingValue = _Class("CTCallForwardingValue")
CTVoicemailInfoType = _Class("CTVoicemailInfoType")
CTSweetgumDataPlanMetrics = _Class("CTSweetgumDataPlanMetrics")
CTBinarySMS = _Class("CTBinarySMS")
CTSMSDataType = _Class("CTSMSDataType")
CTPlanList = _Class("CTPlanList")
CTSuppServicesNotificationData = _Class("CTSuppServicesNotificationData")
CTInstalledPlan = _Class("CTInstalledPlan")
CTRemoteDeviceList = _Class("CTRemoteDeviceList")
CTSubscriberAuthDataHolder = _Class("CTSubscriberAuthDataHolder")
CTDataSettings = _Class("CTDataSettings")
CTSweetgumCapabilities = _Class("CTSweetgumCapabilities")
CTPhoneBookEntry = _Class("CTPhoneBookEntry")
CTSweetgumUsagePlanItemMessages = _Class("CTSweetgumUsagePlanItemMessages")
CTIMSRegistrationTransportInfo = _Class("CTIMSRegistrationTransportInfo")
CTCallCapabilities = _Class("CTCallCapabilities")
CTDeviceIdentifier = _Class("CTDeviceIdentifier")
CTActivationPolicyState = _Class("CTActivationPolicyState")
CTSweetgumAppsInfo = _Class("CTSweetgumAppsInfo")
CTEmergencyMode = _Class("CTEmergencyMode")
CTPhoneNumberInfo = _Class("CTPhoneNumberInfo")
CTCellInfo = _Class("CTCellInfo")
CTSubscriberAuthResult = _Class("CTSubscriberAuthResult")
CTSubscriberAuthRequest = _Class("CTSubscriberAuthRequest")
CTSubscriberAlgorithm = _Class("CTSubscriberAlgorithm")
CTSubscriberAlgorithmEAPAKA = _Class("CTSubscriberAlgorithmEAPAKA")
CTSubscriberAlgorithmEAPSIM = _Class("CTSubscriberAlgorithmEAPSIM")
CTRemoteDevice = _Class("CTRemoteDevice")
CTSweetgumPlan = _Class("CTSweetgumPlan")
CTNetworkList = _Class("CTNetworkList")
CTSweetgumPlansInfo = _Class("CTSweetgumPlansInfo")
CTSIMToolkitMenu = _Class("CTSIMToolkitMenu")
CoreTelephonyClient = _Class("CoreTelephonyClient")
CTSignalStrengthMeasurements = _Class("CTSignalStrengthMeasurements")
CTSignalStrengthInfo = _Class("CTSignalStrengthInfo")
CTCall = _Class("CTCall")
CTCallCenter = _Class("CTCallCenter")
CoreTelephonyClientMux = _Class("CoreTelephonyClientMux")
CTRadioFrequencyFrontEndScanData = _Class("CTRadioFrequencyFrontEndScanData")
CTNetworkSelectionInfo = _Class("CTNetworkSelectionInfo")
CTEncryptionStatusInfo = _Class("CTEncryptionStatusInfo")
CTRemotePlanIdentifierList = _Class("CTRemotePlanIdentifierList")
CTPlanIdentifier = _Class("CTPlanIdentifier")
CTRemotePlanIdentifier = _Class("CTRemotePlanIdentifier")
CTXPCError = _Class("CTXPCError")
CTTelephonyNetworkInfo = _Class("CTTelephonyNetworkInfo")
CTPhoneNumber = _Class("CTPhoneNumber")
CTCarrier = _Class("CTCarrier")
CTCellularPlanProvisioningRequest = _Class("CTCellularPlanProvisioningRequest")
CTMobileEquipmentInfoList = _Class("CTMobileEquipmentInfoList")
CTMobileEquipmentInfo = _Class("CTMobileEquipmentInfo")
CTDataStatus = _Class("CTDataStatus")
CTEnhancedLinkQualityMetric = _Class("CTEnhancedLinkQualityMetric")
CTEnhancedDataLinkQualityMetric = _Class("CTEnhancedDataLinkQualityMetric")
CTVoiceLinkQualityMetric = _Class("CTVoiceLinkQualityMetric")
CTCellularPlanManagerCameraScanAction = _Class("CTCellularPlanManagerCameraScanAction")
CTCellularPlanProvisioning = _Class("CTCellularPlanProvisioning")
CTIMSRegistrationStatus = _Class("CTIMSRegistrationStatus")
CTServiceDescriptorContainer = _Class("CTServiceDescriptorContainer")
CTServiceDescriptor = _Class("CTServiceDescriptor")
CTEmailAddress = _Class("CTEmailAddress")
CTSIMToolkitItemList = _Class("CTSIMToolkitItemList")
CTSIMToolkitItem = _Class("CTSIMToolkitItem")
CTMessageStatus = _Class("CTMessageStatus")
CTCellularPlanProvisioningOnDeviceActivationRequest = _Class(
"CTCellularPlanProvisioningOnDeviceActivationRequest"
)
CTPNRContextInfo = _Class("CTPNRContextInfo")
CTPNRRequestSentInfo = _Class("CTPNRRequestSentInfo")
CTPNRRequestType = _Class("CTPNRRequestType")
CTPNRDataType = _Class("CTPNRDataType")
CTDataConnectionStatus = _Class("CTDataConnectionStatus")
CTAudioCodecInfo = _Class("CTAudioCodecInfo")
CTSimLabel = _Class("CTSimLabel")
CTMessagePart = _Class("CTMessagePart")
CTMmsEncoder = _Class("CTMmsEncoder")
CTCellIdInfo = _Class("CTCellIdInfo")
CTMmsRegistrationFailureInfoType = _Class("CTMmsRegistrationFailureInfoType")
CTMessageCenter = _Class("CTMessageCenter")
CTPlan = _Class("CTPlan")
CTRemotePlan = _Class("CTRemotePlan")
CTRemoteBlacklistPlan = _Class("CTRemoteBlacklistPlan")
CTPendingPlan = _Class("CTPendingPlan")
CTSweetgumUsagePlanItemData = _Class("CTSweetgumUsagePlanItemData")
CTSweetgumUserConsentFlowInfo = _Class("CTSweetgumUserConsentFlowInfo")
CTNetwork = _Class("CTNetwork")
CTSweetgumDataPlanMetricsItem = _Class("CTSweetgumDataPlanMetricsItem")
CTRegistrationDisplayStatus = _Class("CTRegistrationDisplayStatus")
CTRatSelection = _Class("CTRatSelection")
CTAsciiAddress = _Class("CTAsciiAddress")
CTSweetgumPlanGroup = _Class("CTSweetgumPlanGroup")
CTDataConnectionAvailabilityStatus = _Class("CTDataConnectionAvailabilityStatus")
CTSweetgumUsageInfo = _Class("CTSweetgumUsageInfo")
CTSupportedMaxDataRates = _Class("CTSupportedMaxDataRates")
CTMessage = _Class("CTMessage")
CTSweetgumUsagePlanMetrics = _Class("CTSweetgumUsagePlanMetrics")
CTServiceDisconnectionStatus = _Class("CTServiceDisconnectionStatus")
CTPlanTransferAttributes = _Class("CTPlanTransferAttributes")
CTTetheringStatus = _Class("CTTetheringStatus")
CTPriVersion = _Class("CTPriVersion")
CTSweetgumUsagePlanItemVoice = _Class("CTSweetgumUsagePlanItemVoice")
CTSweetgumDataPlanMetricsError = _Class("CTSweetgumDataPlanMetricsError")
| true | true |
1c4798111c6d8c070c9d6fc6c731414b5eeea115 | 34 | py | Python | main/views/admin/profile/__init__.py | tiberiucorbu/av-website | f26f44a367d718316442506b130a7034697670b8 | [
"MIT"
] | null | null | null | main/views/admin/profile/__init__.py | tiberiucorbu/av-website | f26f44a367d718316442506b130a7034697670b8 | [
"MIT"
] | null | null | null | main/views/admin/profile/__init__.py | tiberiucorbu/av-website | f26f44a367d718316442506b130a7034697670b8 | [
"MIT"
] | null | null | null | from .profile_controller import *
| 17 | 33 | 0.823529 | from .profile_controller import *
| true | true |
1c4799987b867024deedfd8f407f6c7f0bdfb743 | 1,772 | py | Python | keylime/tornado_requests.py | ansasaki/keylime | 6aeb105975e8f2b3e9c83417dcf69b25dc2d69e4 | [
"Apache-2.0"
] | null | null | null | keylime/tornado_requests.py | ansasaki/keylime | 6aeb105975e8f2b3e9c83417dcf69b25dc2d69e4 | [
"Apache-2.0"
] | null | null | null | keylime/tornado_requests.py | ansasaki/keylime | 6aeb105975e8f2b3e9c83417dcf69b25dc2d69e4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import ssl
from tornado import httpclient
from keylime import json
async def request(method, url, params=None, data=None, context=None, headers=None):
    """Issue an asynchronous HTTP request and wrap the outcome.

    Query *params* are appended to *url*; a dict *data* payload is JSON
    encoded.  Supplying an SSL *context* upgrades the scheme to https.
    Failures are converted into TornadoResponse objects rather than raised.
    """
    http_client = httpclient.AsyncHTTPClient()
    if params:
        query = "&".join(f"{key}={params[key]}" for key in params)
        url = f"{url}?{query}"
    if context is not None:
        url = url.replace('http://', 'https://', 1)
    if isinstance(data, dict):
        # Convert dict to JSON before sending
        data = json.dumps(data)
    if headers is None:
        headers = {}
    headers.setdefault("Content-Type", "application/json")
    try:
        req = httpclient.HTTPRequest(url=url,
                                     method=method,
                                     ssl_options=context,
                                     body=data,
                                     headers=headers)
        response = await http_client.fetch(req)
    except httpclient.HTTPError as e:
        # HTTP-level errors may or may not carry a server response.
        if e.response is None:
            return TornadoResponse(500, str(e))
        return TornadoResponse(e.response.code, e.response.body)
    except ConnectionError as e:
        return TornadoResponse(599, f"Connection error: {str(e)}")
    except ssl.SSLError as e:
        return TornadoResponse(599, f"SSL connection error: {str(e)}")
    if response is None:
        return None
    return TornadoResponse(response.code, response.body)
class TornadoResponse:
    """Minimal response wrapper exposing a requests-like interface."""

    def __init__(self, code, body):
        """Store the HTTP status *code* and raw response *body*."""
        self.status_code = code
        self.body = body
| 28.580645 | 83 | 0.586907 |
import ssl
from tornado import httpclient
from keylime import json
async def request(method, url, params=None, data=None, context=None, headers=None):
http_client = httpclient.AsyncHTTPClient()
if params is not None and len(list(params.keys())) > 0:
url += '?'
for key in list(params.keys()):
url += f"{key}={params[key]}&"
url = url[:-1]
if context is not None:
url = url.replace('http://', 'https://', 1)
if isinstance(data, dict):
data = json.dumps(data)
if headers is None:
headers = {}
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
try:
req = httpclient.HTTPRequest(url=url,
method=method,
ssl_options=context,
body=data,
headers=headers)
response = await http_client.fetch(req)
except httpclient.HTTPError as e:
if e.response is None:
return TornadoResponse(500, str(e))
return TornadoResponse(e.response.code, e.response.body)
except ConnectionError as e:
return TornadoResponse(599, f"Connection error: {str(e)}")
except ssl.SSLError as e:
return TornadoResponse(599, f"SSL connection error: {str(e)}")
if response is None:
return None
return TornadoResponse(response.code, response.body)
class TornadoResponse:
def __init__(self, code, body):
self.status_code = code
self.body = body
| true | true |
1c479a27833091f86e7dce2d076b0b29113122e0 | 1,190 | py | Python | rgbContrast.py | tsarjak/gsoc_code_library | 961cea8e0833d28e5c78e7dd06f7c3823b38cbfb | [
"MIT"
] | null | null | null | rgbContrast.py | tsarjak/gsoc_code_library | 961cea8e0833d28e5c78e7dd06f7c3823b38cbfb | [
"MIT"
] | null | null | null | rgbContrast.py | tsarjak/gsoc_code_library | 961cea8e0833d28e5c78e7dd06f7c3823b38cbfb | [
"MIT"
] | null | null | null | import cv2
from PIL import Image
import numpy as np
def arrayToImage(img, sizeX, sizeY, saveAs):
    """Convert a float RGB array (values in [0, 1]) to 8-bit and save it.

    Parameters
    ----------
    img : array-like of shape (sizeX, sizeY, 3) or larger
        Per-channel float intensities in [0, 1].
    sizeX, sizeY : int
        Region of *img* to convert.
    saveAs : str
        Output image file path.
    """
    # Vectorized scale-and-cast replaces the original per-pixel triple loop;
    # float -> uint8 astype truncates exactly like the elementwise
    # assignment into a uint8 array did.
    rgbArray = (np.asarray(img)[:sizeX, :sizeY, :3] * 255).astype('uint8')
    Image.fromarray(rgbArray).save(saveAs)
# --- Script: warm up reds/greens and adjust blue for extra contrast. ---
im = Image.open("inImage.jpg")
sizeX = im.size[0]
sizeY = im.size[1]
# Load pixels as float64 in [0, 1], indexed (x, y, channel) exactly like the
# original per-pixel loops (note: transposed w.r.t. numpy's (row, col)).
# Vectorized: replaces O(W*H*3) Python loops with whole-array operations.
img = np.asarray(im, dtype='float').transpose(1, 0, 2)[:, :, :3] / 255
factor = 0.4
# Brighten red and green toward 1 by a fixed fraction of their headroom.
img[:, :, 0] += (1 - img[:, :, 0]) * factor
img[:, :, 1] += (1 - img[:, :, 1]) * factor
# Where the adjusted red exceeds green, darken blue; otherwise brighten it
# (same per-pixel rule as before, using the already-updated channels).
blue = img[:, :, 2]
red_dominant = img[:, :, 0] > img[:, :, 1]
blue[red_dominant] -= blue[red_dominant] * factor
blue[~red_dominant] += (1 - blue[~red_dominant]) * factor
arrayToImage(img, sizeX, sizeY, "outImage6.jpg")
'''
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
''' | 25.869565 | 65 | 0.544538 | import cv2
from PIL import Image
import numpy as np
def arrayToImage(img,sizeX,sizeY,saveAs):
rgbArray = np.zeros((sizeX,sizeY,3),'uint8')
for i in range(0,sizeX):
for j in range(0,sizeY):
for k in range(0,3):
rgbArray[i,j,k] = img[i,j,k] * 255
img = Image.fromarray(rgbArray)
img.save(saveAs)
im = Image.open("inImage.jpg")
sizeX = im.size[0]
sizeY = im.size[1]
photo = im.load()
img = np.zeros((sizeX,sizeY,3),'float')
for i in range(0,sizeX):
for j in range(0,sizeY):
for k in range(0,3):
img[i,j,k] = photo[i,j][k]
img[i,j,k] = ((img[i,j,k])/255)
factor = 0.4
for i in range(0, sizeX):
for j in range(0,sizeY):
img[i,j,0] = ((1 - img[i,j,0]) * factor) + img[i,j,0]
img[i,j,1] = ((1 - img[i,j,1]) * factor) + img[i,j,1]
if img[i,j,0] > img[i,j,1] :
img[i,j,2] = img[i,j,2] - (img[i,j,2] * factor)
else:
img[i,j,2] = ((1 - img[i,j,2]) * factor) + img[i,j,2]
arrayToImage(img, sizeX, sizeY, "outImage6.jpg")
| true | true |
1c479cab6063cd842005ff2b64e355a6610808bd | 31,229 | py | Python | fstunes/__init__.py | raxod502/fstunes | d54860ba1a709ce75855e6897d7f8019ecb92640 | [
"MIT"
] | 1 | 2019-05-03T04:08:17.000Z | 2019-05-03T04:08:17.000Z | fstunes/__init__.py | raxod502/fstunes | d54860ba1a709ce75855e6897d7f8019ecb92640 | [
"MIT"
] | null | null | null | fstunes/__init__.py | raxod502/fstunes | d54860ba1a709ce75855e6897d7f8019ecb92640 | [
"MIT"
] | null | null | null | import argparse
import bisect
import collections
import math
import mutagen
import os
import pathlib
import random
import re
import shutil
import string
import sys
def has_duplicates(l):
    """Return True if *l* contains any repeated (hashable) element."""
    seen = set()
    for item in l:
        if item in seen:
            return True
        seen.add(item)
    return False
def iter_len(iterable):
    """Count the items in *iterable*, consuming it."""
    count = 0
    for _ in iterable:
        count += 1
    return count
def plural(n):
    """Return the plural suffix for count *n*: "" for 1, "s" otherwise."""
    return "" if n == 1 else "s"
def pluralen(n):
    """Return the plural suffix for the length of *n* (a sized object)."""
    return "" if len(n) == 1 else "s"
def plurals(n):
    """Return (n, suffix) for splatting into "{} thing{}" format strings."""
    suffix = "s" if n != 1 else ""
    return n, suffix
def pluralens(n):
    """Return (len(n), suffix) for splatting into format strings."""
    count = len(n)
    return count, ("s" if count != 1 else "")
def log(message, *args, **kwargs):
    """Write *message* to stderr with the "fstunes: " prefix.

    Extra positional/keyword arguments are forwarded to print(), so
    callers can pass e.g. end="".
    """
    print("fstunes: {}".format(message), *args, **kwargs, file=sys.stderr)
def die(message=None, *args, **kwargs):
    """Log *message* (if given) and terminate with exit status 1.

    When the FSTUNES_DEBUG environment variable is set, fail an assert
    instead so the full stack trace is printed.
    """
    if os.environ.get("FSTUNES_DEBUG"):
        assert False, "stacktrace requested"
    if message is not None:
        log(message, *args, **kwargs)
    sys.exit(1)
def are_you_sure(default, yes):
    """Ask for interactive confirmation and return the user's choice.

    The prompt capitalizes the *default* answer ([Y/n] vs [y/N]); an
    answer that starts with neither "y" nor "n" yields the default.
    When *yes* is set, the prompt is auto-answered affirmatively.
    """
    print("Proceed? {} ".format("[Y/n]" if default else "[y/N]"), end="")
    if yes:
        response = "y (from command-line options)"
        print(response)
    else:
        response = input()
    answer = response.lower()
    if answer.startswith("y"):
        return True
    if answer.startswith("n"):
        return False
    return default
def add_yes_option(parser):
    """Add the -y/--yes flag (skip confirmation prompts) to *parser*."""
    parser.add_argument("-y", "--yes", action="store_true",
                        help="Don't ask for confirmation")
def add_fields_option(parser):
    """Add the -f/--fields option (comma-separated field list) to *parser*."""
    parser.add_argument("-f", "--fields", metavar="FIELD1,FIELD2,...",
                        help="Which metadata fields to include")
def add_match_options(parser):
    """Add the song-filtering options shared by several subcommands.

    -m guesses the matcher kind (literal, then set, then range); the
    --match-* variants force one kind; -M matches everything for a field.
    All of these are parsed later by parse_matchers.
    """
    parser.add_argument("-m", "--match", metavar="FIELD=EXPR", action="append",
                        help="Filter songs")
    parser.add_argument("--match-literal", metavar="FIELD=VALUE",
                        action="append", help="Filter songs by literal match")
    parser.add_argument("--match-set", metavar="FIELD=VALUE1,VALUE2,...",
                        action="append", help="Filter songs by set membership")
    parser.add_argument("--match-range", metavar="FIELD=LOW-HIGH",
                        action="append",
                        help="Filter songs by range inclusion")
    parser.add_argument("-M", "--match-all", metavar="FIELD", action="append",
                        help="Do not filter songs")
    # Delimiters are configurable so values containing "," or "-" (e.g.
    # hyphenated album names) can still be matched.
    parser.add_argument("--set-delimiter", default=",", metavar="DELIM",
                        help="Delimiter to use for set filtering")
    parser.add_argument("--range-delimiter", default="-", metavar="DELIM",
                        help="Delimiter to use for range filtering")
# Option strings recognized by SortAction; the option used determines the
# sort modifier recorded for the field.
SORT_OPTION_STRINGS = ("-s", "--sort")
REVERSE_OPTION_STRINGS = ("-r", "--reverse")
SHUFFLE_OPTION_STRINGS = ("-x", "--shuffle")


class SortAction(argparse.Action):
    """argparse action that accumulates sort directives in order.

    Every occurrence of -s/-r/-x appends a {"field", "modifier"} dict to
    namespace.sort, preserving the order the options appeared on the
    command line (which determines sort priority in parse_sorters).
    """

    def __call__(self, parser, namespace, value, option_string):
        if option_string in SORT_OPTION_STRINGS:
            modifier = "sort"
        elif option_string in REVERSE_OPTION_STRINGS:
            modifier = "reverse"
        elif option_string in SHUFFLE_OPTION_STRINGS:
            modifier = "shuffle"
        else:
            # BUG FIX: the original formatted the name `modifier` here,
            # which is unbound on this branch, so the failure would have
            # raised NameError instead of the intended assertion message.
            assert False, (
                "unexpected option string: {}".format(option_string))
        if namespace.sort is None:
            namespace.sort = []
        namespace.sort.append({
            "field": value,
            "modifier": modifier,
        })
def add_sort_options(parser):
    """Add -s/-r/-x sort options; all share SortAction so that their
    relative order on the command line is preserved in namespace.sort."""
    parser.add_argument(*SORT_OPTION_STRINGS, action=SortAction,
                        help="Sort by field")
    parser.add_argument(*REVERSE_OPTION_STRINGS, action=SortAction,
                        help="Sort by field in reverse order")
    parser.add_argument(*SHUFFLE_OPTION_STRINGS, action=SortAction,
                        help="Shuffle by field")
def get_parser():
    """Build the top-level argparse parser with all fstunes subcommands.

    Subcommands: import, playlist (create/delete), insert, remove, edit,
    list, delete, seek.  NOTE(review): several of these are declared here
    but not yet dispatched in handle_args.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Minimal command-line music library manager and media player."))
    subparsers = parser.add_subparsers(dest="subcommand")
    # import: copy media files into the library tree.
    parser_import = subparsers.add_parser(
        "import", help="Add media files to library")
    parser_import.add_argument(
        "paths", nargs="+", metavar="path", help="Media file or directory")
    # playlist create/delete: manage playlist directories.
    parser_playlist = subparsers.add_parser(
        "playlist", help="Create or delete playlists")
    subparsers_playlist = parser_playlist.add_subparsers(
        dest="subcommand_playlist")
    parser_playlist_create = subparsers_playlist.add_parser(
        "create", help="Create a playlist")
    parser_playlist_create.add_argument(
        "playlists", nargs="+", metavar="playlist",
        help="Name of playlist to create")
    parser_playlist_delete = subparsers_playlist.add_parser(
        "delete", help="Delete a playlist")
    parser_playlist_delete.add_argument(
        "playlists", nargs="+", metavar="playlist",
        help="Name of playlist to delete")
    add_yes_option(parser_playlist_delete)
    # insert: add matched songs to a playlist (or the queue) at an index.
    parser_insert = subparsers.add_parser(
        "insert", help="Add songs to a playlist or the queue")
    add_match_options(parser_insert)
    add_sort_options(parser_insert)
    parser_insert.add_argument(
        "-t", "--transfer", action="store_true",
        help="Also remove songs from original playlists")
    add_yes_option(parser_insert)
    group_insert_before = parser_insert.add_mutually_exclusive_group()
    group_insert_before.add_argument(
        "--after", action="store_false", dest="before",
        help="Insert after given index")
    group_insert_before.add_argument(
        "--before", action="store_true", help="Insert before given index")
    parser_insert.add_argument(
        "playlist", help="Name of playlist in which to insert")
    parser_insert.add_argument(
        "index", type=int, help="Index at which to insert")
    # remove/edit/list/delete: operate on matched songs.
    parser_remove = subparsers.add_parser(
        "remove", help="Remove songs from a playlist or the queue")
    add_match_options(parser_remove)
    add_yes_option(parser_remove)
    parser_edit = subparsers.add_parser(
        "edit", help="Edit song metadata")
    add_match_options(parser_edit)
    add_sort_options(parser_edit)
    add_fields_option(parser_edit)
    parser_edit.add_argument(
        "-e", "--editor", help="Shell command to run text editor")
    add_yes_option(parser_edit)
    parser_list = subparsers.add_parser(
        "list", help="List songs and associated information")
    add_match_options(parser_list)
    add_sort_options(parser_list)
    add_fields_option(parser_list)
    parser_delete = subparsers.add_parser(
        "delete", help="Delete media files from library")
    add_match_options(parser_delete)
    add_yes_option(parser_delete)
    # seek: move the queue position and control playback.
    parser_seek = subparsers.add_parser(
        "seek", help="Change place in queue and play/pause")
    group_seek_play_pause = parser_seek.add_mutually_exclusive_group()
    group_seek_play_pause.add_argument(
        "-p", "--play", action="store_true", help="Start playing")
    group_seek_play_pause.add_argument(
        "-P", "--pause", action="store_true", help="Stop playing")
    parser_seek.add_argument(
        "index", type=int, nargs="?", help="Relative index to which to seek")
    return parser
def read_mutagen_key(m, key):
    """Return the comma-joined text of tag frame *key*, or None.

    None is returned both when the frame is absent and when its text is
    empty.
    """
    try:
        frame = m[key]
    except KeyError:
        return None
    return ", ".join(frame.text) or None
def read_metadata(filepath):
    """Extract tag metadata from a media file via mutagen.

    Returns a dict with keys artist, album, disk, track, song, and
    extension; missing tags yield None.  Note: callers (create_relpath)
    must therefore handle None values, not just missing keys.
    """
    m = mutagen.File(filepath)
    metadata = {}
    # Prefer the ID3 TPE2 frame (band/album artist) and fall back to
    # TPE1 (lead performer).
    metadata["artist"] = (read_mutagen_key(m, "TPE2") or
                          read_mutagen_key(m, "TPE1"))
    metadata["album"] = read_mutagen_key(m, "TALB")
    metadata["disk"] = None
    # TPOS is typically "disc/total"; keep only the leading integer.
    disk_and_total = read_mutagen_key(m, "TPOS")
    if disk_and_total:
        match = re.match(r"[0-9]+", disk_and_total)
        if match:
            metadata["disk"] = int(match.group())
    metadata["track"] = None
    # TRCK is typically "track/total"; keep only the leading integer.
    track_and_total = read_mutagen_key(m, "TRCK")
    if track_and_total:
        match = re.match(r"[0-9]+", track_and_total)
        if match:
            metadata["track"] = int(match.group())
    metadata["song"] = read_mutagen_key(m, "TIT2")
    metadata["extension"] = filepath.suffix
    return metadata
# Characters allowed verbatim in library path components; "/" and the
# escape character itself are deliberately excluded.
SAFE_CHARS = (
    string.ascii_letters + string.digits + " !\"$%&'()*+,-.[]^_`{|}~")
ESCAPE_CHAR = "#"


def escape_string(s):
    """Encode *s* for safe use as a single path component.

    Characters outside SAFE_CHARS become "#<hex codepoint>#";
    unescape_string inverts the transformation.
    """
    def encode(char):
        if char in SAFE_CHARS:
            return char
        return "{0}{1:x}{0}".format(ESCAPE_CHAR, ord(char))

    return "".join(encode(char) for char in s)
def unescape_string(s):
    """Invert escape_string: decode "#<hex>#" sequences to characters."""
    def decode(match):
        return chr(int(match.group(1), base=16))

    return re.sub(r"#([0-9a-f]+)#", decode, s)
# Placeholder stored in a path component when a tag field is absent.
MISSING_FIELD = "---"


def create_relpath(metadata):
    """Build the media-relative path "artist/album/[disk-][track] song.ext".

    BUG FIX: the original tested key *presence* ("disk" in metadata), but
    read_metadata always sets the disk/track keys (possibly to None), so
    songs without those tags were rendered with the literal string
    "None" — producing paths that parse_relpath's regex cannot match.
    Test the values for None instead.
    """
    disk = metadata.get("disk")
    track = metadata.get("track")
    disk_str = "{}-".format(disk) if disk is not None else ""
    track_str = track if track is not None else ""
    return pathlib.Path("{}/{}/{}{} {}{}".format(
        escape_string(metadata["artist"] or MISSING_FIELD),
        escape_string(metadata["album"] or MISSING_FIELD),
        disk_str,
        track_str,
        escape_string(metadata.get("song") or MISSING_FIELD),
        metadata["extension"]))
def parse_relpath(relpath):
    """Invert create_relpath: recover tag metadata from a relative path.

    Expects "artist/album/[disk-][track] song.ext"; artist/album/song are
    decoded with unescape_string and "---" placeholders map back to None.
    """
    # Groups: 1=artist, 2=album, 3=disk (optional), 4=track (optional),
    # 5=song plus extension.
    match = re.fullmatch(
        r"([^/]+)/([^/]+)/(?:([0-9]+)-)?([0-9]+)? (.+)", str(relpath))
    artist = unescape_string(match.group(1))
    if artist == MISSING_FIELD:
        artist = None
    album = unescape_string(match.group(2))
    if album == MISSING_FIELD:
        album = None
    disk = match.group(3)
    if disk:
        disk = int(disk)
    track = match.group(4)
    if track:
        track = int(track)
    song_and_extension = match.group(5)
    # Non-greedy title so that everything from the first "." onward is
    # treated as the extension.
    song_match = re.fullmatch(r"(.+?)(\..*)", song_and_extension)
    if song_match:
        song, extension = song_match.groups()
    else:
        song = song_and_extension
        extension = ""
    song = unescape_string(song)
    if song == MISSING_FIELD:
        song = None
    return {
        "artist": artist,
        "album": album,
        "disk": disk,
        "track": track,
        "song": song,
        "extension": extension,
    }
def import_song(env, filepath):
    """Copy one media file into the library tree; return True if copied.

    The destination path is derived from the file's tags.  If a file (or
    symlink) already exists at that path, it is left untouched and False
    is returned.
    """
    metadata = read_metadata(filepath)
    relpath = create_relpath(metadata)
    target = env["media"] / relpath
    if target.exists() or target.is_symlink():
        log("skipping, already exists: {} => {}"
            .format(filepath, target))
        return False
    target.parent.mkdir(parents=True, exist_ok=True)
    shutil.copyfile(filepath, target)
    return True
# File extensions accepted by the importer and the library scanner.
MEDIA_EXTENSIONS = [".mp3"]


def import_music(env, paths):
    """Recursively import media files from *paths* into the library.

    Walks each path in sorted order, importing files with a recognized
    extension and logging a summary of copied/skipped counts at the end.
    """
    copied = 0
    already_present = 0
    skipped = 0
    for path in paths:
        path = pathlib.Path(path).resolve()
        for dirpath, dirnames, filenames in os.walk(path):
            # Sort in place so the walk (and therefore logging) is
            # deterministic.
            dirnames.sort()
            filenames.sort()
            already_reported_dir = False
            for filename in filenames:
                filepath = pathlib.Path(dirpath) / filename
                suffix = filepath.suffix
                if suffix not in MEDIA_EXTENSIONS:
                    log("skipping, extension {} not recognized: {}"
                        .format(repr(suffix), filepath))
                    skipped += 1
                    continue
                # Announce each directory once, and only if it actually
                # contains importable media.
                if not already_reported_dir:
                    log("importing media from directory: {}"
                        .format(filepath.parent))
                    already_reported_dir = True
                if import_song(env, filepath):
                    copied += 1
                else:
                    already_present += 1
    log(("imported {} media file{}, skipped {} "
         "already present and {} unrecognized")
        .format(*plurals(copied), already_present, skipped))
# Names with special meaning: "media" is the canonical library tree and
# "queue" is the play queue.  User-created playlists may not use them.
MEDIA_PLAYLIST = "media"
QUEUE_PLAYLIST = "queue"
RESERVED_PLAYLISTS = (MEDIA_PLAYLIST, QUEUE_PLAYLIST)
def create_playlists(env, playlists):
    """Create one directory per named playlist.

    Dies if any name is reserved, duplicated, or already exists; all
    validation problems are reported before exiting so the user sees the
    full list at once.
    """
    for reserved_name in RESERVED_PLAYLISTS:
        if reserved_name in playlists:
            die("playlist name is reserved for fstunes: {}"
                .format(reserved_name))
    if has_duplicates(playlists):
        die("more than one playlist with the same name")
    # Playlist names are escaped the same way as tag fields so any name
    # is a valid directory name.
    paths = [env["playlists"] / escape_string(p) for p in playlists]
    should_die = False
    for playlist, path in zip(playlists, paths):
        if path.exists() or path.is_symlink():
            if path.is_dir():
                log("playlist already exists: {}".format(playlist))
            else:
                log("already exists and not a directory: {}".format(path))
            should_die = True
    if should_die:
        die()
    for path in paths:
        path.mkdir(parents=True)
    log("created {} playlist{}".format(*pluralens(playlists)))
def delete_playlists(env, playlists, yes):
    """Delete the named playlist directories after confirmation.

    Validation errors for all names are reported before exiting.  The
    confirmation prompt defaults to yes only when every playlist to be
    deleted is empty.
    """
    for reserved_name in RESERVED_PLAYLISTS:
        if reserved_name in playlists:
            die("playlist name is reserved for fstunes: {}"
                .format(reserved_name))
    if has_duplicates(playlists):
        die("more than one playlist with the same name")
    paths = [env["playlists"] / escape_string(p) for p in playlists]
    should_die = False
    for playlist, path in zip(playlists, paths):
        if not path.is_dir():
            if path.exists() or path.is_symlink():
                log("already exists and not a directory: {}".format(path))
            else:
                log("playlist does not exist: {}".format(playlist))
            should_die = True
    if should_die:
        die()
    # Count the songs in each playlist: entries are symlinks whose names
    # are integers; anything else (e.g. the queue's _current pointer) is
    # ignored.
    total_songs = 0
    deletion_list = []
    for playlist, path in zip(playlists, paths):
        num_songs = 0
        for entry_path in path.iterdir():
            if not entry_path.is_symlink():
                continue
            try:
                int(entry_path.name)
            except ValueError:
                continue
            num_songs += 1
        total_songs += num_songs
        deletion_list.append(
            "\n  {} ({} song{})"
            .format(playlist, *plurals(num_songs)))
    log("will delete the following {} playlist{} with {} total songs:{}"
        .format(*pluralens(paths), total_songs, "".join(deletion_list)))
    if not are_you_sure(default=total_songs == 0, yes=yes):
        die()
    for path in paths:
        shutil.rmtree(path)
    log("deleted {} playlist{}".format(*pluralens(playlists)))
# Environment variables selecting the library location and how much
# already-played queue history to retain.
FSTUNES_HOME_ENV_VAR = "FSTUNES_HOME"
FSTUNES_QUEUE_LENGTH_ENV_VAR = "FSTUNES_QUEUE_LENGTH"

# Fields that -m/--match and -s/--sort may reference.  "from" (source
# playlist) and "index" (position within it) exist only for playlist
# entries; the rest are derived from the media file path.
METADATA_FIELDS = (
    "artist",
    "album",
    "disk",
    "track",
    "song",
    "extension",
    "from",
    "index",
)

# Fields whose match expressions are parsed as integers.
METADATA_INT_FIELDS = (
    "disk",
    "track",
    "index",
)

assert set(METADATA_INT_FIELDS).issubset(set(METADATA_FIELDS))
def split_matcher(matcher):
    """Split "FIELD=EXPR" at the first "=" into a one- or two-item list."""
    field, sep, expr = matcher.partition("=")
    return [field, expr] if sep else [field]
def combine_matchers(true_matchers, false_matchers):
    """Tag each matcher with its polarity: (True, m) then (False, m)."""
    combined = [(True, matcher) for matcher in true_matchers]
    combined.extend((False, matcher) for matcher in false_matchers)
    return combined
def parse_matchers(args, default_to_media):
    """Convert the command-line match options into matcher descriptors.

    Returns a defaultdict mapping field name -> list of descriptors, each
    a dict with a "type" of "literal", "set", "range", or "all" plus the
    type's payload.  For -m/--match ("guess"), the three concrete types
    are tried in order (literal, set, range) and parse failures fall
    through silently; for the explicit --match-* options a parse failure
    is fatal.  If no "from" matcher was given, either default to the
    media library or die, per *default_to_media*.
    """
    match = args.match or []
    match_literal = args.match_literal or []
    match_set = args.match_set or []
    match_range = args.match_range or []
    match_all = args.match_all or []
    matchers = collections.defaultdict(list)
    for matcher_type, unparsed_matchers in (
            ("guess", match),
            ("literal", match_literal),
            ("set", match_set),
            ("range", match_range),
            ("all", match_all)):
        for unparsed_matcher in unparsed_matchers:
            # All types except "all" take FIELD=EXPR; "all" is just FIELD.
            if matcher_type != "all":
                try:
                    field, orig_expr = unparsed_matcher.split("=", maxsplit=1)
                except ValueError:
                    die("invalid match expression: {}"
                        .format(unparsed_matcher))
            else:
                field = unparsed_matcher
            if field not in METADATA_FIELDS:
                die("unsupported field: {}".format(field))
            # desc["type"] doubles as the "already parsed" flag for the
            # guess fall-through below.
            desc = {}
            if matcher_type not in ("guess", "literal", "set", "range", "all"):
                assert False, (
                    "unexpected matcher type: {}".format(matcher_type))
            if matcher_type in ("literal", "guess") and "type" not in desc:
                skip = False
                expr = orig_expr
                # Integer fields must parse as ints; for "guess" a parse
                # failure just means "try the next type".
                if field in METADATA_INT_FIELDS:
                    try:
                        expr = int(orig_expr)
                    except ValueError:
                        if matcher_type != "guess":
                            die("invalid integer literal: {}"
                                .format(orig_expr))
                        else:
                            skip = True
                if not skip:
                    desc["type"] = "literal"
                    desc["value"] = expr
            if matcher_type in ("set", "guess") and "type" not in desc:
                skip = False
                expr = orig_expr.split(args.set_delimiter)
                if field in METADATA_INT_FIELDS:
                    try:
                        expr = list(map(int, expr))
                    except ValueError:
                        if matcher_type != "guess":
                            die("invalid integer set: {}".format(orig_expr))
                        else:
                            skip = True
                if not skip:
                    desc["type"] = "set"
                    desc["values"] = expr
            if matcher_type in ("range", "guess") and "type" not in desc:
                skip = False
                try:
                    low, high = orig_expr.split(
                        args.range_delimiter, maxsplit=1)
                except ValueError:
                    if matcher_type != "guess":
                        die("invalid range (does not contain {}): {}"
                            .format(repr(args.range_delimiter), orig_expr))
                    else:
                        skip = True
                if not skip and field in METADATA_INT_FIELDS:
                    try:
                        low = int(low)
                        high = int(high)
                    except ValueError:
                        if matcher_type != "guess":
                            die("invalid integer range: {}".format(orig_expr))
                        else:
                            skip = True
                if not skip:
                    desc["type"] = "range"
                    desc["low"] = low
                    desc["high"] = high
            if matcher_type == "all" and "type" not in desc:
                desc["type"] = "all"
            if "type" not in desc:
                die("invalid match expression: {}".format(orig_expr))
            matchers[field].append(desc)
    if not matchers["from"]:
        if default_to_media:
            matchers["from"] = [{
                "type": "literal",
                "value": "media",
            }]
        else:
            die("you must select a playlist using -m from=PLAYLIST or similar")
    return matchers
def parse_sorters(args):
    """Return the ordered list of sort directives to apply to songs.

    User-specified sorters take priority, followed by a fixed
    tie-breaking cascade over every field.  The list is reversed because
    sort_songs applies sorters as successive stable sorts: the
    highest-priority sorter must run last.
    """
    sorters = []
    for sorter in args.sort or []:
        field = sorter["field"]
        if field not in METADATA_FIELDS:
            die("unsupported field: {}".format(field))
        sorters.append(dict(sorter))
    # Default tie-breakers, lowest priority last (before reversal).
    for field in (
            "from", "index", "artist", "album", "disk", "track",
            "song", "extension"):
        sorters.append({
            "field": field,
            "modifier": "sort",
        })
    sorters.reverse()
    return sorters
def apply_matchers(matchers, value):
    """Return True if *value* satisfies any matcher in *matchers*.

    An empty matcher list matches everything.  Matcher types: "all"
    (always matches), "literal" (equality), "set" (membership), and
    "range" (inclusive bounds).
    """
    if not matchers:
        return True
    for matcher in matchers:
        kind = matcher["type"]
        if kind == "all":
            return True
        if kind == "literal":
            if value == matcher["value"]:
                return True
        elif kind == "set":
            if value in matcher["values"]:
                return True
        elif kind == "range":
            if matcher["low"] <= value <= matcher["high"]:
                return True
        elif kind != "all":
            assert False, "unexpected matcher type: {}".format(kind)
    return False
def get_queue_index(env):
    """Return the current queue position (an absolute entry index).

    The position is stored as the target string of the queue's _current
    symlink.  If the symlink is missing or malformed, fall back to the
    smallest numeric entry name in the queue directory, or 0 if there is
    none.
    """
    try:
        index = int(os.readlink(env["queue_current"]))
    except (OSError, ValueError):
        min_value = math.inf
        try:
            for entry_path in env["queue"].iterdir():
                try:
                    min_value = min(min_value, int(entry_path.name))
                except ValueError:
                    # Non-numeric entries (e.g. _current itself) don't count.
                    continue
        except OSError:
            # The queue directory may not exist yet.
            pass
        index = min_value if min_value != math.inf else 0
    return index
def set_queue_index(env, index):
    """Persist the queue position by (re)pointing the _current symlink.

    The new symlink is created in the temp directory and then rename()d
    over the old one, so the update replaces the existing link in a
    single step (assumes temp and the queue share a filesystem — TODO
    confirm).
    """
    queue_current_path = env["queue_current"]
    queue_current_path.parent.mkdir(parents=True, exist_ok=True)
    queue_current_path_new = env["temp"] / env["queue_current"].name
    queue_current_path_new.parent.mkdir(parents=True, exist_ok=True)
    # The symlink's *target* is the index, stored as a string.
    queue_current_path_new.symlink_to(str(index))
    queue_current_path_new.rename(queue_current_path)
def collect_matched_songs(env, matchers):
    """Return a metadata dict for every song satisfying *matchers*.

    Songs come from two sources: the media tree itself (when the "from"
    matcher accepts "media") and every playlist directory whose name the
    "from" matcher accepts.  Playlist entries additionally carry "from"
    and "index" keys plus "relpath" (media-relative path); media entries
    carry only the tag fields and "relpath".
    """
    songs = []
    matches_media = (
        apply_matchers(matchers["from"], MEDIA_PLAYLIST) and
        env["media"].is_dir())
    if matches_media:
        # Walk artist/album/song, pruning by matcher at each level so we
        # never descend into directories that can't match.
        for artist_path in env["media"].iterdir():
            artist = unescape_string(artist_path.name)
            if not apply_matchers(matchers["artist"], artist):
                continue
            if not artist_path.is_dir():
                continue
            for album_path in artist_path.iterdir():
                album = unescape_string(album_path.name)
                if not apply_matchers(matchers["album"], album):
                    continue
                if not album_path.is_dir():
                    continue
                for song_path in album_path.iterdir():
                    if song_path.suffix not in MEDIA_EXTENSIONS:
                        continue
                    if not song_path.is_file():
                        continue
                    relpath = song_path.relative_to(env["media"])
                    metadata = parse_relpath(relpath)
                    disqualified = False
                    for field in ("disk", "track", "song", "extension"):
                        if not apply_matchers(
                                matchers[field], metadata[field]):
                            disqualified = True
                            break
                    if disqualified:
                        continue
                    metadata["relpath"] = relpath
                    songs.append(metadata)
    if env["playlists"].is_dir():
        for playlist_path in env["playlists"].iterdir():
            playlist = unescape_string(playlist_path.name)
            if not apply_matchers(matchers["from"], playlist):
                continue
            if not playlist_path.is_dir():
                continue
            # NOTE(review): insert_in_playlist displays stored indices as
            # (stored - offset); *adding* the offset here looks inverted —
            # confirm the intended sign before relying on queue indices.
            offset = get_queue_index(env) if playlist == QUEUE_PLAYLIST else 0
            for entry_path in playlist_path.iterdir():
                try:
                    index = int(entry_path.name)
                except ValueError:
                    continue
                index += offset
                if not apply_matchers(matchers["index"], index):
                    continue
                if not entry_path.is_symlink():
                    continue
                song_path = entry_path.resolve()
                relpath = song_path.relative_to(env["media"])
                metadata = parse_relpath(relpath)
                disqualified = False
                for field in ("artist", "album", "disk", "track", "song",
                              "extension"):
                    if not apply_matchers(matchers[field], metadata[field]):
                        disqualified = True
                        break
                if disqualified:
                    continue
                metadata["from"] = playlist
                metadata["index"] = index
                metadata["relpath"] = relpath
                songs.append(metadata)
    return songs
def sort_songs(songs, sorters):
    """Sort *songs* in place by applying each sorter as a stable sort.

    Sorters are applied in list order, so the last sorter has the
    highest priority (parse_sorters reverses its list accordingly).
    Songs missing the sorter's field sort first (-inf for integer
    fields, "" otherwise).
    """
    for sorter in sorters:
        field = sorter["field"]
        modifier = sorter["modifier"]
        reverse = False
        assert modifier in ("sort", "reverse", "shuffle"), (
            "unexpected sort modifier: {}".format(modifier))
        if modifier == "shuffle":
            # Shuffle by assigning each distinct field value a random
            # (memoized) key, so songs sharing a value stay together.
            memo = collections.defaultdict(lambda: random.getrandbits(64))
            # Note: key closes over field/memo, but the sort below runs
            # before the next loop iteration rebinds them, so the usual
            # late-binding pitfall doesn't apply.
            def key(value):
                if field in value:
                    return memo[value[field]]
                elif field in METADATA_INT_FIELDS:
                    return -math.inf
                else:
                    return ""
        else:
            def key(value):
                if field in value:
                    return value[field]
                elif field in METADATA_INT_FIELDS:
                    return -math.inf
                else:
                    return ""
        reverse = modifier == "reverse"
        songs.sort(key=key, reverse=reverse)
# Number of surrounding playlist entries to show when previewing an
# insertion.
CONTEXT = 3


def song_description(song, index):
    """Format one song as an indented listing line for log output.

    BUG FIX: parse_relpath always populates the "disk"/"track" keys
    (possibly with None), so the original key-presence tests rendered
    songs without those numbers as e.g. "None-None Title".  Test the
    values for None instead, and only emit the separating space when a
    disk or track number is actually present.
    """
    disk = song.get("disk")
    track = song.get("track")
    has_number = disk is not None or track is not None
    return ("\n    [{}]. {}{}{}{} ({}, {})"
            .format(index,
                    "{}-".format(disk) if disk is not None else "",
                    track if track is not None else "",
                    " " if has_number else "",
                    song["song"], song["album"], song["artist"]))


# Separator printed between context entries and the songs being inserted.
CONTEXT_DIVIDER = "\n-----"
def insert_in_playlist(env, songs, playlist, insert_index, before, yes):
    """Insert *songs* into *playlist* at *insert_index*, after confirming.

    Playlist entries are symlinks named by integer index pointing into
    the media tree.  For the queue, user-facing indices are relative to
    the current position, stored indices are absolute, and entries more
    than queue_length positions behind the current one are pruned.
    """
    # --after means "insert after the given index", i.e. before index+1.
    if not before:
        insert_index += 1
    if playlist == MEDIA_PLAYLIST:
        die("playlist name is reserved for fstunes: {}"
            .format(MEDIA_PLAYLIST))
    if playlist == QUEUE_PLAYLIST:
        # Translate the user-relative index into an absolute stored index;
        # global_offset converts back for display.
        current_index = get_queue_index(env)
        insert_index += current_index
        global_offset = current_index
    else:
        global_offset = 0
    playlist_path = env["playlists"] / playlist
    if playlist == QUEUE_PLAYLIST:
        # The queue is created lazily; other playlists must already exist.
        playlist_path.mkdir(parents=True, exist_ok=True)
    elif not playlist_path.is_dir():
        die("playlist does not exist: {}".format(playlist))
    existing_indices = []
    for entry_path in playlist_path.iterdir():
        try:
            index = int(entry_path.name)
        except ValueError:
            continue
        existing_indices.append(index)
    existing_indices.sort()
    insertion_point = bisect.bisect_left(existing_indices, insert_index)
    insertion_list = []
    removals = []
    if playlist == QUEUE_PLAYLIST:
        # Prune already-played entries beyond the configured history size.
        removal_point = bisect.bisect_left(existing_indices, current_index)
        for i in range(removal_point - env["queue_length"]):
            index = existing_indices[i]
            removals.append(playlist_path / str(index))
    # Preview: up to CONTEXT entries before the insertion point...
    for i in range(max(0, insertion_point - CONTEXT), insertion_point):
        index = existing_indices[i]
        song = parse_relpath(
            (playlist_path / str(index)).resolve().relative_to(env["media"]))
        insertion_list.append(song_description(song, index - global_offset))
    insertion_list.append(CONTEXT_DIVIDER)
    # ...then the songs to insert (and the symlinks to create for them)...
    creates = []
    for offset, song in enumerate(songs):
        song_index = insert_index + offset
        # Relative target: playlists/<name>/<i> -> ../../media/<relpath>.
        target = pathlib.Path("..") / ".." / MEDIA_PLAYLIST / song["relpath"]
        creates.append((playlist_path / str(song_index), target))
        insertion_list.append(
            song_description(song, song_index - global_offset))
    insertion_list.append(CONTEXT_DIVIDER)
    # ...then up to CONTEXT entries after, shown at their post-insert indices.
    for i in range(insertion_point,
                   min(insertion_point + CONTEXT, len(existing_indices))):
        index = existing_indices[i]
        song = parse_relpath(
            (playlist_path / str(index)).resolve().relative_to(env["media"]))
        insertion_list.append(
            song_description(song, index + len(songs) - global_offset))
    # Entries at or after the insertion point shift up by len(songs);
    # rename highest-first so a rename never clobbers a not-yet-moved entry.
    renames = []
    for i in range(insertion_point, len(existing_indices)):
        old_index = existing_indices[i]
        new_index = old_index + len(songs)
        renames.append((playlist_path / str(old_index),
                        playlist_path / str(new_index)))
    renames.reverse()
    advance = False
    if playlist == QUEUE_PLAYLIST:
        # Inserting before the current song shifts it; move the pointer
        # with it so the same song stays current.
        if current_index > insert_index:
            new_current_index = current_index + len(songs)
            advance = True
    log(("will insert the following {} song{} into "
         "playlist {} with {} song{} already:{}")
        .format(*pluralens(songs), repr(playlist),
                *pluralens(existing_indices),
                "".join(insertion_list)))
    log("will move {} symlink{}, insert {}, prune {}{}"
        .format(*pluralens(renames), len(creates), len(removals),
                ", advance pointer" if advance else ""))
    if not are_you_sure(default=True, yes=yes):
        die()
    for removal in removals:
        removal.unlink()
    for rename, target in renames:
        rename.rename(target)
    for create, target in creates:
        create.symlink_to(target)
    if advance:
        set_queue_index(env, new_current_index)
    log("inserted {} song{} into playlist {} and pruned {} (length {} -> {})"
        .format(*pluralens(songs), repr(playlist),
                len(removals), len(existing_indices),
                len(existing_indices) + len(songs) - len(removals)))
def insert_songs(
        env, matchers, sorters, playlist, index, transfer, before, yes):
    """Match, order, and insert songs into *playlist* at *index*.

    The --transfer flow (also removing the songs from their source
    playlists) is not implemented yet.
    """
    if transfer:
        raise NotImplementedError
    matched = collect_matched_songs(env, matchers)
    if not matched:
        die("no songs matched")
    sort_songs(matched, sorters)
    insert_in_playlist(env, matched, playlist, index, before=before, yes=yes)
def handle_args(args):
    """Validate the environment and dispatch the parsed subcommand.

    Requires FSTUNES_HOME to name an existing directory; the optional
    FSTUNES_QUEUE_LENGTH (default 10000) bounds how many already-played
    queue entries are kept.  NOTE(review): only import/playlist/insert
    are dispatched; the other declared subcommands fall through to
    NotImplementedError.
    """
    home = os.environ.get(FSTUNES_HOME_ENV_VAR)
    if not home:
        die("environment variable not set: {}".format(FSTUNES_HOME_ENV_VAR))
    home = pathlib.Path(home)
    if not home.is_dir():
        if home.exists() or home.is_symlink():
            die("not a directory: {}".format(home))
        die("directory does not exist: {}".format(home))
    queue_length = os.environ.get(FSTUNES_QUEUE_LENGTH_ENV_VAR)
    if queue_length:
        try:
            queue_length = int(queue_length)
        except ValueError:
            die("invalid integer literal in {}: {}"
                .format(FSTUNES_QUEUE_LENGTH_ENV_VAR, queue_length))
        if queue_length < 0:
            die("queue length cannot be negative in {}: {}"
                .format(FSTUNES_QUEUE_LENGTH_ENV_VAR, queue_length))
    else:
        queue_length = 10000
    # Paths used throughout; passed explicitly rather than held globally.
    env = {
        "home": home,
        "media": home / MEDIA_PLAYLIST,
        "playlists": home / "playlists",
        "queue": home / "playlists" / QUEUE_PLAYLIST,
        "queue_current": home / "playlists" / QUEUE_PLAYLIST / "_current",
        "queue_length": queue_length,
        "temp": home / "temp",
    }
    if args.subcommand == "import":
        import_music(env, args.paths)
    elif args.subcommand == "playlist":
        if args.subcommand_playlist == "create":
            create_playlists(env, args.playlists)
        else:
            delete_playlists(env, args.playlists, yes=args.yes)
    elif args.subcommand == "insert":
        matchers = parse_matchers(args, default_to_media=True)
        sorters = parse_sorters(args)
        insert_songs(
            env, matchers, sorters, args.playlist, args.index,
            transfer=args.transfer, before=args.before, yes=args.yes)
    else:
        raise NotImplementedError
def main():
    """CLI entry point: parse sys.argv and dispatch the subcommand."""
    handle_args(get_parser().parse_args())
| 36.957396 | 79 | 0.573666 | import argparse
import bisect
import collections
import math
import mutagen
import os
import pathlib
import random
import re
import shutil
import string
import sys
def has_duplicates(l):
return len(l) != len(set(l))
def iter_len(iterable):
return sum(1 for _ in iterable)
def plural(n):
return "s" if n != 1 else ""
def pluralen(n):
return plural(len(n))
def plurals(n):
return n, plural(n)
def pluralens(n):
return plurals(len(n))
def log(message, *args, **kwargs):
print("fstunes: {}".format(message), *args, file=sys.stderr, **kwargs)
def die(message=None, *args, **kwargs):
if os.environ.get("FSTUNES_DEBUG"):
assert False, "stacktrace requested"
if message is not None:
log(message, *args, **kwargs)
sys.exit(1)
def are_you_sure(default, yes):
prompt = "[Y/n]" if default else "[y/N]"
print("Proceed? {} ".format(prompt), end="")
if yes:
response = "y (from command-line options)"
print(response)
else:
response = input()
if response.lower().startswith("y"):
return True
if response.lower().startswith("n"):
return False
return default
def add_yes_option(parser):
parser.add_argument("-y", "--yes", action="store_true",
help="Don't ask for confirmation")
def add_fields_option(parser):
parser.add_argument("-f", "--fields", metavar="FIELD1,FIELD2,...",
help="Which metadata fields to include")
def add_match_options(parser):
parser.add_argument("-m", "--match", metavar="FIELD=EXPR", action="append",
help="Filter songs")
parser.add_argument("--match-literal", metavar="FIELD=VALUE",
action="append", help="Filter songs by literal match")
parser.add_argument("--match-set", metavar="FIELD=VALUE1,VALUE2,...",
action="append", help="Filter songs by set membership")
parser.add_argument("--match-range", metavar="FIELD=LOW-HIGH",
action="append",
help="Filter songs by range inclusion")
parser.add_argument("-M", "--match-all", metavar="FIELD", action="append",
help="Do not filter songs")
parser.add_argument("--set-delimiter", default=",", metavar="DELIM",
help="Delimiter to use for set filtering")
parser.add_argument("--range-delimiter", default="-", metavar="DELIM",
help="Delimiter to use for range filtering")
SORT_OPTION_STRINGS = ("-s", "--sort")
REVERSE_OPTION_STRINGS = ("-r", "--reverse")
SHUFFLE_OPTION_STRINGS = ("-x", "--shuffle")
class SortAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string):
if option_string in SORT_OPTION_STRINGS:
modifier = "sort"
elif option_string in REVERSE_OPTION_STRINGS:
modifier = "reverse"
elif option_string in SHUFFLE_OPTION_STRINGS:
modifier = "shuffle"
else:
assert False, "unexpected modifier: {}".format(modifier)
if namespace.sort is None:
namespace.sort = []
namespace.sort.append({
"field": value,
"modifier": modifier,
})
def add_sort_options(parser):
parser.add_argument(*SORT_OPTION_STRINGS, action=SortAction,
help="Sort by field")
parser.add_argument(*REVERSE_OPTION_STRINGS, action=SortAction,
help="Sort by field in reverse order")
parser.add_argument(*SHUFFLE_OPTION_STRINGS, action=SortAction,
help="Shuffle by field")
def get_parser():
parser = argparse.ArgumentParser(
description=(
"Minimal command-line music library manager and media player."))
subparsers = parser.add_subparsers(dest="subcommand")
parser_import = subparsers.add_parser(
"import", help="Add media files to library")
parser_import.add_argument(
"paths", nargs="+", metavar="path", help="Media file or directory")
parser_playlist = subparsers.add_parser(
"playlist", help="Create or delete playlists")
subparsers_playlist = parser_playlist.add_subparsers(
dest="subcommand_playlist")
parser_playlist_create = subparsers_playlist.add_parser(
"create", help="Create a playlist")
parser_playlist_create.add_argument(
"playlists", nargs="+", metavar="playlist",
help="Name of playlist to create")
parser_playlist_delete = subparsers_playlist.add_parser(
"delete", help="Delete a playlist")
parser_playlist_delete.add_argument(
"playlists", nargs="+", metavar="playlist",
help="Name of playlist to delete")
add_yes_option(parser_playlist_delete)
parser_insert = subparsers.add_parser(
"insert", help="Add songs to a playlist or the queue")
add_match_options(parser_insert)
add_sort_options(parser_insert)
parser_insert.add_argument(
"-t", "--transfer", action="store_true",
help="Also remove songs from original playlists")
add_yes_option(parser_insert)
group_insert_before = parser_insert.add_mutually_exclusive_group()
group_insert_before.add_argument(
"--after", action="store_false", dest="before",
help="Insert after given index")
group_insert_before.add_argument(
"--before", action="store_true", help="Insert before given index")
parser_insert.add_argument(
"playlist", help="Name of playlist in which to insert")
parser_insert.add_argument(
"index", type=int, help="Index at which to insert")
parser_remove = subparsers.add_parser(
"remove", help="Remove songs from a playlist or the queue")
add_match_options(parser_remove)
add_yes_option(parser_remove)
parser_edit = subparsers.add_parser(
"edit", help="Edit song metadata")
add_match_options(parser_edit)
add_sort_options(parser_edit)
add_fields_option(parser_edit)
parser_edit.add_argument(
"-e", "--editor", help="Shell command to run text editor")
add_yes_option(parser_edit)
parser_list = subparsers.add_parser(
"list", help="List songs and associated information")
add_match_options(parser_list)
add_sort_options(parser_list)
add_fields_option(parser_list)
parser_delete = subparsers.add_parser(
"delete", help="Delete media files from library")
add_match_options(parser_delete)
add_yes_option(parser_delete)
parser_seek = subparsers.add_parser(
"seek", help="Change place in queue and play/pause")
group_seek_play_pause = parser_seek.add_mutually_exclusive_group()
group_seek_play_pause.add_argument(
"-p", "--play", action="store_true", help="Start playing")
group_seek_play_pause.add_argument(
"-P", "--pause", action="store_true", help="Stop playing")
parser_seek.add_argument(
"index", type=int, nargs="?", help="Relative index to which to seek")
return parser
def read_mutagen_key(m, key):
try:
return ", ".join(m[key].text) or None
except KeyError:
return None
def read_metadata(filepath):
m = mutagen.File(filepath)
metadata = {}
metadata["artist"] = (read_mutagen_key(m, "TPE2") or
read_mutagen_key(m, "TPE1"))
metadata["album"] = read_mutagen_key(m, "TALB")
metadata["disk"] = None
disk_and_total = read_mutagen_key(m, "TPOS")
if disk_and_total:
match = re.match(r"[0-9]+", disk_and_total)
if match:
metadata["disk"] = int(match.group())
metadata["track"] = None
track_and_total = read_mutagen_key(m, "TRCK")
if track_and_total:
match = re.match(r"[0-9]+", track_and_total)
if match:
metadata["track"] = int(match.group())
metadata["song"] = read_mutagen_key(m, "TIT2")
metadata["extension"] = filepath.suffix
return metadata
# Characters stored verbatim in library file names.  Path- and
# shell-significant characters ("/", ":", "#", "\\", ...) are deliberately
# excluded and therefore get hex-escaped.
SAFE_CHARS = (
    string.ascii_letters + string.digits + " !\"$%&'()*+,-.[]^_`{|}~")
# Delimiter wrapped around each hex escape (e.g. "/" -> "#2f#").  It must
# not appear in SAFE_CHARS, otherwise escaped output would be ambiguous.
# NOTE(review): the original assignment was truncated in this copy of the
# file (the comment stripper ate everything from the "#"); the value is
# reconstructed from the escape format below -- confirm against upstream.
ESCAPE_CHAR = "#"
def escape_string(s):
    """Escape *s* so it can be used safely as a single path component.

    Each character outside SAFE_CHARS is replaced by its lowercase hex
    code point wrapped in ESCAPE_CHAR; unescape_string is the inverse.
    """
    results = []
    for char in s:
        if char in SAFE_CHARS:
            results.append(char)
        else:
            results.append("{0}{1:x}{0}".format(ESCAPE_CHAR, ord(char)))
    return "".join(results)
def unescape_string(s):
    """Reverse escape_string: decode each "#hex#" run back to a character.

    NOTE(review): the body of this function (plus the MISSING_FIELD
    constant and create_relpath's def line) was truncated in this copy of
    the file; the regex is reconstructed from escape_string's output
    format "{0}{1:x}{0}".format(ESCAPE_CHAR, ord(char)) -- confirm
    against upstream.
    """
    return re.sub(
        r"#([0-9a-f]+)#",
        lambda match: chr(int(match.group(1), 16)),
        s)
# Placeholder stored in paths when a tag value is absent (None).
# NOTE(review): original definition missing from this copy; "?" is chosen
# because it is outside SAFE_CHARS and thus can never be produced by
# escaping a real value -- confirm the exact string upstream.
MISSING_FIELD = "?"
def create_relpath(metadata):
    """Build the library-relative path "artist/album/disk-track song.ext"
    for *metadata* (inverse of parse_relpath).  Body preserved from the
    original; only the def line was reconstructed.
    """
    disk_str = (
        "{}-".format(metadata["disk"]) if "disk" in metadata else "")
    return pathlib.Path("{}/{}/{}{} {}{}".format(
        escape_string(metadata["artist"] or MISSING_FIELD),
        escape_string(metadata["album"] or MISSING_FIELD),
        disk_str,
        metadata.get("track", ""),
        escape_string(metadata.get("song") or MISSING_FIELD),
        metadata["extension"]))
def parse_relpath(relpath):
    """Decode a library-relative path back into a metadata dict.

    Inverse of create_relpath.  Fields whose stored value equals
    MISSING_FIELD come back as None; disk/track are ints when present.
    """
    parts = re.fullmatch(
        r"([^/]+)/([^/]+)/(?:([0-9]+)-)?([0-9]+)? (.+)", str(relpath))
    raw_artist, raw_album, raw_disk, raw_track, song_and_extension = (
        parts.groups())
    artist = unescape_string(raw_artist)
    album = unescape_string(raw_album)
    # Split "song.ext" at the last component boundary; a name with no dot
    # has an empty extension.
    split = re.fullmatch(r"(.+?)(\..*)", song_and_extension)
    if split:
        raw_song, extension = split.groups()
    else:
        raw_song, extension = song_and_extension, ""
    song = unescape_string(raw_song)
    return {
        "artist": None if artist == MISSING_FIELD else artist,
        "album": None if album == MISSING_FIELD else album,
        "disk": int(raw_disk) if raw_disk else None,
        "track": int(raw_track) if raw_track else None,
        "song": None if song == MISSING_FIELD else song,
        "extension": extension,
    }
def import_song(env, filepath):
    """Copy one media file into the library tree.

    The destination is derived from the file's tags via read_metadata /
    create_relpath.  Returns True if the file was copied, False if a file
    (or symlink) already exists at the destination.
    """
    destination = env["media"] / create_relpath(read_metadata(filepath))
    if destination.exists() or destination.is_symlink():
        log("skipping, already exists: {} => {}"
            .format(filepath, destination))
        return False
    destination.parent.mkdir(parents=True, exist_ok=True)
    shutil.copyfile(filepath, destination)
    return True
# Only files with these suffixes are imported; everything else is skipped.
MEDIA_EXTENSIONS = [".mp3"]
def import_music(env, paths):
    """Recursively import media files found under each path in *paths*.

    Walks every path in sorted order, copies recognized files into the
    library via import_song, and logs a summary of the counts.
    """
    copied = 0
    already_present = 0
    skipped = 0
    for path in paths:
        path = pathlib.Path(path).resolve()
        for dirpath, dirnames, filenames in os.walk(path):
            # Sorting dirnames in place also makes os.walk descend in
            # sorted order; filenames are sorted for deterministic logs.
            dirnames.sort()
            filenames.sort()
            already_reported_dir = False
            for filename in filenames:
                filepath = pathlib.Path(dirpath) / filename
                suffix = filepath.suffix
                if suffix not in MEDIA_EXTENSIONS:
                    log("skipping, extension {} not recognized: {}"
                        .format(repr(suffix), filepath))
                    skipped += 1
                    continue
                # Announce each directory once, on its first media file.
                if not already_reported_dir:
                    log("importing media from directory: {}"
                        .format(filepath.parent))
                    already_reported_dir = True
                if import_song(env, filepath):
                    copied += 1
                else:
                    already_present += 1
    log(("imported {} media file{}, skipped {} "
         "already present and {} unrecognized")
        .format(*plurals(copied), already_present, skipped))
# "media" is the canonical library tree and "queue" is the play queue;
# both names are reserved and may not be created or deleted by the user.
MEDIA_PLAYLIST = "media"
QUEUE_PLAYLIST = "queue"
RESERVED_PLAYLISTS = (MEDIA_PLAYLIST, QUEUE_PLAYLIST)
def create_playlists(env, playlists):
    """Create an empty directory for each playlist name in *playlists*.

    Dies (without creating anything) on reserved names, duplicate names,
    or any target path that already exists.
    """
    for reserved in RESERVED_PLAYLISTS:
        if reserved in playlists:
            die("playlist name is reserved for fstunes: {}"
                .format(reserved))
    if has_duplicates(playlists):
        die("more than one playlist with the same name")
    targets = [env["playlists"] / escape_string(name) for name in playlists]
    failed = False
    for name, target in zip(playlists, targets):
        if not (target.exists() or target.is_symlink()):
            continue
        if target.is_dir():
            log("playlist already exists: {}".format(name))
        else:
            log("already exists and not a directory: {}".format(target))
        failed = True
    if failed:
        die()
    for target in targets:
        target.mkdir(parents=True)
    log("created {} playlist{}".format(*pluralens(playlists)))
def delete_playlists(env, playlists, yes):
    """Delete the given playlists (directories of symlink entries).

    Validates names, summarizes how many songs each playlist contains,
    asks for confirmation (auto-confirmed when *yes* or when all
    playlists are empty), then removes the directories recursively.
    """
    for reserved_name in RESERVED_PLAYLISTS:
        if reserved_name in playlists:
            die("playlist name is reserved for fstunes: {}"
                .format(reserved_name))
    if has_duplicates(playlists):
        die("more than one playlist with the same name")
    paths = [env["playlists"] / escape_string(p) for p in playlists]
    # Validate every path before deleting anything, so a partial run
    # cannot remove some playlists and then fail on another.
    should_die = False
    for playlist, path in zip(playlists, paths):
        if not path.is_dir():
            if path.exists() or path.is_symlink():
                log("already exists and not a directory: {}".format(path))
            else:
                log("playlist does not exist: {}".format(playlist))
            should_die = True
    if should_die:
        die()
    # Count the song entries (integer-named symlinks) in each playlist
    # so the confirmation prompt can show what will be lost.
    total_songs = 0
    deletion_list = []
    for playlist, path in zip(playlists, paths):
        num_songs = 0
        for entry_path in path.iterdir():
            if not entry_path.is_symlink():
                continue
            try:
                int(entry_path.name)
            except ValueError:
                continue
            num_songs += 1
        total_songs += num_songs
        deletion_list.append(
            "\n  {} ({} song{})"
            .format(playlist, *plurals(num_songs)))
    log("will delete the following {} playlist{} with {} total songs:{}"
        .format(*pluralens(paths), total_songs, "".join(deletion_list)))
    # Empty playlists are deleted without an explicit "yes".
    if not are_you_sure(default=total_songs == 0, yes=yes):
        die()
    for path in paths:
        shutil.rmtree(path)
    log("deleted {} playlist{}".format(*pluralens(playlists)))
# Environment variables configuring the library root and queue pruning.
FSTUNES_HOME_ENV_VAR = "FSTUNES_HOME"
FSTUNES_QUEUE_LENGTH_ENV_VAR = "FSTUNES_QUEUE_LENGTH"
# Every field that matchers and sorters may reference.  "from" is the
# source playlist name and "index" the entry's position within it.
METADATA_FIELDS = (
    "artist",
    "album",
    "disk",
    "track",
    "song",
    "extension",
    "from",
    "index",
)
# Fields whose match expressions are parsed as integers.
METADATA_INT_FIELDS = (
    "disk",
    "track",
    "index",
)
# Sanity check: every int field must also be a known field.
assert set(METADATA_INT_FIELDS).issubset(set(METADATA_FIELDS))
def split_matcher(matcher):
    """Split a "FIELD=EXPR" matcher at its first "=".

    Returns [field, expr]; a string without "=" comes back as a
    single-element list.
    """
    return matcher.split("=", 1)
def combine_matchers(true_matchers, false_matchers):
    """Tag matchers with their polarity: (True, m) then (False, m) pairs."""
    combined = [(True, matcher) for matcher in true_matchers]
    combined.extend((False, matcher) for matcher in false_matchers)
    return combined
def parse_matchers(args, default_to_media):
    """Parse the -m/--match family of CLI options into matcher descriptors.

    Returns a defaultdict mapping field name -> list of descriptor dicts
    ({"type": "literal"/"set"/"range"/"all", ...}).  "guess" matchers try
    literal, then set, then range interpretation, in that order; the first
    interpretation that parses wins (the "type" not in desc guards below
    implement that fallthrough, so their order is load-bearing).  When no
    "from" matcher is given, defaults to the media library if
    *default_to_media*, otherwise dies.
    """
    match = args.match or []
    match_literal = args.match_literal or []
    match_set = args.match_set or []
    match_range = args.match_range or []
    match_all = args.match_all or []
    matchers = collections.defaultdict(list)
    for matcher_type, unparsed_matchers in (
            ("guess", match),
            ("literal", match_literal),
            ("set", match_set),
            ("range", match_range),
            ("all", match_all)):
        for unparsed_matcher in unparsed_matchers:
            # "all" matchers are just a bare field name; the others are
            # "FIELD=EXPR".
            if matcher_type != "all":
                try:
                    field, orig_expr = unparsed_matcher.split("=", maxsplit=1)
                except ValueError:
                    die("invalid match expression: {}"
                        .format(unparsed_matcher))
            else:
                field = unparsed_matcher
            if field not in METADATA_FIELDS:
                die("unsupported field: {}".format(field))
            desc = {}
            if matcher_type not in ("guess", "literal", "set", "range", "all"):
                assert False, (
                    "unexpected matcher type: {}".format(matcher_type))
            # Literal: the expression as-is (int-parsed for int fields).
            if matcher_type in ("literal", "guess") and "type" not in desc:
                skip = False
                expr = orig_expr
                if field in METADATA_INT_FIELDS:
                    try:
                        expr = int(orig_expr)
                    except ValueError:
                        # In guess mode fall through to the next
                        # interpretation instead of dying.
                        if matcher_type != "guess":
                            die("invalid integer literal: {}"
                                .format(orig_expr))
                        else:
                            skip = True
                if not skip:
                    desc["type"] = "literal"
                    desc["value"] = expr
            # Set: values separated by args.set_delimiter.
            if matcher_type in ("set", "guess") and "type" not in desc:
                skip = False
                expr = orig_expr.split(args.set_delimiter)
                if field in METADATA_INT_FIELDS:
                    try:
                        expr = list(map(int, expr))
                    except ValueError:
                        if matcher_type != "guess":
                            die("invalid integer set: {}".format(orig_expr))
                        else:
                            skip = True
                if not skip:
                    desc["type"] = "set"
                    desc["values"] = expr
            # Range: "LOW<delim>HIGH", inclusive on both ends.
            if matcher_type in ("range", "guess") and "type" not in desc:
                skip = False
                try:
                    low, high = orig_expr.split(
                        args.range_delimiter, maxsplit=1)
                except ValueError:
                    if matcher_type != "guess":
                        die("invalid range (does not contain {}): {}"
                            .format(repr(args.range_delimiter), orig_expr))
                    else:
                        skip = True
                if not skip and field in METADATA_INT_FIELDS:
                    try:
                        low = int(low)
                        high = int(high)
                    except ValueError:
                        if matcher_type != "guess":
                            die("invalid integer range: {}".format(orig_expr))
                        else:
                            skip = True
                if not skip:
                    desc["type"] = "range"
                    desc["low"] = low
                    desc["high"] = high
            if matcher_type == "all" and "type" not in desc:
                desc["type"] = "all"
            if "type" not in desc:
                die("invalid match expression: {}".format(orig_expr))
            matchers[field].append(desc)
    if not matchers["from"]:
        if default_to_media:
            matchers["from"] = [{
                "type": "literal",
                "value": "media",
            }]
        else:
            die("you must select a playlist using -m from=PLAYLIST or similar")
    return matchers
def parse_sorters(args):
    """Build the full sorter list from the CLI arguments.

    User-supplied sorters (validated against METADATA_FIELDS) come first,
    followed by a fixed default ordering; the whole list is then reversed
    so that, with stable sorting, the earliest-specified sorter ends up as
    the primary key.
    """
    sorters = []
    for user_sorter in args.sort or []:
        if user_sorter["field"] not in METADATA_FIELDS:
            die("unsupported field: {}".format(user_sorter["field"]))
        sorters.append(dict(user_sorter))
    default_order = (
        "from", "index", "artist", "album", "disk", "track",
        "song", "extension")
    sorters.extend(
        {"field": name, "modifier": "sort"} for name in default_order)
    sorters.reverse()
    return sorters
def apply_matchers(matchers, value):
    """Return True if *value* satisfies at least one matcher descriptor.

    An empty matcher list matches everything (no constraint on the field).
    """
    if not matchers:
        return True
    for desc in matchers:
        kind = desc["type"]
        if kind == "all":
            return True
        if kind == "literal":
            if value == desc["value"]:
                return True
        elif kind == "set":
            if value in desc["values"]:
                return True
        elif kind == "range":
            if desc["low"] <= value <= desc["high"]:
                return True
        else:
            assert False, "unexpected matcher type: {}".format(kind)
    return False
def get_queue_index(env):
    """Return the current queue position.

    Reads the index stored in the queue-position symlink; if that is
    missing or malformed, falls back to the smallest integer-named entry
    in the queue directory, and finally to 0.
    """
    try:
        return int(os.readlink(env["queue_current"]))
    except (OSError, ValueError):
        pass
    smallest = math.inf
    try:
        for entry in env["queue"].iterdir():
            try:
                smallest = min(smallest, int(entry.name))
            except ValueError:
                pass
    except OSError:
        pass
    return 0 if smallest == math.inf else smallest
def set_queue_index(env, index):
    """Repoint the queue-position symlink at *index*.

    Builds the new symlink in env["temp"] first and rename()s it over the
    old one, so the pointer is replaced atomically rather than being
    briefly missing.  NOTE(review): this assumes temp and the playlists
    tree are on the same filesystem, and that no stale symlink is left in
    temp from an earlier crash (symlink_to would then raise) -- confirm.
    """
    queue_current_path = env["queue_current"]
    queue_current_path.parent.mkdir(parents=True, exist_ok=True)
    queue_current_path_new = env["temp"] / env["queue_current"].name
    queue_current_path_new.parent.mkdir(parents=True, exist_ok=True)
    # The index itself is stored as the symlink's target string.
    queue_current_path_new.symlink_to(str(index))
    queue_current_path_new.rename(queue_current_path)
def collect_matched_songs(env, matchers):
    """Return a metadata dict for every song satisfying *matchers*.

    Scans the media tree when the "from" matcher accepts "media", and
    every playlist directory whose name the "from" matcher accepts.
    Playlist results additionally carry "from" and "index" keys; every
    result carries "relpath" (path relative to env["media"]).
    """
    songs = []
    matches_media = (
        apply_matchers(matchers["from"], MEDIA_PLAYLIST) and
        env["media"].is_dir())
    if matches_media:
        # Media tree layout is artist/album/song, so artist and album
        # matchers can prune whole subtrees before touching files.
        for artist_path in env["media"].iterdir():
            artist = unescape_string(artist_path.name)
            if not apply_matchers(matchers["artist"], artist):
                continue
            if not artist_path.is_dir():
                continue
            for album_path in artist_path.iterdir():
                album = unescape_string(album_path.name)
                if not apply_matchers(matchers["album"], album):
                    continue
                if not album_path.is_dir():
                    continue
                for song_path in album_path.iterdir():
                    if song_path.suffix not in MEDIA_EXTENSIONS:
                        continue
                    if not song_path.is_file():
                        continue
                    relpath = song_path.relative_to(env["media"])
                    metadata = parse_relpath(relpath)
                    # Remaining per-file field matchers.
                    disqualified = False
                    for field in ("disk", "track", "song", "extension"):
                        if not apply_matchers(
                                matchers[field], metadata[field]):
                            disqualified = True
                            break
                    if disqualified:
                        continue
                    metadata["relpath"] = relpath
                    songs.append(metadata)
    if env["playlists"].is_dir():
        for playlist_path in env["playlists"].iterdir():
            playlist = unescape_string(playlist_path.name)
            if not apply_matchers(matchers["from"], playlist):
                continue
            if not playlist_path.is_dir():
                continue
            # NOTE(review): queue indices are shifted by the current queue
            # position here (index += offset), while insert_in_playlist
            # displays them as index - global_offset -- confirm the
            # intended direction of this offset.
            offset = get_queue_index(env) if playlist == QUEUE_PLAYLIST else 0
            for entry_path in playlist_path.iterdir():
                try:
                    index = int(entry_path.name)
                except ValueError:
                    continue
                index += offset
                if not apply_matchers(matchers["index"], index):
                    continue
                if not entry_path.is_symlink():
                    continue
                song_path = entry_path.resolve()
                relpath = song_path.relative_to(env["media"])
                metadata = parse_relpath(relpath)
                disqualified = False
                for field in ("artist", "album", "disk", "track", "song",
                              "extension"):
                    if not apply_matchers(matchers[field], metadata[field]):
                        disqualified = True
                        break
                if disqualified:
                    continue
                metadata["from"] = playlist
                metadata["index"] = index
                metadata["relpath"] = relpath
                songs.append(metadata)
    return songs
def sort_songs(songs, sorters):
    """Sort *songs* (list of metadata dicts) in place by each sorter in turn.

    Because list.sort is stable, applying the sorters sequentially makes
    the LAST sorter applied the primary key (parse_sorters reverses its
    list accordingly).  Modifiers: "sort" ascending, "reverse" descending,
    "shuffle" random-but-grouped (equal field values get the same rank).
    """
    for sorter in sorters:
        field = sorter["field"]
        modifier = sorter["modifier"]
        assert modifier in ("sort", "reverse", "shuffle"), (
            "unexpected sort modifier: {}".format(modifier))
        # Sentinel used when a song lacks the field or has it set to None.
        # It must be type-compatible with real values so comparisons don't
        # raise: -inf for integer fields, "" for string fields.
        missing = -math.inf if field in METADATA_INT_FIELDS else ""
        if modifier == "shuffle":
            # Each distinct field value gets one random 64-bit rank, so
            # songs sharing a value stay grouped after the shuffle.
            memo = collections.defaultdict(lambda: random.getrandbits(64))
            def key(song, field=field, memo=memo):
                # Missing/None values all hash to the memo[None] bucket.
                return memo[song.get(field)]
            reverse = False
        else:
            def key(song, field=field, missing=missing):
                value = song.get(field)
                # BUG FIX: the original returned the raw value even when
                # it was None (parse_relpath yields None for absent tags),
                # which made list.sort compare None against str/int and
                # raise TypeError.  Coerce None/missing to the sentinel.
                return missing if value is None else value
            reverse = modifier == "reverse"
        songs.sort(key=key, reverse=reverse)
# Number of neighbouring entries shown on each side of an insertion preview.
CONTEXT = 3
def song_description(song, index):
    """Format one preview line for *song* shown at display position *index*."""
    disk_part = "{}-".format(song["disk"]) if "disk" in song else ""
    track_part = song.get("track", "")
    spacer = " " if "disk" in song or "track" in song else ""
    return "\n  [{}]. {}{}{}{} ({}, {})".format(
        index, disk_part, track_part, spacer,
        song["song"], song["album"], song["artist"])
# Separator between surrounding context and the entries being inserted.
CONTEXT_DIVIDER = "\n-----"
def insert_in_playlist(env, songs, playlist, insert_index, before, yes):
    """Insert *songs* into *playlist* at *insert_index*, with confirmation.

    Playlist entries are integer-named symlinks into the media tree.
    Existing entries at or after the insertion point are renamed upward to
    make room; for the queue, entries far behind the current position are
    pruned and the position pointer is advanced when the insertion happens
    before it.  Shows a context preview and asks before touching disk.
    """
    # *insert_index* addresses the gap before an entry; inserting "after"
    # is the same as inserting "before" the next slot.
    if not before:
        insert_index += 1
    if playlist == MEDIA_PLAYLIST:
        die("playlist name is reserved for fstunes: {}"
            .format(MEDIA_PLAYLIST))
    # Queue indices given by the user are relative to the current
    # position; global_offset converts back for display.
    if playlist == QUEUE_PLAYLIST:
        current_index = get_queue_index(env)
        insert_index += current_index
        global_offset = current_index
    else:
        global_offset = 0
    playlist_path = env["playlists"] / playlist
    if playlist == QUEUE_PLAYLIST:
        playlist_path.mkdir(parents=True, exist_ok=True)
    elif not playlist_path.is_dir():
        die("playlist does not exist: {}".format(playlist))
    # Collect the existing integer-named entries, sorted.
    existing_indices = []
    for entry_path in playlist_path.iterdir():
        try:
            index = int(entry_path.name)
        except ValueError:
            continue
        existing_indices.append(index)
    existing_indices.sort()
    insertion_point = bisect.bisect_left(existing_indices, insert_index)
    insertion_list = []
    removals = []
    # Queue pruning: drop entries more than queue_length behind the
    # current position.
    if playlist == QUEUE_PLAYLIST:
        removal_point = bisect.bisect_left(existing_indices, current_index)
        for i in range(removal_point - env["queue_length"]):
            index = existing_indices[i]
            removals.append(playlist_path / str(index))
    # Preview: up to CONTEXT entries before the insertion point...
    for i in range(max(0, insertion_point - CONTEXT), insertion_point):
        index = existing_indices[i]
        song = parse_relpath(
            (playlist_path / str(index)).resolve().relative_to(env["media"]))
        insertion_list.append(song_description(song, index - global_offset))
    insertion_list.append(CONTEXT_DIVIDER)
    # ...then the new entries themselves (symlinks are relative so the
    # whole home directory stays relocatable)...
    creates = []
    for offset, song in enumerate(songs):
        song_index = insert_index + offset
        target = pathlib.Path("..") / ".." / MEDIA_PLAYLIST / song["relpath"]
        creates.append((playlist_path / str(song_index), target))
        insertion_list.append(
            song_description(song, song_index - global_offset))
    insertion_list.append(CONTEXT_DIVIDER)
    # ...then up to CONTEXT entries after, shown at their post-shift indices.
    for i in range(insertion_point,
                   min(insertion_point + CONTEXT, len(existing_indices))):
        index = existing_indices[i]
        song = parse_relpath(
            (playlist_path / str(index)).resolve().relative_to(env["media"]))
        insertion_list.append(
            song_description(song, index + len(songs) - global_offset))
    # Shift the tail upward.  Renames run highest-first (reverse) so a
    # renamed entry never lands on a name that is still occupied.
    renames = []
    for i in range(insertion_point, len(existing_indices)):
        old_index = existing_indices[i]
        new_index = old_index + len(songs)
        renames.append((playlist_path / str(old_index),
                        playlist_path / str(new_index)))
    renames.reverse()
    # If we inserted before the queue's current position, the pointer must
    # advance by the same amount to keep pointing at the same entry.
    advance = False
    if playlist == QUEUE_PLAYLIST:
        if current_index > insert_index:
            new_current_index = current_index + len(songs)
            advance = True
    log(("will insert the following {} song{} into "
         "playlist {} with {} song{} already:{}")
        .format(*pluralens(songs), repr(playlist),
                *pluralens(existing_indices),
                "".join(insertion_list)))
    log("will move {} symlink{}, insert {}, prune {}{}"
        .format(*pluralens(renames), len(creates), len(removals),
                ", advance pointer" if advance else ""))
    if not are_you_sure(default=True, yes=yes):
        die()
    # Apply in a safe order: prune, shift tail, create new, move pointer.
    for removal in removals:
        removal.unlink()
    for rename, target in renames:
        rename.rename(target)
    for create, target in creates:
        create.symlink_to(target)
    if advance:
        set_queue_index(env, new_current_index)
    log("inserted {} song{} into playlist {} and pruned {} (length {} -> {})"
        .format(*pluralens(songs), repr(playlist),
                len(removals), len(existing_indices),
                len(existing_indices) + len(songs) - len(removals)))
def insert_songs(
        env, matchers, sorters, playlist, index, transfer, before, yes):
    """Match, sort, and insert songs into *playlist* at *index*.

    *transfer* (moving entries out of their source playlist instead of
    copying) is not implemented yet.
    """
    if transfer:
        raise NotImplementedError
    matched = collect_matched_songs(env, matchers)
    if not matched:
        die("no songs matched")
    sort_songs(matched, sorters)
    insert_in_playlist(env, matched, playlist, index, before=before, yes=yes)
def handle_args(args):
    """Validate the environment and dispatch to the chosen subcommand.

    Requires FSTUNES_HOME to name an existing directory; the optional
    FSTUNES_QUEUE_LENGTH (default 10000) bounds how far behind the queue
    pointer old entries are kept before pruning.
    """
    home = os.environ.get(FSTUNES_HOME_ENV_VAR)
    if not home:
        die("environment variable not set: {}".format(FSTUNES_HOME_ENV_VAR))
    home = pathlib.Path(home)
    if not home.is_dir():
        if home.exists() or home.is_symlink():
            die("not a directory: {}".format(home))
        die("directory does not exist: {}".format(home))
    queue_length = os.environ.get(FSTUNES_QUEUE_LENGTH_ENV_VAR)
    if queue_length:
        try:
            queue_length = int(queue_length)
        except ValueError:
            die("invalid integer literal in {}: {}"
                .format(FSTUNES_QUEUE_LENGTH_ENV_VAR, queue_length))
        if queue_length < 0:
            die("queue length cannot be negative in {}: {}"
                .format(FSTUNES_QUEUE_LENGTH_ENV_VAR, queue_length))
    else:
        queue_length = 10000
    # Shared environment dict passed to every command implementation.
    env = {
        "home": home,
        "media": home / MEDIA_PLAYLIST,
        "playlists": home / "playlists",
        "queue": home / "playlists" / QUEUE_PLAYLIST,
        "queue_current": home / "playlists" / QUEUE_PLAYLIST / "_current",
        "queue_length": queue_length,
        "temp": home / "temp",
    }
    if args.subcommand == "import":
        import_music(env, args.paths)
    elif args.subcommand == "playlist":
        if args.subcommand_playlist == "create":
            create_playlists(env, args.playlists)
        else:
            delete_playlists(env, args.playlists, yes=args.yes)
    elif args.subcommand == "insert":
        matchers = parse_matchers(args, default_to_media=True)
        sorters = parse_sorters(args)
        insert_songs(
            env, matchers, sorters, args.playlist, args.index,
            transfer=args.transfer, before=args.before, yes=args.yes)
    else:
        # Remaining subcommands (remove/edit/list/delete/seek) are parsed
        # but not implemented yet.
        raise NotImplementedError
def main():
    """Entry point: parse sys.argv and dispatch to the chosen subcommand."""
    handle_args(get_parser().parse_args())
| true | true |
1c479ce23370138a8cdf01d03c981094aa08ac95 | 1,867 | py | Python | python/testData/inspections/PyCompatibilityInspection/numericLiteralExpression.py | CeH9/intellij-community | ae45cfd4c76876f9b1c58c6f46508d18523ebd41 | [
"Apache-2.0"
] | 1 | 2020-05-30T04:49:44.000Z | 2020-05-30T04:49:44.000Z | python/testData/inspections/PyCompatibilityInspection/numericLiteralExpression.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2022-02-19T09:45:05.000Z | 2022-02-27T20:32:55.000Z | python/testData/inspections/PyCompatibilityInspection/numericLiteralExpression.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | a = <error descr="Python does not support a trailing 'u'">12u</error>
b = <warning descr="Python version 3.4, 3.5, 3.6, 3.7, 3.8 do not support a trailing 'l'.">12l</warning>
c = <error descr="Python does not support a trailing 'll'">12ll</error>
d = <error descr="Python does not support a trailing 'U'">12U</error>
e = <warning descr="Python version 3.4, 3.5, 3.6, 3.7, 3.8 do not support a trailing 'L'.">12L</warning>
f = <error descr="Python does not support a trailing 'LL'">12LL</error>
g = <error descr="Python does not support a trailing 'ul'">0x12ful</error>
h = <error descr="Python does not support a trailing 'uL'">0X12fuL</error>
i = <error descr="Python does not support a trailing 'Ul'">12Ul</error>
j = <error descr="Python does not support a trailing 'UL'">12UL</error>
k = <error descr="Python does not support a trailing 'ull'">0o12ull</error>
l = <error descr="Python does not support a trailing 'uLL'">0O12uLL</error>
m = <error descr="Python does not support a trailing 'Ull'">0b1Ull</error>
n = <error descr="Python does not support a trailing 'ULL'">0B1ULL</error>
o = <error descr="Python does not support a trailing 'lu'">12lu</error>
p = <error descr="Python does not support a trailing 'lU'">12lU</error>
q = <error descr="Python does not support a trailing 'Lu'">12Lu</error>
r = <error descr="Python does not support a trailing 'LU'">12LU</error>
s = <error descr="Python does not support a trailing 'llu'">12llu</error>
t = <error descr="Python does not support a trailing 'llU'">12llU</error>
u = <error descr="Python does not support a trailing 'LLu'">12LLu</error>
v = <error descr="Python does not support a trailing 'LLU'">12LLU</error>
w = <warning descr="Python version 3.4, 3.5, 3.6, 3.7, 3.8 do not support this syntax. It requires '0o' prefix for octal literals">04</warning><error descr="End of statement expected">8</error> | 81.173913 | 193 | 0.706481 | a = <error descr="Python does not support a trailing 'u'">12u</error>
b = <warning descr="Python version 3.4, 3.5, 3.6, 3.7, 3.8 do not support a trailing 'l'.">12l</warning>
c = <error descr="Python does not support a trailing 'll'">12ll</error>
d = <error descr="Python does not support a trailing 'U'">12U</error>
e = <warning descr="Python version 3.4, 3.5, 3.6, 3.7, 3.8 do not support a trailing 'L'.">12L</warning>
f = <error descr="Python does not support a trailing 'LL'">12LL</error>
g = <error descr="Python does not support a trailing 'ul'">0x12ful</error>
h = <error descr="Python does not support a trailing 'uL'">0X12fuL</error>
i = <error descr="Python does not support a trailing 'Ul'">12Ul</error>
j = <error descr="Python does not support a trailing 'UL'">12UL</error>
k = <error descr="Python does not support a trailing 'ull'">0o12ull</error>
l = <error descr="Python does not support a trailing 'uLL'">0O12uLL</error>
m = <error descr="Python does not support a trailing 'Ull'">0b1Ull</error>
n = <error descr="Python does not support a trailing 'ULL'">0B1ULL</error>
o = <error descr="Python does not support a trailing 'lu'">12lu</error>
p = <error descr="Python does not support a trailing 'lU'">12lU</error>
q = <error descr="Python does not support a trailing 'Lu'">12Lu</error>
r = <error descr="Python does not support a trailing 'LU'">12LU</error>
s = <error descr="Python does not support a trailing 'llu'">12llu</error>
t = <error descr="Python does not support a trailing 'llU'">12llU</error>
u = <error descr="Python does not support a trailing 'LLu'">12LLu</error>
v = <error descr="Python does not support a trailing 'LLU'">12LLU</error>
w = <warning descr="Python version 3.4, 3.5, 3.6, 3.7, 3.8 do not support this syntax. It requires '0o' prefix for octal literals">04</warning><error descr="End of statement expected">8</error> | false | true |
1c479d15f72832953af2ac415b7d3ec3543095c2 | 1,214 | py | Python | setup.py | DanielR59/mljar-supervised | 04a90ffbff33b2c93a7c212825b987e73b7f62fe | [
"MIT"
] | null | null | null | setup.py | DanielR59/mljar-supervised | 04a90ffbff33b2c93a7c212825b987e73b7f62fe | [
"MIT"
] | null | null | null | setup.py | DanielR59/mljar-supervised | 04a90ffbff33b2c93a7c212825b987e73b7f62fe | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
from os import path
# Absolute directory containing this setup.py, so relative file reads work
# regardless of the current working directory.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
# Distribution metadata for the mljar-supervised package.
setup(
    name="mljar-supervised",
    version="0.11.2",
    description="Automated Machine Learning for Humans",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mljar/mljar-supervised",
    author="MLJAR, Sp. z o.o.",
    author_email="contact@mljar.com",
    license="MIT",
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    # NOTE(review): runtime dependencies come from requirements.txt; the
    # open() handle here is never closed and requirements.txt must ship in
    # the sdist for installation from source to work -- confirm intended.
    install_requires=open("requirements.txt").readlines(),
    include_package_data=True,
    python_requires='>=3.7.1',
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    keywords=[
        "automated machine learning",
        "automl",
        "machine learning",
        "data science",
        "data mining",
        "mljar"
    ],
)
| 30.35 | 81 | 0.644152 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="mljar-supervised",
version="0.11.2",
description="Automated Machine Learning for Humans",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mljar/mljar-supervised",
author="MLJAR, Sp. z o.o.",
author_email="contact@mljar.com",
license="MIT",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=open("requirements.txt").readlines(),
include_package_data=True,
python_requires='>=3.7.1',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
keywords=[
"automated machine learning",
"automl",
"machine learning",
"data science",
"data mining",
"mljar"
],
)
| true | true |
1c479d38ba2d385729e4a2e779104cd41110084d | 1,146 | py | Python | tekstovni_vmesnik.py | kavcicm/Vislice | 04c3c09bad456321ee9da04c6af8deaeaa509842 | [
"MIT"
] | null | null | null | tekstovni_vmesnik.py | kavcicm/Vislice | 04c3c09bad456321ee9da04c6af8deaeaa509842 | [
"MIT"
] | null | null | null | tekstovni_vmesnik.py | kavcicm/Vislice | 04c3c09bad456321ee9da04c6af8deaeaa509842 | [
"MIT"
] | null | null | null | import model
lojtrice = "#############################\n"
def izpis_zmage(igra):
tekst = lojtrice + "Uganili ste geslo {0}.\n".format(igra.geslo)
return tekst
def izpis_poraza(igra):
tekst = lojtrice + "Obešeni ste! Pravilno geslo je blio {0}.\n".format(igra.geslo)
return tekst
def izpis_igre(igra):
    """Return the status banner: revealed letters, tries left, wrong guesses."""
    preostali = model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak() + 1
    return (lojtrice +
            igra.pravilni_del_gesla() + "\n" +
            ("Preostalo število poizkusov: {0}\n Napačni ugibi: {1} "
             ).format(preostali, igra.nepravilni_ugibi()) + lojtrice)
def zahtevaj_vnos():
    """Prompt the player on stdin for a letter and return it unvalidated."""
    return input("Ugibaj črko: ")
def pozeni_vmesnik():
    """Run the text-mode game loop until the player wins or loses."""
    igra = model.nova_igra()
    while True:
        # Show the current state, then ask for one guess.
        print(izpis_igre(igra))
        igra.ugibaj(zahtevaj_vnos())
        # Check whether the game has finished.
        if igra.poraz():
            print(izpis_poraza(igra))
            break
        if igra.zmaga():
            print(izpis_zmage(igra))
            break
    return None
pozeni_vmesnik() | 26.651163 | 86 | 0.579407 | import model
lojtrice = "#############################\n"
def izpis_zmage(igra):
tekst = lojtrice + "Uganili ste geslo {0}.\n".format(igra.geslo)
return tekst
def izpis_poraza(igra):
tekst = lojtrice + "Obešeni ste! Pravilno geslo je blio {0}.\n".format(igra.geslo)
return tekst
def izpis_igre(igra):
tekst = (lojtrice +
igra.pravilni_del_gesla() + "\n" +
("Preostalo število poizkusov: {0}\n Napačni ugibi: {1} "
).format(model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak() + 1,
igra.nepravilni_ugibi()) + lojtrice)
return tekst
def zahtevaj_vnos():
return input("Ugibaj črko: ")
def pozeni_vmesnik():
igra = model.nova_igra()
while True:
print(izpis_igre(igra))
poskus = zahtevaj_vnos()
igra.ugibaj(poskus)
if igra.poraz():
print(izpis_poraza(igra))
break
if igra.zmaga():
print(izpis_zmage(igra))
break
else:
pass
return None
pozeni_vmesnik() | true | true |
1c479e4d6b65a786785934f82983844d7a1b5553 | 443 | py | Python | run_blast.py | denkovarik/EC-Scrape | e6340fe852b204f4813ec6ede4d20138a85644b6 | [
"MIT"
] | null | null | null | run_blast.py | denkovarik/EC-Scrape | e6340fe852b204f4813ec6ede4d20138a85644b6 | [
"MIT"
] | null | null | null | run_blast.py | denkovarik/EC-Scrape | e6340fe852b204f4813ec6ede4d20138a85644b6 | [
"MIT"
] | null | null | null | import sys, os, time
from utils import *
import shutil
from run_blast_utils import *
blast_rslt_dir = 'blast_rslts\\'
blast_working_dir = 'temp_blast\\'
commands = []
args = parse_args(sys.argv)
# Compile command line arguments
commands = compile_cmd(args, blast_rslt_dir, blast_working_dir)
start_time = time.time()
exec_commands(commands)
shutil.rmtree(blast_working_dir)
print("---%s seconds ---" % (time.time() - start_time)) | 24.611111 | 63 | 0.740406 | import sys, os, time
from utils import *
import shutil
from run_blast_utils import *
blast_rslt_dir = 'blast_rslts\\'
blast_working_dir = 'temp_blast\\'
commands = []
args = parse_args(sys.argv)
commands = compile_cmd(args, blast_rslt_dir, blast_working_dir)
start_time = time.time()
exec_commands(commands)
shutil.rmtree(blast_working_dir)
print("---%s seconds ---" % (time.time() - start_time)) | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.