id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
52945 | # -*- coding: utf-8 -*-
# __author__ = wangsheng
# __copyright__ = "Copyright 2018, Trump Organization"
# __email__ = "<EMAIL>"
# __status__ = "experiment"
# __time__ = 2018/11/8 11:15
# __file__ = __init__.py.py
from .knearestneighbor import KNN
from .linear_model import Ridge, Lasso, ElasticNet, LogisticRegression
from .naivebayes import GaussianNB
from .decisiontree import DecisionTreeClassifier
from .decisiontree import DecisionTreeRegressor
from .RandomForest import RandomForestClassifier
from .RandomForest import RandomForestRegressor
from .gradientboosting import GradientBoostingClassifier
from .gradientboosting import GradientBoostingRegressor
from .xgboost import XGBRegressor
from .xgboost import XGBClassifier
# Public API of this package: the names exported by `from <package> import *`.
# Keep this list in sync with the imports above.
__all__ = ['KNN',
           'Ridge',
           'Lasso',
           'ElasticNet',
           'LogisticRegression',
           'GaussianNB',
           'DecisionTreeClassifier',
           'RandomForestClassifier',
           'DecisionTreeRegressor',
           'RandomForestRegressor',
           'GradientBoostingClassifier',
           'GradientBoostingRegressor',
           'XGBClassifier',
           'XGBRegressor']
| StarcoderdataPython |
1778612 | <reponame>waikato-ufdl/ufdl-backend
from typing import List
from rest_framework import routers
from rest_framework.parsers import FileUploadParser
from rest_framework.request import Request
from rest_framework.response import Response
from ufdl.json.core import FileMetadata
from ...exceptions import JSONParseFailure
from ...renderers import BinaryFileRenderer
from ...models.mixins import FileContainerModel
from ...serialisers import NamedFileSerialiser
from ._RoutedViewSet import RoutedViewSet
class FileContainerViewSet(RoutedViewSet):
    """
    Mixin for view-sets which can upload/download/delete contained files.

    Registers extra detail routes (see get_routes) and switches the request
    parsers/renderers to raw-file handling when operating in file-container
    mode. The target model is expected to be a FileContainerModel.
    """
    # The keyword used to specify when the view-set is in file-container mode
    FILE_MODE_KEYWORD: str = "file-container"

    # The keyword used to specify when the view-set is in file-metadata mode
    METADATA_MODE_KEYWORD: str = "file-metadata"

    @classmethod
    def get_routes(cls) -> List[routers.Route]:
        # Five additional detail routes: single-file CRUD, bulk upload,
        # download-by-handle, per-file metadata and whole-container metadata.
        # The mode keyword passed through initkwargs drives the behaviour of
        # get_parsers/get_renderers below.
        return [
            routers.Route(
                url=r'^{prefix}/{lookup}/files/(?P<fn>.*)$',
                mapping={'post': 'add_file',
                         'get': 'get_file',
                         'delete': 'delete_file'},
                name='{basename}-file-container',
                detail=True,
                initkwargs={cls.MODE_ARGUMENT_NAME: FileContainerViewSet.FILE_MODE_KEYWORD}
            ),
            routers.Route(
                url=r'^{prefix}/{lookup}/files-multi{trailing_slash}$',
                mapping={
                    'post': 'add_files'
                },
                name='{basename}-file-container',
                detail=True,
                initkwargs={cls.MODE_ARGUMENT_NAME: FileContainerViewSet.FILE_MODE_KEYWORD}
            ),
            routers.Route(
                url=r'^{prefix}/{lookup}/file-handles/(?P<fh>.*)$',
                mapping={'get': 'get_file_by_handle'},
                name='{basename}-file-container',
                detail=True,
                initkwargs={cls.MODE_ARGUMENT_NAME: FileContainerViewSet.FILE_MODE_KEYWORD}
            ),
            routers.Route(
                url=r'^{prefix}/{lookup}/metadata/(?P<fn>.+)$',
                mapping={'post': 'set_metadata',
                         'get': 'get_metadata'},
                name='{basename}-file-metadata',
                detail=True,
                initkwargs={cls.MODE_ARGUMENT_NAME: FileContainerViewSet.METADATA_MODE_KEYWORD}
            ),
            routers.Route(
                url=r'^{prefix}/{lookup}/metadata{trailing_slash}$',
                mapping={'get': 'get_all_metadata'},
                name='{basename}-file-metadata',
                detail=True,
                initkwargs={cls.MODE_ARGUMENT_NAME: FileContainerViewSet.METADATA_MODE_KEYWORD}
            )
        ]

    def get_parsers(self):
        # If not posting a file, return the standard parsers
        if self.mode != FileContainerViewSet.FILE_MODE_KEYWORD or self.request.method != 'POST':
            return super().get_parsers()

        # Raw upload: the request body is the file content itself.
        return [FileUploadParser()]

    def get_renderers(self):
        # If not getting a file, return the standard renderers
        if self.mode != FileContainerViewSet.FILE_MODE_KEYWORD or self.request.method != 'GET':
            return super().get_renderers()

        # File download: stream the raw bytes back, not a serialised payload.
        return [BinaryFileRenderer()]

    def add_file(self, request: Request, pk=None, fn=None) -> Response:
        """
        Action to add a file to a object.

        :param request: The request containing the file data.
        :param pk: The primary key of the container object.
        :param fn: The filename of the file being added.
        :return: The response containing the file record.
        """
        # Get the container object
        container = self.get_object_of_type(FileContainerModel)

        # Create the file record from the data
        record = container.add_file(fn, request.data['file'].file.read())

        return Response(NamedFileSerialiser().to_representation(record))

    def add_files(self, request: Request, pk=None) -> Response:
        """
        Action to add a set of files to an object.

        :param request: The request containing the file data.
        :param pk: The primary key of the container object.
        :return: The response containing the file records.
        """
        # Get the container object
        container = self.get_object_of_type(FileContainerModel)

        # Create the file record from the data
        records = container.add_files(request.data['file'].file.read())

        file_serialiser = NamedFileSerialiser()

        return Response(
            [file_serialiser.to_representation(instance)
             for instance in records]
        )

    def get_file(self, request: Request, pk=None, fn=None) -> Response:
        """
        Gets a file from the dataset for download.

        :param request: The request.
        :param pk: The primary key of the dataset being accessed.
        :param fn: The filename of the file being asked for.
        :return: The response containing the file.
        """
        # Get the container object
        container = self.get_object_of_type(FileContainerModel)

        return Response(container.get_file(fn))

    def delete_file(self, request: Request, pk=None, fn=None) -> Response:
        """
        Action to delete a file from a dataset.

        :param request: The request containing the file data.
        :param pk: The primary key of the dataset being accessed.
        :param fn: The filename of the file being deleted.
        :return: The response containing the disk-file record.
        """
        # Get the container object
        container = self.get_object_of_type(FileContainerModel)

        # Delete the file
        record = container.delete_file(fn)

        return Response(NamedFileSerialiser().to_representation(record))

    def get_file_by_handle(self, request: Request, pk=None, fh=None) -> Response:
        """
        Gets a file from the dataset for download.

        :param request: The request.
        :param pk: The primary key of the dataset being accessed.
        :param fh: The file-handle of the file being asked for.
        :return: The response containing the file.
        """
        # Get the container object
        container = self.get_object_of_type(FileContainerModel)

        return Response(container.get_file_by_handle(fh))

    def set_metadata(self, request: Request, pk=None, fn=None) -> Response:
        """
        Action to set the meta-data of a file.

        :param request: The request containing the file meta-data.
        :param pk: The primary key of the file-container being accessed.
        :param fn: The filename of the file being modified.
        :return: A response containing the set meta-data.
        """
        # Get the container object
        container = self.get_object_of_type(FileContainerModel)

        # Get the meta-data from the request
        metadata = JSONParseFailure.attempt(dict(request.data), FileMetadata)

        # Set the metadata of the file
        container.set_file_metadata(fn, metadata.metadata)

        return Response(metadata.to_raw_json())

    def get_metadata(self, request: Request, pk=None, fn=None) -> Response:
        """
        Action to retrieve the meta-data of a file.

        :param request: The request.
        :param pk: The primary key of the file-container being accessed.
        :param fn: The filename of the file being accessed.
        :return: A response containing the file's meta-data.
        """
        # Get the container object
        container = self.get_object_of_type(FileContainerModel)

        # Get the meta-data from the container
        metadata = container.get_file_metadata(fn)

        return Response(FileMetadata(metadata=metadata).to_raw_json())

    def get_all_metadata(self, request: Request, pk=None) -> Response:
        """
        Action to retrieve the meta-data for all files in the container.

        :param request: The request.
        :param pk: The primary key of the file-container being accessed.
        :return: A response containing the files' meta-data.
        """
        # Get the container object
        container = self.get_object_of_type(FileContainerModel)

        # NOTE(review): one metadata lookup per filename — presumably a DB hit
        # each; confirm acceptable for large containers.
        return Response(
            {fn: container.get_file_metadata(fn)
             for fn in container.iterate_filenames()}
        )
| StarcoderdataPython |
1872 | #!/usr/bin/python
# Copyright (C) 2014 Belledonne Communications SARL
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse
import os
import six
import string
import sys
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import metadoc
class CObject:
    """Base class for every parsed C entity (enum, struct, function, ...).

    Holds the normalized name plus documentation fields that the XML parsing
    code fills in afterwards.
    """

    def __init__(self, name):
        # Strip stray whitespace coming from the doxygen XML text nodes.
        self.name = name.strip()
        # Documentation/deprecation attributes, populated later by Project.
        self.deprecated = False
        self.briefDescription = ''
        self.briefDoc = None
        self.detailedDescription = None
class CEnumValue(CObject):
    """A single enumerator, optionally carrying an explicit initializer value."""

    def __init__(self, name):
        CObject.__init__(self, name)
        # Explicit '= X' initializer text, or None when implicit.
        self.value = None
class CEnum(CObject):
    """A C enum: an ordered collection of CEnumValue instances."""

    def __init__(self, name):
        CObject.__init__(self, name)
        # The CTypedef aliasing this enum, when one exists.
        self.associatedTypedef = None
        self.values = []

    def addValue(self, value):
        """Append one CEnumValue, preserving declaration order."""
        self.values.append(value)
class CStructMember(CObject):
    """A named field of a C struct together with its (stripped) C type."""

    def __init__(self, name, t):
        CObject.__init__(self, name)
        self.ctype = t.strip()
class CStruct(CObject):
    """A C struct: its member fields plus the typedef that may alias it."""

    def __init__(self, name):
        CObject.__init__(self, name)
        # The CTypedef aliasing this struct, when one exists.
        self.associatedTypedef = None
        self.members = []

    def addMember(self, member):
        """Append one CStructMember, preserving declaration order."""
        self.members.append(member)
class CTypedef(CObject):
    """A C typedef: binds a new name to a (stripped) definition string."""

    def __init__(self, name, definition):
        CObject.__init__(self, name)
        self.definition = definition.strip()
class CArgument(CObject):
    """A typed argument (or return value) of a C function.

    Parses the raw C type string ``t`` into a canonical ``completeType`` plus
    a bare ``ctype``, resolving ``struct``/``enum`` tags through their
    associated typedefs.

    :param t: raw C type text from the doxygen XML.
    :param name: argument name (empty for return values).
    :param enums: known CEnum objects, used to resolve 'enum X' types.
    :param structs: known CStruct objects, used to resolve 'struct X' types.
    """

    def __init__(self, t, name = '', enums = None, structs = None):
        CObject.__init__(self, name)
        # BUGFIX: the original used mutable default arguments (enums=[],
        # structs=[]); replaced with the None-sentinel idiom.
        if enums is None:
            enums = []
        if structs is None:
            structs = []
        self.description = None
        self.containedType = None
        keywords = [ 'const', 'struct', 'enum', 'signed', 'unsigned', 'short', 'long', '*' ]
        fullySplittedType = []
        splittedType = t.strip().split(' ')
        # Detach any '*' glued onto a token so it becomes its own token.
        for s in splittedType:
            if s.startswith('*'):
                fullySplittedType.append('*')
                if len(s) > 1:
                    fullySplittedType.append(s[1:])
            elif s.endswith('*'):
                fullySplittedType.append(s[:-1])
                fullySplittedType.append('*')
            else:
                fullySplittedType.append(s)
        # Deprecation markers are not part of the type proper.
        if 'MS2_DEPRECATED' in fullySplittedType:
            fullySplittedType.remove('MS2_DEPRECATED')
        elif 'LINPHONE_DEPRECATED' in fullySplittedType:
            fullySplittedType.remove('LINPHONE_DEPRECATED')
        isStruct = False
        isEnum = False
        self.ctype = 'int' # Default to int so that the result is correct eg. for 'unsigned short'
        for s in fullySplittedType:
            if not s in keywords:
                self.ctype = s
            if s == 'struct':
                isStruct = True
            if s == 'enum':
                isEnum = True
        if isStruct:
            # NOTE(review): this picks the typedef of the *last* struct that has
            # one, without comparing names against self.ctype — looks suspicious
            # but is preserved as-is; confirm against real doxygen input before
            # changing.
            for st in structs:
                if st.associatedTypedef is not None:
                    self.ctype = st.associatedTypedef.name
        elif isEnum:
            # NOTE(review): same name-agnostic matching as the struct case above.
            for e in enums:
                if e.associatedTypedef is not None:
                    self.ctype = e.associatedTypedef.name
        if self.ctype == 'int' and 'int' not in fullySplittedType:
            # 'unsigned'/'short'/'long' with no explicit 'int': make it explicit,
            # keeping any trailing '*' in final position.
            if fullySplittedType[-1] == '*':
                fullySplittedType.insert(-1, 'int')
            else:
                fullySplittedType.append('int')
        self.completeType = ' '.join(fullySplittedType)

    def __str__(self):
        return self.completeType + " " + self.name
class CArgumentsList:
    """An ordered, indexable list of CArgument objects."""

    def __init__(self):
        self.arguments = []

    def addArgument(self, arg):
        """Append one argument, preserving declaration order."""
        self.arguments.append(arg)

    def __len__(self):
        return len(self.arguments)

    def __getitem__(self, key):
        return self.arguments[key]

    def __str__(self):
        # Render as a C-style comma-separated parameter list.
        return ', '.join(str(item) for item in self.arguments)
class CFunction(CObject):
    """A free C function: return argument, argument list and source location."""

    def __init__(self, name, returnarg, argslist):
        CObject.__init__(self, name)
        # Header file path, filled in later from the XML <location> element.
        self.location = None
        self.returnArgument = returnarg
        self.arguments = argslist
class CEvent(CFunction):
    # A callback/event has exactly the same shape as a function; this distinct
    # type only serves as a marker for Project.add() dispatching.
    pass
class CProperty:
    """A pseudo-property synthesized from matching getter/setter C functions."""

    def __init__(self, name):
        self.name = name
        # Filled in by CClass as matching accessor functions are discovered.
        self.getter = None
        self.setter = None
class CClass(CObject):
    """An object-oriented view of a typedef'd C struct: its C functions are
    classified into class methods, instance methods, events and properties
    based on naming conventions and argument shapes."""

    def __init__(self, st):
        # The class is named after the typedef, not the raw struct tag.
        CObject.__init__(self, st.associatedTypedef.name)
        # Deprecation/documentation are inherited from the typedef first,
        # falling back to the struct itself.
        if st.deprecated or st.associatedTypedef.deprecated:
            self.deprecated = True
        if len(st.associatedTypedef.briefDescription) > 0:
            self.briefDescription = st.associatedTypedef.briefDescription
        elif len(st.briefDescription) > 0:
            self.briefDescription = st.briefDescription
        if st.associatedTypedef.detailedDescription is not None:
            self.detailedDescription = st.associatedTypedef.detailedDescription
        elif st.detailedDescription is not None:
            self.detailedDescription = st.detailedDescription
        self.__struct = st
        self.events = {}
        self.classMethods = {}
        self.instanceMethods = {}
        self.properties = {}
        self.__computeCFunctionPrefix()

    def __computeCFunctionPrefix(self):
        # CamelCase -> snake_case with trailing '_', e.g. 'LinphoneCall' ->
        # 'linphone_call_'. Used to match C function names to this class.
        self.cFunctionPrefix = ''
        first = True
        for l in self.name:
            if l.isupper() and not first:
                self.cFunctionPrefix += '_'
            self.cFunctionPrefix += l.lower()
            first = False
        self.cFunctionPrefix += '_'

    def __addPropertyGetter(self, name, f):
        # Create the property on first sight of either accessor.
        if not name in self.properties:
            prop = CProperty(name)
            self.properties[name] = prop
        self.properties[name].getter = f

    def __addPropertySetter(self, name, f):
        if not name in self.properties:
            prop = CProperty(name)
            self.properties[name] = prop
        self.properties[name].setter = f

    def __addClassMethod(self, f):
        if not f.name in self.classMethods:
            self.classMethods[f.name] = f

    def __addInstanceMethod(self, f):
        # Classify by naming convention; the elif order matters ('get_'/'set_'
        # prefixes win over '_enabled' suffixes).
        name = f.name[len(self.cFunctionPrefix):]
        if name.startswith('get_') and len(f.arguments) == 1:
            self.__addPropertyGetter(name[4:], f)
        elif name.startswith('is_') and len(f.arguments) == 1 and f.returnArgument.ctype == 'bool_t':
            self.__addPropertyGetter(name, f)
        elif name.endswith('_enabled') and len(f.arguments) == 1 and f.returnArgument.ctype == 'bool_t':
            self.__addPropertyGetter(name, f)
        elif name.startswith('set_') and len(f.arguments) == 2:
            self.__addPropertySetter(name[4:], f)
        elif name.startswith('enable_') and len(f.arguments) == 2 and f.arguments[1].ctype == 'bool_t':
            # 'enable_x(obj, bool)' pairs with the 'x_enabled' getter above.
            self.__addPropertySetter(name[7:] + '_enabled', f)
        else:
            if not f.name in self.instanceMethods:
                self.instanceMethods[f.name] = f

    def addEvent(self, ev):
        """Register a callback/event, first registration wins."""
        if not ev.name in self.events:
            self.events[ev.name] = ev

    def addMethod(self, f):
        """Register a C function: instance method when its first argument is
        this class's type, class method otherwise."""
        if len(f.arguments) > 0 and f.arguments[0].ctype == self.name:
            self.__addInstanceMethod(f)
        else:
            self.__addClassMethod(f)
class Project:
    """Aggregates everything parsed from the doxygen XML output and turns the
    raw C entities (enums, structs, typedefs, functions, events) into an
    object model of classes with methods and properties.

    Fixes applied versus the original:
      * __parseCEnumValueInitializer discarded the result of ``strip()``;
      * ``missingDocWarning`` could be referenced before assignment in the
        callback branch of __parseCTypedefMemberdef, and was later reset,
        losing the return-value warning (same reset bug in
        __parseCFunctionMemberdef);
      * ``argName`` could be unbound for degenerate callback argument text;
      * initFromFiles crashed in verbose mode when given path strings.
    """

    def __init__(self):
        self.verbose = False
        self.prettyPrint = False
        self.enums = []
        self.__structs = []
        self.__typedefs = []
        self.__events = []
        self.__functions = []
        self.classes = []
        self.docparser = metadoc.Parser()

    def add(self, elem):
        """Register a parsed element in the collection matching its type.

        Dispatch order matters: CEvent subclasses CFunction, so it is tested
        first.
        """
        if isinstance(elem, CClass):
            if self.verbose:
                print("Adding class " + elem.name)
            self.classes.append(elem)
        elif isinstance(elem, CEnum):
            if self.verbose:
                print("Adding enum " + elem.name)
                for ev in elem.values:
                    print("\t" + ev.name)
            self.enums.append(elem)
        elif isinstance(elem, CStruct):
            if self.verbose:
                print("Adding struct " + elem.name)
                for sm in elem.members:
                    print("\t" + sm.ctype + " " + sm.name)
            self.__structs.append(elem)
        elif isinstance(elem, CTypedef):
            if self.verbose:
                print("Adding typedef " + elem.name)
                print("\t" + elem.definition)
            self.__typedefs.append(elem)
        elif isinstance(elem, CEvent):
            if self.verbose:
                print("Adding event " + elem.name)
                print("\tReturns: " + elem.returnArgument.ctype)
                print("\tArguments: " + str(elem.arguments))
            self.__events.append(elem)
        elif isinstance(elem, CFunction):
            if self.verbose:
                print("Adding function " + elem.name)
                print("\tReturns: " + elem.returnArgument.ctype)
                print("\tArguments: " + str(elem.arguments))
            self.__functions.append(elem)

    def __cleanDescription(self, descriptionNode):
        """Strip doxygen-specific markup from a description node, in place,
        and normalize its tag to 'description'. Returns the same node."""
        for para in descriptionNode.findall('./para'):
            for n in para.findall('./parameterlist'):
                para.remove(n)
            for n in para.findall("./simplesect[@kind='return']"):
                para.remove(n)
            for n in para.findall("./simplesect[@kind='see']"):
                t = ''.join(n.itertext())
                n.clear()
                n.tag = 'see'
                n.text = t
            for n in para.findall("./simplesect[@kind='note']"):
                n.tag = 'note'
                n.attrib = {}
            # NOTE(review): './/xrefsect' can match grandchildren, for which
            # para.remove() would raise; preserved as-is since the doxygen
            # output apparently never nests them deeper — confirm if changed.
            for n in para.findall(".//xrefsect"):
                para.remove(n)
            for n in para.findall('.//ref'):
                n.attrib = {}
            for n in para.findall(".//bctbx_list"):
                para.remove(n)
        if descriptionNode.tag == 'parameterdescription':
            descriptionNode.tag = 'description'
        if descriptionNode.tag == 'simplesect':
            descriptionNode.tag = 'description'
            descriptionNode.attrib = {}
        return descriptionNode

    def __canBeWrapped(self, node):
        # Entities annotated with a <donotwrap> marker are skipped entirely.
        return node.find('./detaileddescription//donotwrap') is None

    def __discoverClasses(self):
        """Link typedefs to enums/structs, promote typedef'd structs to
        classes, then attach events and functions to their classes."""
        for td in self.__typedefs:
            if td.definition.startswith('enum '):
                for e in self.enums:
                    if (e.associatedTypedef is None) and td.definition[5:] == e.name:
                        e.associatedTypedef = td
                        break
            elif td.definition.startswith('struct '):
                structFound = False
                for st in self.__structs:
                    if (st.associatedTypedef is None) and td.definition[7:] == st.name:
                        st.associatedTypedef = td
                        structFound = True
                        break
                if not structFound:
                    # Opaque struct: synthesize a CStruct so a class can still
                    # be generated for it.
                    name = td.definition[7:]
                    print("Structure with no associated typedef: " + name)
                    st = CStruct(name)
                    st.associatedTypedef = td
                    self.add(st)
        for td in self.__typedefs:
            if td.definition.startswith('struct '):
                for st in self.__structs:
                    if st.associatedTypedef == td:
                        cclass = CClass(st)
                        cclass.briefDoc = td.briefDoc
                        self.add(cclass)
                        break
            elif ('Linphone' + td.definition) == td.name:
                # 'typedef X LinphoneX' style opaque types also become classes.
                st = CStruct(td.name)
                st.associatedTypedef = td
                cclass = CClass(st)
                cclass.briefDoc = td.briefDoc
                self.add(st)
                self.add(cclass)
        # Sort classes by length of name (longest first), so that methods are put in the right class
        self.classes.sort(key = lambda c: len(c.name), reverse = True)
        for e in self.__events:
            # Prefer the dedicated *Cbs companion class for callbacks.
            eventAdded = False
            for c in self.classes:
                if c.name.endswith('Cbs') and e.name.startswith(c.name):
                    c.addEvent(e)
                    eventAdded = True
                    break
            if not eventAdded:
                for c in self.classes:
                    if e.name.startswith(c.name):
                        c.addEvent(e)
                        eventAdded = True
                        break
        for f in self.__functions:
            for c in self.classes:
                if c.cFunctionPrefix == f.name[0 : len(c.cFunctionPrefix)]:
                    c.addMethod(f)
                    break

    def __parseCEnumValueInitializer(self, initializer):
        """Return the value part of an enumerator initializer ('= X' -> 'X'),
        or None when the text is not an initializer."""
        initializer = initializer.strip()
        if not initializer.startswith('='):
            return None
        initializer = initializer[1:]
        # BUGFIX: the original called initializer.strip() and discarded the
        # result, so values kept their surrounding whitespace.
        return initializer.strip()

    def __parseCEnumValue(self, node):
        """Build a CEnumValue from an <enumvalue> element."""
        ev = CEnumValue(node.find('./name').text)
        initializerNode = node.find('./initializer')
        if initializerNode is not None:
            ev.value = self.__parseCEnumValueInitializer(initializerNode.text)
        deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
        if deprecatedNode is not None:
            ev.deprecated = True
        ev.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
        ev.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
        ev.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
        return ev

    def __parseCEnumMemberdef(self, node):
        """Build a CEnum (with all public values) from a <memberdef> element,
        or None when the enum is marked not to be wrapped."""
        if not Project.__canBeWrapped(self, node):
            return None
        e = CEnum(node.find('./name').text)
        deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
        if deprecatedNode is not None:
            e.deprecated = True
        e.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
        e.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
        e.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
        enumvalues = node.findall("enumvalue[@prot='public']")
        for enumvalue in enumvalues:
            ev = self.__parseCEnumValue(enumvalue)
            e.addValue(ev)
        return e

    def __findCEnum(self, tree):
        memberdefs = tree.findall("./compounddef[@kind='group']/sectiondef[@kind='enum']/memberdef[@kind='enum'][@prot='public']")
        for m in memberdefs:
            e = self.__parseCEnumMemberdef(m)
            self.add(e)

    def __parseCStructMember(self, node, structname):
        """Build a CStructMember; the member's C type is everything in the
        definition preceding 'StructName::memberName'."""
        name = node.find('./name').text
        definition = node.find('./definition').text
        t = definition[0:definition.find(structname + "::" + name)]
        sm = CStructMember(name, t)
        deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
        if deprecatedNode is not None:
            sm.deprecated = True
        sm.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
        sm.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
        sm.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
        return sm

    def __parseCStructCompounddef(self, node):
        """Build a CStruct (with all public members) from a <compounddef>."""
        s = CStruct(node.find('./compoundname').text)
        deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
        if deprecatedNode is not None:
            s.deprecated = True
        s.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
        s.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
        s.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
        structmembers = node.findall("sectiondef/memberdef[@kind='variable'][@prot='public']")
        for structmember in structmembers:
            sm = self.__parseCStructMember(structmember, s.name)
            s.addMember(sm)
        return s

    def __findCStruct(self, tree):
        compounddefs = tree.findall("./compounddef[@kind='struct'][@prot='public']")
        for c in compounddefs:
            s = self.__parseCStructCompounddef(c)
            self.add(s)

    def __parseCTypedefMemberdef(self, node):
        """Parse a typedef <memberdef>. '...Cb' typedefs (function pointers)
        become CEvent objects; everything else becomes a CTypedef. Returns
        None for unwrappable or unparsable definitions."""
        if not Project.__canBeWrapped(self, node):
            return None
        name = node.find('./name').text
        definition = node.find('./definition').text
        if definition.startswith('typedef '):
            definition = definition[8 :]
        if name.endswith('Cb'):
            pos = definition.find("(*")
            if pos == -1:
                return None
            # BUGFIX: initialize before first use; the original only assigned
            # this inside the argument block, so an undocumented return value
            # raised UnboundLocalError (and the later reset discarded it).
            missingDocWarning = ''
            returntype = definition[0:pos].strip()
            returnarg = CArgument(returntype, enums = self.enums, structs = self.__structs)
            returndesc = node.find("./detaileddescription/para/simplesect[@kind='return']")
            if returndesc is not None:
                if returnarg.ctype == 'MSList' or returnarg.ctype == 'bctbx_list_t':
                    n = returndesc.find('.//bctbxlist')
                    if n is not None:
                        returnarg.containedType = n.text
                returnarg.description = self.__cleanDescription(returndesc)
            elif returnarg.completeType != 'void':
                missingDocWarning += "\tReturn value is not documented\n"
            definition = definition[pos + 2 :]
            pos = definition.find("(")
            definition = definition[pos + 1 : -1]
            argslist = CArgumentsList()
            for argdef in definition.split(', '):
                # BUGFIX: argName was unbound when the text contained neither
                # '*' nor ' ' (e.g. a bare 'void' parameter list).
                argType = ''
                argName = ''
                starPos = argdef.rfind('*')
                spacePos = argdef.rfind(' ')
                if starPos != -1:
                    argType = argdef[0 : starPos + 1]
                    argName = argdef[starPos + 1 :]
                elif spacePos != -1:
                    argType = argdef[0 : spacePos]
                    argName = argdef[spacePos + 1 :]
                argslist.addArgument(CArgument(argType, argName, self.enums, self.__structs))
            if len(argslist) > 0:
                paramdescs = node.findall("detaileddescription/para/parameterlist[@kind='param']/parameteritem")
                if paramdescs:
                    for arg in argslist.arguments:
                        for paramdesc in paramdescs:
                            if arg.name == paramdesc.find('./parameternamelist').find('./parametername').text:
                                arg.description = self.__cleanDescription(paramdesc.find('./parameterdescription'))
                for arg in argslist.arguments:
                    if arg.description is None:
                        missingDocWarning += "\t'" + arg.name + "' parameter not documented\n"
            if missingDocWarning != '':
                print(name + ":\n" + missingDocWarning)
            f = CEvent(name, returnarg, argslist)
            deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
            if deprecatedNode is not None:
                f.deprecated = True
            f.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
            f.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
            f.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
            return f
        else:
            pos = definition.rfind(" " + name)
            if pos != -1:
                definition = definition[0 : pos]
            td = CTypedef(name, definition)
            deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
            if deprecatedNode is not None:
                td.deprecated = True
            td.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
            td.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
            td.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
            return td
        return None

    def __findCTypedef(self, tree):
        memberdefs = tree.findall("./compounddef[@kind='group']/sectiondef[@kind='typedef']/memberdef[@kind='typedef'][@prot='public']")
        for m in memberdefs:
            td = self.__parseCTypedefMemberdef(m)
            self.add(td)

    def __parseCFunctionMemberdef(self, node):
        """Parse a function <memberdef> into a CFunction, or None when it is
        unwrappable, internal or entirely undocumented."""
        if not Project.__canBeWrapped(self, node):
            return None
        internal = node.find("./detaileddescription/internal")
        if internal is not None:
            return None
        missingDocWarning = ''
        name = node.find('./name').text
        t = ''.join(node.find('./type').itertext())
        returnarg = CArgument(t, enums = self.enums, structs = self.__structs)
        returndesc = node.find("./detaileddescription/para/simplesect[@kind='return']")
        if returndesc is not None:
            if returnarg.ctype == 'MSList' or returnarg.ctype == 'bctbx_list_t':
                n = returndesc.find('.//bctbxlist')
                if n is not None:
                    returnarg.containedType = n.text
            returnarg.description = self.__cleanDescription(returndesc)
        elif returnarg.completeType != 'void':
            missingDocWarning += "\tReturn value is not documented\n"
        argslist = CArgumentsList()
        argslistNode = node.findall('./param')
        for argNode in argslistNode:
            argType = ''.join(argNode.find('./type').itertext())
            argName = ''
            argNameNode = argNode.find('./declname')
            if argNameNode is not None:
                argName = ''.join(argNameNode.itertext())
            if argType != 'void':
                argslist.addArgument(CArgument(argType, argName, self.enums, self.__structs))
        if len(argslist) > 0:
            paramdescs = node.findall("./detaileddescription/para/parameterlist[@kind='param']/parameteritem")
            if paramdescs:
                for arg in argslist.arguments:
                    for paramdesc in paramdescs:
                        if arg.name == paramdesc.find('./parameternamelist').find('./parametername').text:
                            if arg.ctype == 'MSList' or arg.ctype == 'bctbx_list_t':
                                n = paramdesc.find('.//bctbxlist')
                                if n is not None:
                                    arg.containedType = n.text
                            arg.description = self.__cleanDescription(paramdesc.find('./parameterdescription'))
            # BUGFIX: the original reset missingDocWarning here, silently
            # discarding the return-value warning accumulated above.
            for arg in argslist.arguments:
                if arg.description is None:
                    missingDocWarning += "\t'" + arg.name + "' parameter not documented\n"
        f = CFunction(name, returnarg, argslist)
        deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
        if deprecatedNode is not None:
            f.deprecated = True
        f.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
        f.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
        f.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
        if f.briefDescription == '' and ''.join(f.detailedDescription.itertext()).strip() == '':
            return None
        locationNode = node.find('./location')
        if locationNode is not None:
            f.location = locationNode.get('file')
            if not f.location.endswith('.h'):
                missingDocWarning += "\tNot documented in a header file ('" + f.location + "')\n"
        if missingDocWarning != '':
            print(name + ":\n" + missingDocWarning)
        return f

    def __findCFunction(self, tree):
        memberdefs = tree.findall("./compounddef[@kind='group']/sectiondef[@kind='func']/memberdef[@kind='function'][@prot='public'][@static='no']")
        for m in memberdefs:
            f = self.__parseCFunctionMemberdef(m)
            if f is not None:
                self.add(f)

    def initFromFiles(self, xmlfiles):
        """Parse the given XML files (paths or file objects) and build the
        full object model."""
        trees = []
        for f in xmlfiles:
            tree = None
            try:
                if self.verbose:
                    # BUGFIX: f is a path string when coming from initFromDir;
                    # the original unconditionally accessed f.name.
                    print("Parsing XML file: " + getattr(f, 'name', f))
                tree = ET.parse(f)
            except ET.ParseError as e:
                print(e)
            if tree is not None:
                trees.append(tree)
        # Parse in dependency order: enums/structs are needed to resolve the
        # types referenced by typedefs and functions.
        for tree in trees:
            self.__findCEnum(tree)
        for tree in trees:
            self.__findCStruct(tree)
        for tree in trees:
            self.__findCTypedef(tree)
        for tree in trees:
            self.__findCFunction(tree)
        self.__discoverClasses()

    def initFromDir(self, xmldir):
        """Parse every .xml file in xmldir (non-recursive)."""
        files = [ os.path.join(xmldir, f) for f in os.listdir(xmldir) if (os.path.isfile(os.path.join(xmldir, f)) and f.endswith('.xml')) ]
        self.initFromFiles(files)

    def check(self):
        """Report properties that have a setter but no getter."""
        for c in self.classes:
            for name, p in six.iteritems(c.properties):
                if p.getter is None and p.setter is not None:
                    print("Property '" + name + "' of class '" + c.name + "' has a setter but no getter")
class Generator:
def __init__(self, outputfile):
self.__outputfile = outputfile
def __generateEnum(self, cenum, enumsNode):
enumNodeAttributes = { 'name' : cenum.name, 'deprecated' : str(cenum.deprecated).lower() }
if cenum.associatedTypedef is not None:
enumNodeAttributes['name'] = cenum.associatedTypedef.name
enumNode = ET.SubElement(enumsNode, 'enum', enumNodeAttributes)
if cenum.briefDescription != '':
enumBriefDescriptionNode = ET.SubElement(enumNode, 'briefdescription')
enumBriefDescriptionNode.text = cenum.briefDescription
enumNode.append(cenum.detailedDescription)
if len(cenum.values) > 0:
enumValuesNode = ET.SubElement(enumNode, 'values')
for value in cenum.values:
enumValuesNodeAttributes = { 'name' : value.name, 'deprecated' : str(value.deprecated).lower() }
valueNode = ET.SubElement(enumValuesNode, 'value', enumValuesNodeAttributes)
if value.briefDescription != '':
valueBriefDescriptionNode = ET.SubElement(valueNode, 'briefdescription')
valueBriefDescriptionNode.text = value.briefDescription
valueNode.append(value.detailedDescription)
def __generateFunction(self, parentNode, nodeName, f):
functionAttributes = { 'name' : f.name, 'deprecated' : str(f.deprecated).lower() }
if f.location is not None:
functionAttributes['location'] = f.location
functionNode = ET.SubElement(parentNode, nodeName, functionAttributes)
returnValueAttributes = { 'type' : f.returnArgument.ctype, 'completetype' : f.returnArgument.completeType }
if f.returnArgument.containedType is not None:
returnValueAttributes['containedtype'] = f.returnArgument.containedType
returnValueNode = ET.SubElement(functionNode, 'return', returnValueAttributes)
if f.returnArgument.description is not None:
returnValueNode.append(f.returnArgument.description)
argumentsNode = ET.SubElement(functionNode, 'arguments')
for arg in f.arguments:
argumentNodeAttributes = { 'name' : arg.name, 'type' : arg.ctype, 'completetype' : arg.completeType }
if arg.containedType is not None:
argumentNodeAttributes['containedtype'] = arg.containedType
argumentNode = ET.SubElement(argumentsNode, 'argument', argumentNodeAttributes)
if arg.description is not None:
argumentNode.append(arg.description)
if f.briefDescription != '':
functionBriefDescriptionNode = ET.SubElement(functionNode, 'briefdescription')
functionBriefDescriptionNode.text = f.briefDescription
functionNode.append(f.detailedDescription)
def __generateClass(self, cclass, classesNode):
# Do not include classes that contain nothing
if len(cclass.events) == 0 and len(cclass.classMethods) == 0 and \
len(cclass.instanceMethods) == 0 and len(cclass.properties) == 0:
return
# Check the capabilities of the class
has_ref_method = False
has_unref_method = False
has_destroy_method = False
for methodname in cclass.instanceMethods:
methodname_without_prefix = methodname.replace(cclass.cFunctionPrefix, '')
if methodname_without_prefix == 'ref':
has_ref_method = True
elif methodname_without_prefix == 'unref':
has_unref_method = True
elif methodname_without_prefix == 'destroy':
has_destroy_method = True
refcountable = False
destroyable = False
if has_ref_method and has_unref_method:
refcountable = True
if has_destroy_method:
destroyable = True
classNodeAttributes = {
'name' : cclass.name,
'cfunctionprefix' : cclass.cFunctionPrefix,
'deprecated' : str(cclass.deprecated).lower(),
'refcountable' : str(refcountable).lower(),
'destroyable' : str(destroyable).lower()
}
# Generate the XML node for the class
classNode = ET.SubElement(classesNode, 'class', classNodeAttributes)
if len(cclass.events) > 0:
eventsNode = ET.SubElement(classNode, 'events')
eventnames = []
for eventname in cclass.events:
eventnames.append(eventname)
eventnames.sort()
for eventname in eventnames:
self.__generateFunction(eventsNode, 'event', cclass.events[eventname])
if len(cclass.classMethods) > 0:
classMethodsNode = ET.SubElement(classNode, 'classmethods')
methodnames = []
for methodname in cclass.classMethods:
methodnames.append(methodname)
methodnames.sort()
for methodname in methodnames:
self.__generateFunction(classMethodsNode, 'classmethod', cclass.classMethods[methodname])
if len(cclass.instanceMethods) > 0:
instanceMethodsNode = ET.SubElement(classNode, 'instancemethods')
methodnames = []
for methodname in cclass.instanceMethods:
methodnames.append(methodname)
methodnames.sort()
for methodname in methodnames:
self.__generateFunction(instanceMethodsNode, 'instancemethod', cclass.instanceMethods[methodname])
if len(cclass.properties) > 0:
propertiesNode = ET.SubElement(classNode, 'properties')
propnames = []
for propname in cclass.properties:
propnames.append(propname)
propnames.sort()
for propname in propnames:
propertyNodeAttributes = { 'name' : propname }
propertyNode = ET.SubElement(propertiesNode, 'property', propertyNodeAttributes)
if cclass.properties[propname].getter is not None:
self.__generateFunction(propertyNode, 'getter', cclass.properties[propname].getter)
if cclass.properties[propname].setter is not None:
self.__generateFunction(propertyNode, 'setter', cclass.properties[propname].setter)
if cclass.briefDescription != '':
classBriefDescriptionNode = ET.SubElement(classNode, 'briefdescription')
classBriefDescriptionNode.text = cclass.briefDescription
classNode.append(cclass.detailedDescription)
def generate(self, project):
    """Write the whole *project* (enums then classes) as an XML document
    to the output file supplied at construction time.

    Both lists are sorted by name in place so the generated document is
    deterministic across runs.
    """
    print("Generating XML document of Linphone API to '" + self.__outputfile.name + "'")
    apiNode = ET.Element('api')
    # NOTE: sorts the caller's lists in place (visible side effect on `project`).
    project.enums.sort(key = lambda e: e.name)
    if len(project.enums) > 0:
        enumsNode = ET.SubElement(apiNode, 'enums')
        for cenum in project.enums:
            self.__generateEnum(cenum, enumsNode)
    if len(project.classes) > 0:
        classesNode = ET.SubElement(apiNode, 'classes')
        project.classes.sort(key = lambda c: c.name)
        for cclass in project.classes:
            self.__generateClass(cclass, classesNode)
    # Hand-written XML declaration followed by the serialized tree.
    # NOTE(review): ET.tostring() with an explicit encoding emits its own
    # declaration too, so the non-pretty output likely contains two
    # declarations -- confirm intended.
    s = '<?xml version="1.0" encoding="UTF-8" ?>\n'.encode('utf-8')
    s += ET.tostring(apiNode, 'utf-8')
    if project.prettyPrint:
        # NOTE(review): toprettyxml() returns text while the branch above
        # builds bytes; write() below appears to rely on Python 2 / text-mode
        # file semantics -- confirm.
        s = minidom.parseString(s).toprettyxml(indent='\t')
    self.__outputfile.write(s)
def main(argv = None):
    """Command-line entry point.

    Parses arguments, builds the API model from a doxygen-generated XML
    directory and writes the Linphone API description as XML.

    :param argv: Full argument vector to parse (defaults to ``sys.argv``);
                 element 0 is treated as the program name and skipped.
    :returns: ``None`` on success (mapped to exit status 0 by ``sys.exit``).
    """
    if argv is None:
        argv = sys.argv
    argparser = argparse.ArgumentParser(description="Generate XML version of the Linphone API.")
    argparser.add_argument('-o', '--outputfile', metavar='outputfile', type=argparse.FileType('w'), help="Output XML file describing the Linphone API.")
    argparser.add_argument('--verbose', help="Increase output verbosity", action='store_true')
    argparser.add_argument('--pretty', help="XML pretty print", action='store_true')
    argparser.add_argument('xmldir', help="XML directory generated by doxygen.")
    # BUGFIX: the original called parse_args() with no arguments, silently
    # ignoring the `argv` parameter; pass the provided vector (minus the
    # program name) so callers can drive main() programmatically.
    args = argparser.parse_args(argv[1:])
    if args.outputfile is None:  # PEP 8: compare to None with `is`, not `==`
        args.outputfile = open('api.xml', 'w')
    project = Project()
    if args.verbose:
        project.verbose = True
    if args.pretty:
        project.prettyPrint = True
    project.initFromDir(args.xmldir)
    project.check()
    gen = Generator(args.outputfile)
    gen.generate(project)
if __name__ == "__main__":
    # Script entry point; main() returns None on success, which sys.exit()
    # maps to exit status 0.
    sys.exit(main())
| StarcoderdataPython |
3369970 | <filename>fit_Mdyn/analyze_fit_dynesty.py
import pickle
import dynesty
from dynesty import plotting as dyplot
import matplotlib.pyplot as plt

# Location of the pickled dynesty sampling results.
wdir = '/Users/justinvega/Documents/GitHub/dyn-masses/fit_Mdyn/pickles/'
# BUGFIX: the original opened the pickle file and never closed it; the
# context manager guarantees the handle is released even if unpickling fails.
with open(wdir + 'dynesty_results_90000_logL.pickle', 'rb') as picklefile:
    dyresults = pickle.load(picklefile)
# set parameter labels, truths
theta_true = [40, 130, 0.7, 200, 2.3, 1, 205, 0.5, 20, 347.6, 4.0, 0, 0]
lbls = [r'$i$', r'$PA$', r'$M$', r'$r_l$', r'$z0$', r'$z_{\psi}$', r'$Tb_{0}$', r'$Tb_q$', r'$T_{\rm{back}}$', r'$dV_{0}$', r'$v_{\rm{sys}}$', r'$dx$', r'$dy$']
# plot cornerplot (not working yet)
# fig, axes = dyplot.cornerplot(db, truths=theta_true, labels=lbls)
# plot traces
fig, axes = dyplot.traceplot(dyresults, truths=theta_true, labels=lbls,
                             truth_color='black', show_titles=True,
                             trace_cmap='viridis', fig=plt.subplots(13, 2, figsize=(16, 64)))
fig.tight_layout()
#plt.show() # necessary to see plots
plt.savefig("./plots/traceplot_90000_logL.jpg")
#plt.close()
| StarcoderdataPython |
1623212 | #11/1/2018
#universal embedding
# NOTE(review): this is a Python 2 script (print statements, execfile);
# it will not run under Python 3 without porting.
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
from gensim.models import Word2Vec
from gensim.test.utils import common_texts
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.test.utils import get_tmpfile
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
# Pulls shared names (presumably pd, np, csv, KFold, GridSearchCV, sklearn,
# ...) into this module's globals from a sibling helper script -- verify.
execfile('../python_libraries.py')
dta = pd.read_pickle('../../data/features_pkl/dta.pickle')
# Embedding hyper-parameters; must match a previously generated embedding file.
ndim = 100 #100, 200
window=5 #try smaller
min_count=3 #3-5
embedding_df = pd.read_pickle('../../data/features_pkl/embedding_df_ndim_'+ str(ndim)+'_window_'+str(window)+'_min_count_'+str(min_count) + '.pickle')
# other features
cuisine_df = pd.read_pickle('../../data/features_pkl/cuisine.pickle')
dta = pd.read_pickle('../../data/features_pkl/dta.pickle')
zip_code_df = pd.read_pickle('../../data/features_pkl/zip_code_df.pickle')
# Experiment switches: hyper-parameter search on/off, model family, search type.
CV_model = True # False True
model_type = 'RF' # 'RF' 'SVR'
search_type = 'random' # 'random' 'grid'
## Predictive Models to Try
# Inner CV folds used by the hyper-parameter search.
kf = KFold(n_splits = 3, shuffle = True, random_state = 5)
if CV_model:
    if model_type=='RF':
        # Random-forest search space.
        max_depth = [None, 5, 10]
        min_samples_leaf = [0.0005, 0.01, 0.05, 0.1]
        min_samples_split = [2, 5, 10]
        n_estimators = [100, 200, 500]
        max_features = [None, 0.25, 0.5, 0.75]
        # NOTE(review): min_samples_split is defined above but not included
        # in the grid below -- confirm that is intentional.
        param_grid = {'max_features': max_features,
                      'max_depth': max_depth,
                      'min_samples_leaf': min_samples_leaf,
                      'n_estimators':n_estimators}
        clf = sklearn.ensemble.RandomForestRegressor(random_state = 123)
        if search_type == 'grid':
            model_full = GridSearchCV(estimator = clf,
                                      param_grid = param_grid,
                                      cv = kf, verbose=0,
                                      n_jobs = 3, scoring = 'neg_mean_squared_error')
        if search_type == 'random':
            model_full = RandomizedSearchCV(estimator = clf, random_state = 123,
                                            param_distributions = param_grid,
                                            cv = kf, verbose=0, n_iter = 25,
                                            n_jobs = 5, scoring = 'neg_mean_squared_error')
    if model_type =='SVR':
        Cs = [0.001, 0.01, 0.1, 1, 10, 100]
        param_grid = {'C': Cs}
        model_full = GridSearchCV(estimator = SVR(), param_grid = param_grid, cv=kf,
                                  verbose = 0, n_jobs = 3, scoring = 'neg_mean_squared_error')
else:
    # No hyper-parameter search: fixed model configurations.
    if model_type=='RF':
        model_full = sklearn.ensemble.RandomForestRegressor(n_estimators=200)
    if model_type=='SVR':
        model_full = SVR(C=100)
# save output
file_output = open('./results/kang_results_regression_CV_model_SEED_'+str(CV_model)+ '_Yelp_inspection_history_'+str(ndim)+'_window_'+str(window)+'_min_count_'+str(min_count)+ model_type+'_search_'+search_type+'.csv', 'wt')
writer = csv.writer(file_output)
writer.writerow( ('iteration','feature', 'train_split', 'model_params', 'mae', 'mse'))
dta_analysis_tmp = dta.copy()
# Keep only the id, inspection-history features and the regression target.
dta_analysis = dta_analysis_tmp[['inspection_id', 'inspection_average_prev_penalty_scores',
                                 'inspection_prev_penalty_score', 'inspection_penalty_score']]
# merge embeddings
dta_analysis = dta_analysis.merge(embedding_df,
                                  on = 'inspection_id',
                                  how = 'left')
# merge other features
#dta_analysis = dta_analysis.merge(cuisine_df,
#                                  on = 'inspection_id',
#                                  how = 'left')
#dta_analysis = dta_analysis.merge(zip_code_df,
#                                  on = 'inspection_id',
#                                  how = 'left')
# Feature columns = every column except the id and the target.
subset = np.array(dta_analysis.columns)[np.array(dta_analysis.columns)!='inspection_penalty_score']
subset=subset[subset!='inspection_id']
# NOTE(review): `counter` is printed below but never incremented -- confirm.
counter = 0
# Baseline feature set: drop the embedding columns and a few review columns.
feature_set_without_embedding = subset[~np.in1d(subset,embedding_df.columns[embedding_df.columns!='inspection_id'])]
feature_set_without_embedding = feature_set_without_embedding[~np.in1d(feature_set_without_embedding,[ 'review_count', 'non_positive_review_count', 'average_review_rating'])]
feature_set = [[feature_set_without_embedding], [subset]]
# Evaluate each feature set with 10-fold CV; one CSV row per fold.
for subset in feature_set:
    print 'counter: ', counter
    print 'features: ', subset ## ignore this value for 9
    kf = KFold(n_splits=10, # 10-fold CV is used in paper
               shuffle = True, # assuming they randomly select train/test
               random_state = 123) # random.seed for our own internal replication purposes
    mse_features = []
    m = 0
    for train, test in kf.split(dta_analysis):
        print m
        x_train = dta_analysis.iloc[train,:][subset[0]]
        if m == 0:
            # One-time sanity check of the columns actually fed to the model.
            print 'columns used as sanity check: ',x_train.columns
            print len(x_train.columns)
            print len(subset)
        y_train = dta_analysis.inspection_penalty_score.iloc[train]
        x_test = dta_analysis.iloc[test,:][subset[0]]
        y_test = dta_analysis.inspection_penalty_score.iloc[test]
        model_full.fit(x_train,
                       np.ravel(y_train))
        y_predict = model_full.predict(x_test)
        mse_features.append(mean_squared_error(y_test, y_predict))
        print mse_features
        m = m+1
        # Persist per-fold metrics; best_params_ only exists when a CV search
        # object was fitted above.
        if CV_model:
            writer.writerow( (m, subset[0], train,model_full.best_params_.values(),
                              mean_absolute_error(y_test, y_predict),
                              mean_squared_error(y_test, y_predict)))
        else:
            writer.writerow( (m, subset[0], train,'0',
                              mean_absolute_error(y_test, y_predict),
                              mean_squared_error(y_test, y_predict)))
    print ''
| StarcoderdataPython |
1750399 | <gh_stars>0
import pytest
def test_setupSmoke():
    """Smoke test that the test harness itself is wired up correctly."""
    # Idiom: dropped the C-style trailing semicolons from the original.
    x = 1
    y = 2
    assert (x + 1) == y, "test setup smoke failed"
| StarcoderdataPython |
3304511 | from kedro.io import AbstractDataSet, CSVLocalDataSet, MemoryDataSet, PickleLocalDataSet
import numpy as np
import logging
from .dataobjects.uplift_model_params import UpliftModelParams
from .dataobjects.propensity_model_params import PropensityModelParams
from .dataobjects.dataset_catalog import DatasetCatalog
from .dataobjects.raw_args import RawArgs
from .dataobjects.loggers import Loggers
log = logging.getLogger('causallift')
class BaseCausalLift:
"""
Set up datasets for uplift modeling.
Optionally, propensity scores are estimated based on logistic regression.
args:
train_df:
Pandas Data Frame containing samples used for training
test_df:
Pandas Data Frame containing samples used for testing
cols_features:
List of column names used as features.
If :obj:`None` (default), all the columns except for outcome,
propensity, CATE, and recommendation.
col_treatment:
Name of treatment column. 'Treatment' in default.
col_outcome:
Name of outcome column. 'Outcome' in default.
col_propensity:
Name of propensity column. 'Propensity' in default.
col_cate:
Name of CATE (Conditional Average Treatment Effect) column. 'CATE' in default.
col_recommendation:
Name of recommendation column. 'Recommendation' in default.
min_propensity:
Minimum propensity score. 0.01 in default.
max_propensity:
Maximum propensity score. 0.99 in defualt.
verbose:
How much info to show. Valid values are:
* :obj:`0` to show nothing
* :obj:`1` to show only warning
* :obj:`2` (default) to show useful info
* :obj:`3` to show more info
uplift_model_params:
Parameters used to fit 2 XGBoost classifier models.
* Optionally use `search_cv` key to specify the Search CV class name. \n
e.g. `sklearn.model_selection.GridSearchCV` \n
Refer to https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
* Use `estimator` key to specify the estimator class name. \n
e.g. `xgboost.XGBClassifier` \n
Refer to https://xgboost.readthedocs.io/en/latest/parameter.html
* Optionally use `const_params` key to specify the constant parameters to \
construct the estimator.
If :obj:`None` (default)::
dict(
search_cv="sklearn.model_selection.GridSearchCV",
estimator="xgboost.XGBClassifier",
scoring=None,
cv=3,
return_train_score=False,
n_jobs=-1,
param_grid=dict(
max_depth=[3],
learning_rate=[0.1],
n_estimators=[100],
verbose=[0],
objective=["binary:logistic"],
booster=["gbtree"],
n_jobs=[-1],
nthread=[None],
gamma=[0],
min_child_weight=[1],
max_delta_step=[0],
subsample=[1],
colsample_bytree=[1],
colsample_bylevel=[1],
reg_alpha=[0],
reg_lambda=[1],
scale_pos_weight=[1],
base_score=[0.5],
missing=[None],
),
)
Alternatively, estimator model object is acceptable.
The object must have the following methods compatible with
scikit-learn estimator interface.
* :func:`fit`
* :func:`predict`
* :func:`predict_proba`
enable_ipw:
Enable Inverse Probability Weighting based on the estimated propensity score.
True in default.
propensity_model_params:
Parameters used to fit logistic regression model to estimate propensity score.
* Optionally use `search_cv` key to specify the Search CV class name.\n
e.g. `sklearn.model_selection.GridSearchCV` \n
Refer to https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
* Use `estimator` key to specify the estimator class name. \n
e.g. `sklearn.linear_model.LogisticRegression` \n
Refer to https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
* Optionally use `const_params` key to specify the constant parameters \
to construct the estimator.
If :obj:`None` (default)::
dict(
search_cv="sklearn.model_selection.GridSearchCV",
estimator="sklearn.linear_model.LogisticRegression",
scoring=None,
cv=3,
return_train_score=False,
n_jobs=-1,
param_grid=dict(
C=[0.1, 1, 10],
class_weight=[None],
dual=[False],
fit_intercept=[True],
intercept_scaling=[1],
max_iter=[100],
multi_class=["ovr"],
n_jobs=[1],
penalty=["l1", "l2"],
solver=["liblinear"],
tol=[0.0001],
warm_start=[False],
),
)
index_name:
Index name of the pandas data frame after resetting the index. 'index' in default. \n
If :obj:`None`, the index will not be reset.
partition_name:
Additional index name to indicate the partition, train or test. 'partition' in default.
runner:
If set to 'SequentialRunner' (default) or 'ParallelRunner', the pipeline is run by Kedro
sequentially or in parallel, respectively. \n
If set to :obj:`None` , the pipeline is run by native Python. \n
Refer to https://kedro.readthedocs.io/en/latest/04_user_guide/05_nodes_and_pipelines.html#runners
conditionally_skip:
*[Effective only if runner is set to either 'SequentialRunner' or 'ParallelRunner']* \n
Skip running the pipeline if the output files already exist.
True in default.
dataset_catalog:
*[Effective only if runner is set to either 'SequentialRunner' or 'ParallelRunner']* \n
Specify dataset files to save in Dict[str, kedro.io.AbstractDataSet] format. \n
To find available file formats, refer to https://kedro.readthedocs.io/en/latest/kedro.io.html#data-sets \n
In default::
dict(
# args_raw = CSVLocalDataSet(filepath='../data/01_raw/args_raw.csv', version=None),
# train_df = CSVLocalDataSet(filepath='../data/01_raw/train_df.csv', version=None),
# test_df = CSVLocalDataSet(filepath='../data/01_raw/test_df.csv', version=None),
propensity_model = PickleLocalDataSet(
filepath='../data/06_models/propensity_model.pickle',
version=None
),
uplift_models_dict = PickleLocalDataSet(
filepath='../data/06_models/uplift_models_dict.pickle',
version=None
),
df_03 = CSVLocalDataSet(
filepath='../data/07_model_output/df.csv',
load_args=dict(index_col=['partition', 'index'], float_precision='high'),
save_args=dict(index=True, float_format='%.16e'),
version=None,
),
treated__sim_eval_df = CSVLocalDataSet(
filepath='../data/08_reporting/treated__sim_eval_df.csv',
version=None,
),
untreated__sim_eval_df = CSVLocalDataSet(
filepath='../data/08_reporting/untreated__sim_eval_df.csv',
version=None,
),
estimated_effect_df = CSVLocalDataSet(
filepath='../data/08_reporting/estimated_effect_df.csv',
version=None,
),
)
logging_config:
Specify logging configuration. \n
Refer to https://docs.python.org/3.6/library/logging.config.html#logging-config-dictschema \n
In default::
{'disable_existing_loggers': False,
'formatters': {
'json_formatter': {
'class': 'pythonjsonlogger.jsonlogger.JsonFormatter',
'format': '[%(asctime)s|%(name)s|%(funcName)s|%(levelname)s] %(message)s',
},
'simple': {
'format': '[%(asctime)s|%(name)s|%(levelname)s] %(message)s',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
'level': 'INFO',
'stream': 'ext://sys.stdout',
},
'info_file_handler': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'simple',
'filename': './info.log',
'maxBytes': 10485760, # 10MB
'backupCount': 20,
'encoding': 'utf8',
'delay': True,
},
'error_file_handler': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'ERROR',
'formatter': 'simple',
'filename': './errors.log',
'maxBytes': 10485760, # 10MB
'backupCount': 20,
'encoding': 'utf8',
'delay': True,
},
},
'loggers': {
'anyconfig': {
'handlers': ['console', 'info_file_handler', 'error_file_handler'],
'level': 'WARNING',
'propagate': False,
},
'kedro.io': {
'handlers': ['console', 'info_file_handler', 'error_file_handler'],
'level': 'WARNING',
'propagate': False,
},
'kedro.pipeline': {
'handlers': ['console', 'info_file_handler', 'error_file_handler'],
'level': 'INFO',
'propagate': False,
},
'kedro.runner': {
'handlers': ['console', 'info_file_handler', 'error_file_handler'],
'level': 'INFO',
'propagate': False,
},
'causallift': {
'handlers': ['console', 'info_file_handler', 'error_file_handler'],
'level': 'INFO',
'propagate': False,
},
},
'root': {
'handlers': ['console', 'info_file_handler', 'error_file_handler'],
'level': 'INFO',
},
'version': 1}
"""
def __init__(
    self,
    train_df=None,  # type: Optional[pd.DataFrame]
    test_df=None,  # type: Optional[pd.DataFrame]
    cols_features=None,  # type: Optional[List[str]]
    col_treatment="Treatment",  # type: str
    col_outcome="Outcome",  # type: str
    col_propensity="Propensity",  # type: str
    col_cate="CATE",  # type: str
    col_recommendation="Recommendation",  # type: str
    min_propensity=0.01,  # type: float
    max_propensity=0.99,  # type: float
    verbose=2,  # type: int
    uplift_model_params=UpliftModelParams(),  # type: UpliftModelParams
    enable_ipw=True,  # type: bool
    propensity_model_params=PropensityModelParams(),  # type: PropensityModelParams
    cv=3,  # type: int
    index_name="index",  # type: str
    partition_name="partition",  # type: str
    runner="SequentialRunner",  # type: str
    conditionally_skip=False,  # type: bool
    dataset_catalog=DatasetCatalog(),  # type: DatasetCatalog
    logging_config = Loggers(config_file=None)
):
    """Initialize internal state and bundle the raw constructor arguments.

    See the class docstring for the meaning of each argument.
    """
    # NOTE(review): the mutable defaults (UpliftModelParams(), DatasetCatalog(),
    # Loggers(...)) are created once at import time and shared by every instance
    # relying on the default -- confirm these objects are never mutated in place.
    # NOTE(review): the `cv` parameter is accepted but not stored anywhere in
    # this constructor -- confirm whether it is used elsewhere or dead.
    self.runner = runner  # type: Optional[str]
    # Placeholders populated later by the pipeline.
    self.kedro_context = None  # type: Optional[Type[FlexibleKedroContext]]
    self.args = None  # type: Optional[Type[EasyDict]]
    self.train_df = None  # type: Optional[Type[pd.DataFrame]]
    self.test_df = None  # type: Optional[Type[pd.DataFrame]]
    self.df = None  # type: Optional[Type[pd.DataFrame]]
    self.propensity_model = None  # type: Optional[Type[sklearn.base.BaseEstimator]]
    self.uplift_models_dict = None  # type: Optional[Type[EasyDict]]
    self.treatment_fractions = None  # type: Optional[Type[EasyDict]]
    self.treatment_fraction_train = None  # type: Optional[float]
    self.treatment_fraction_test = None  # type: Optional[float]
    self.treated__proba = None  # type: Optional[Type[np.array]]
    self.untreated__proba = None  # type: Optional[Type[np.array]]
    self.cate_estimated = None  # type: Optional[Type[pd.Series]]
    self.treated__sim_eval_df = None  # type: Optional[Type[pd.DataFrame]]
    self.untreated__sim_eval_df = None  # type: Optional[Type[pd.DataFrame]]
    self.estimated_effect_df = None  # type: Optional[Type[pd.DataFrame]]
    self.dataset_catalog = dataset_catalog
    self.logging_config = logging_config
    # Snapshot of the constructor arguments, passed around the pipeline.
    self.args_raw = RawArgs(
        cols_features=cols_features,
        col_treatment=col_treatment,
        col_outcome=col_outcome,
        col_propensity=col_propensity,
        col_cate=col_cate,
        col_recommendation=col_recommendation,
        min_propensity=min_propensity,
        max_propensity=max_propensity,
        verbose=verbose,
        uplift_model_params=uplift_model_params,
        enable_ipw=enable_ipw,
        propensity_model_params=propensity_model_params,
        index_name=index_name,
        partition_name=partition_name,
        runner=runner,
        conditionally_skip=conditionally_skip,
    )
    # Setup loggers
    self.logging_config.setup(verbose)
def _separate_train_test(self):
    # type: (...) -> Tuple[pd.DataFrame, pd.DataFrame]
    """Split ``self.df`` into its 'train' and 'test' partitions.

    Cross-sections the combined frame on the partition level of its index,
    caches both halves on the instance and returns them as a pair.
    """
    train_part = self.df.xs("train")
    test_part = self.df.xs("test")
    self.train_df, self.test_df = train_part, test_part
    return self.train_df, self.test_df
def estimate_cate_by_2_models(self):
    # type: (...) -> Tuple[pd.DataFrame, pd.DataFrame]
    r"""
    Estimate CATE (Conditional Average Treatment Effect) using 2 XGBoost classifier models.

    Abstract hook: the base class only defines the interface; concrete
    subclasses must override this with a real implementation.
    """
    raise NotImplementedError()
def estimate_recommendation_impact(
self,
cate_estimated=None, # type: Optional[Type[pd.Series]]
treatment_fraction_train=None, # type: Optional[float]
treatment_fraction_test=None, # type: Optional[float]
verbose=None, # type: Optional[int]
):
# type: (...) -> Type[pd.DataFrame]
r"""
Estimate the impact of recommendation based on uplift modeling.
args:
cate_estimated:
Pandas series containing the CATE.
If :obj:`None` (default), use the ones calculated by estimate_cate_by_2_models method.
treatment_fraction_train:
The fraction of treatment in train dataset.
If :obj:`None` (default), use the ones calculated by estimate_cate_by_2_models method.
treatment_fraction_test:
The fraction of treatment in test dataset.
If :obj:`None` (default), use the ones calculated by estimate_cate_by_2_models method.
verbose:
How much info to show.
If :obj:`None` (default), use the value set in the constructor.
"""
if cate_estimated is not None:
self.cate_estimated = cate_estimated
self.df.loc[:, self.args.col_cate] = cate_estimated.values | StarcoderdataPython |
4841058 | from numpy.core.numeric import Inf
import torch
from torchdiffeq import odeint_adjoint
import matplotlib.pyplot as plt
import torch.optim as optim
from utils import data_utils
from models import PendulumModel
import copy
# Get environment
device = 'cpu'
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'Training on {device}')
# Define integration parameters
t0 = 0.   # integration start time
te = 20.  # integration end time
t = torch.linspace(t0, te, 40).to(device)  # 40 evaluation points for the ODE solver
include_neural_net = True  # add a learned residual term to the physical model
model = PendulumModel(frictionless = True,include_neural_net = include_neural_net).to(device)
# dynamical system parameters
T0 = 12                  # pendulum period used to synthesize the data
omega = 2 * 3.1415 / T0  # target angular frequency (2*pi/T0, pi approximated)
alpha = 0.35
print(f'Expecting an omega of {omega}')
# Get data
batch_size = 25  # number of trajectories per dataset split
train,val,test = data_utils.get_pendulum_datasets(n=batch_size,T0 = T0,alpha = alpha)
train,val,test = train.to(device),val.to(device),test.to(device)
# Define accessories
optimizer = optim.Adam(model.parameters(), lr=0.001)
def train_model():
    """Run one optimization step on the training trajectories.

    Integrates the model ODE from each trajectory's initial state, penalizes
    the L2 deviation from the observed data (plus the magnitude of the
    neural-net residual when enabled) and applies a single Adam update.
    Operates on the module-level model, optimizer, data and hyper-parameters.
    """
    model.train()
    optimizer.zero_grad()

    # Integrate over the time grid `t`; transpose to (batch, time, state)
    # so the prediction lines up with the data layout.
    initial_states = train[:, 0, :]
    predicted = odeint_adjoint(
        model, initial_states, t, atol=1e-2, rtol=1e-2, method='dopri5'
    ).transpose(0, 1)

    if include_neural_net:
        residual = model.neural_net(t, train)
        regularization = torch.sum(torch.linalg.norm(residual, dim=2) ** 2) / batch_size
    else:
        regularization = 0

    data_term = lam * torch.sum(torch.linalg.norm(predicted - train, dim=2)) / batch_size
    total = regularization + data_term

    total.backward()
    optimizer.step()
def test_model():
    """Evaluate the current model on the held-out test trajectories.

    Mirrors the loss computed in train_model() but without gradients.
    Returns the (total, neural-net, L2) loss triple.
    """
    model.eval()
    with torch.no_grad():
        # Get data
        initial_states = test[:, 0, :]
        predicted = odeint_adjoint(
            model, initial_states, t, atol=1e-2, rtol=1e-2, method='dopri5'
        ).transpose(0, 1)

        # Calculate loss
        nn_term = (
            torch.sum(torch.linalg.norm(model.neural_net(t, test), dim=2) ** 2) / batch_size
            if include_neural_net
            else 0
        )
        l2_term = lam * torch.sum(torch.linalg.norm(predicted - test, dim=2)) / batch_size
        total = nn_term + l2_term
    return total, nn_term, l2_term
lam = 1       # weight of the L2 data term in the loss
patience = 40 # early-stopping patience, in epochs
epochs_since_last_improvement = 0
best_loss = Inf
for i in range(2000):
    train_model()
    loss, nn_loss,l2_loss = test_model()
    # Print training progress
    if i % 1 == 0:  # NOTE(review): prints every epoch; raise the modulus to thin the log
        # Track physical model parameters
        print('*' * 20)
        print(f'iteration {i}')
        print(f'Data contribution = {nn_loss}')
        print(f'L2 contribution = {l2_loss}')
        print(f'Total loss = {loss}')
        for name, param in model.named_parameters():
            if param.requires_grad and name in ['omega','alpha']:
                print(f'{name} = {param.data.item()} ')
    # Implement early stopping: keep a deep copy of the best weights so far.
    # (best_loss starts at Inf, so best_model_weights is always set on epoch 0.)
    if loss < best_loss:
        best_loss = loss
        best_model_weights = copy.deepcopy(model.state_dict())
        epochs_since_last_improvement = 0
    else:
        epochs_since_last_improvement += 1
        if epochs_since_last_improvement >= patience:
            break
    # Update lambda parameter
    # if epochs_since_last_improvement >= (patience //2):
    #     lam += 2
# Restore the best checkpoint and move everything to CPU for plotting.
model.load_state_dict(best_model_weights)
model = model.to('cpu')
print('Final named parameter values are: ')
for name, param in model.named_parameters():
    if param.requires_grad and name in ['omega','alpha']:
        print(f'{name} = {param.data.item()} ')
# Regenerate the datasets on CPU and integrate once more at tighter tolerance.
train,val,test = data_utils.get_pendulum_datasets(n=batch_size,T0 = T0,alpha = alpha)
train,val,test = train.to('cpu'),val.to('cpu'),test.to('cpu')
init_state = train[:,0,:]
t = t.to('cpu')
sol = odeint_adjoint(model,init_state , t, atol=1e-4, rtol=1e-4,method='dopri5')
pos = sol[:,:,0].transpose(0,1)  # angular position trajectories
vel = sol[:,:,1].transpose(0,1)  # angular velocity trajectories (unused below)
plt.plot(t,pos[0,:].detach())
plt.plot(t,train[0,:,0])
plt.legend(['Learnt','True'])
plt.savefig('final_result.png')
torch.save(model,'model.pt')
3201158 | # -*- coding: utf-8 -*-
"""
Functions to perform deconvolution using the framework laid by Rudin-Osher and
Fatemi (ROF) in [1]. The minimization is performed using the FISTA method
derived by <NAME> and <NAME> in [2] as described in their paper for
TV-FISTA in [3]. The FISTA iterations have improved convergence by utilizing
the a momentum restart scheme, as proposed by <NAME>, Emmanuel
Candes in [4]. The denoising is performed using C code that runs at a minimum
of twice the speed of the Scikit-Image Chambolle total variation solver.
[1] <NAME> al. "Nonlinear total variation based noise removal algorithms"
Physica D: Nonlinear Phenomena (1992)
[2] <NAME> and <NAME>. "A fast iterative shrinkage-thresholding
algorithm for linear inverse problems."
SIAM journal on imaging sciences 2.1 (2009).
[3] <NAME> & <NAME>. "Fast Gradient-Based Algorithms for Constrained
Total Variation Image Denoising and Deblurring Problems."
IEEE Transactions on Image Processing (2009).
[4] <NAME> & Can<NAME>. "Adaptive Restart for Accelerated Gradient
Schemes." Foundations of Computational Mathematics (2012)
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import numpy as np
import scipy.signal as sig
from skimage.filters import gaussian
from tv_fista import deconvolve_fista
def deconvolve_tv(image, psf, noise_level=0.05, min_value=0, max_value=1,
                  intermediate_it=30, it=40, intermediate_eps=1e-3, eps=1e-5):
    """Computes the total variation regularized deconvolution of a given image,
    with the point spread function psf. This is computed using the FISTA
    method [2] and the framework derived in [3].

    :param image: Image to deconvolve.
    :param psf: Point spread function to invert.
    :param noise_level: Regularization parameter, higher means noisier data.
    :param min_value: Minimum pixel intensity.
    :param max_value: Maximum pixel intensity.
    :param intermediate_it: Iterations per proximal gradient computation.
    :param it: No. of FISTA iterations.
    :param intermediate_eps: Convergence level of proximal gradient computation.
    :param eps: Convergence level deconvolution iterations.
    :returns: Deconvoluted image.
    """
    # The adjoint of a 2-D convolution is convolution with the kernel rotated
    # by 180 degrees.
    psf_adjoint = np.rot90(psf, 2)
    # BUGFIX: scipy.signal has no `convolve_2d`; the 2-D convolution routine is
    # `convolve2d`, so the original raised AttributeError on first use.
    # (Locals also renamed from `filter`, which shadowed the builtin.)
    conv = lambda x: sig.convolve2d(x, psf, mode='same')
    conv_adjoint = lambda x: sig.convolve2d(x, psf_adjoint, mode='same')
    return deconvolve_fista(image, conv, conv_adjoint, noise_level,
                            it=it, intermediate_it=intermediate_it, eps=eps,
                            intermediate_eps=intermediate_eps,
                            min_value=min_value, max_value=max_value,
                            lipschitz=find_lipschitz(image,
                                                     lambda x: conv_adjoint(conv(x))))
def easy_gaussian_denoise(image, std, noise_level=0.05, min_value=0,
                          max_value=1, intermediate_it=30, it=40,
                          intermediate_eps=1e-3, eps=1e-5, lipschitz=None,
                          message=True):
    """Warning: Slow! Each iteration performs total variation deblurring.

    Total variation regularized deconvolution of *image* against a Gaussian
    blurring kernel of the given standard deviation, via the FISTA method [2],
    the framework of [3] and the restart scheme of [4].

    :param image: Image to deblur.
    :param std: Standard deviation (radius) of the gaussian blurring kernel
                to invert.
    :param noise_level: Regularization parameter - Almost always less than 1.
    :param min_value: Minimum pixel value.
    :param max_value: Maximum pixel value.
    :param intermediate_it: Iterations per proximal gradient computation.
    :param it: No. of FISTA iterations.
    :param intermediate_eps: Convergence level of proximal gradient computation.
    :param eps: Convergence level deconvolution iterations.
    :param lipschitz: Use higher than standard Lipschitz bound for the
                      convolution gradient functional.
    :param message: Show information during iterations.
    :return: Deblurred image.
    """
    if not lipschitz:
        lipschitz = 2
    # A Gaussian kernel is symmetric, so the blur operator is self-adjoint and
    # serves as both the forward and the adjoint filter.
    blur = lambda x: gaussian(x, std)
    return deconvolve_fista(image, blur, blur, noise_level,
                            it=it, intermediate_it=intermediate_it, eps=eps,
                            intermediate_eps=intermediate_eps,
                            min_value=min_value, max_value=max_value,
                            lipschitz=lipschitz, message=message)
def find_lipschitz(x0, operator, iterations=20):
    """Use power iterations to estimate the Lipschitz constant of a linear
    operator O: V -> V. To find the Lipschitz constant for the gradient of:
        ||Ax - b||,
    with linear operator A, constant vector b and variable x, one has to
    compute the Lipschitz constant of the composed operator [A'A].

    :param x0: Initial vector (not modified; a copy is iterated).
    :param operator: Function that corresponds to the operator, takes one
        vector as argument and returns a vector of the same size.
    :param iterations: Number of power iterations to run. Defaults to 20,
        the fixed count used previously; more iterations tighten the estimate.
    :returns: Lipschitz constant of the operator, floored at 1.1 (for
        stability of downstream step sizes)."""
    # Iterate on a copy so the caller's vector is untouched.
    x = np.copy(x0)
    lip = np.linalg.norm(x)
    for _ in range(iterations):
        # Normalize, then apply the operator; the resulting norm converges to
        # the dominant singular value. NOTE(review): assumes x0 has non-zero
        # norm (a zero norm would divide by zero, as in the original).
        x /= lip
        x = operator(x)
        lip = np.linalg.norm(x)
    return lip if lip > 1.1 else 1.1
| StarcoderdataPython |
193272 | <gh_stars>0
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list available environment upgrades."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import image_versions_util as image_versions_command_util
from googlecloudsdk.command_lib.composer import resource_args
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class ListUpgrades(base.ListCommand):
    """List the Cloud Composer image version upgrades for a specific environment.

    {command} prints a table listing the available image-version upgrades with the
    following columns:

    * Image Version ID
    * Composer 'default' flag
    * List of supported python versions
    """

    @staticmethod
    def Args(parser):
        # Register the target environment resource argument and the table
        # output format. The URI flag added by base.ListCommand does not apply
        # to image versions, so it is removed from the parser.
        resource_args.AddEnvironmentResourceArg(parser, 'to list upgrades')
        base.URI_FLAG.RemoveFromParser(parser)
        parser.display_info.AddFormat(
            'table[box,title="AVAILABLE UPGRADES"]('
            'imageVersionId:label="IMAGE VERSION",'
            'isDefault:label="COMPOSER DEFAULT",'
            'supportedPythonVersions.list():label="SUPPORTED PYTHON VERSIONS")')

    def Run(self, args):
        # Resolve the environment resource from the parsed arguments and
        # delegate to the shared helper for the active release track.
        env_ref = args.CONCEPTS.environment.Parse()
        return image_versions_command_util.ListImageVersionUpgrades(
            env_ref, release_track=self.ReleaseTrack())
| StarcoderdataPython |
3344608 | <filename>src/bot.py<gh_stars>0
import argparse
import aws_comprehend as ac
import json
import os
import qa_engine as qa
import slackclient
import time
import urllib.request
# Instance metadata fetched from the EC2 instance metadata service at import
# time. NOTE(review): these are blocking network calls with no timeout and
# assume the module is imported on an EC2 instance -- importing it anywhere
# else will hang or fail; confirm that is acceptable.
INSTANCE_ID = urllib.request.urlopen('http://169.254.169.254/latest/meta-data/instance-id').read().decode()
AVAILABILITY_ZONE = urllib.request.urlopen('http://169.254.169.254/latest/meta-data/placement/availability-zone').read().decode()
IDENTITY = json.loads(urllib.request.urlopen('http://169.254.169.254/latest/dynamic/instance-identity/document').read().decode())
AWS_REGION = IDENTITY['region']  # region string taken from the identity document
class MySlackClass:
    """A class for handling Slack Calls"""
    def __init__(self, token=None):
        # Resolve the bot token: an explicit argument wins; otherwise fall
        # back to the SLACK_BOT_TOKEN environment variable and abort if unset.
        if token is None:
            if 'SLACK_BOT_TOKEN' not in os.environ:
                print('SLACK_BOT_TOKEN has not been defined as an environment variable')
                quit()
            self._token = os.environ.get('SLACK_BOT_TOKEN')
        else:
            self._token = token
        self._slack_client = slackclient.SlackClient(self._token)
        self._bot_id = self.bot_userid()  # performs an auth.test API call
        self._bot_name = 'BrainBotX'

    def api_call(self, *args, **kwargs):
        # Thin pass-through to the underlying SlackClient Web API call.
        return self._slack_client.api_call(*args, **kwargs)

    def bot_userid(self):
        # Look up this bot's own user id via the auth.test endpoint.
        return self.api_call('auth.test')['user_id']

    def contains_at_mention(self, text):
        # True when the text @-mentions this bot; Slack encodes mentions
        # as '<@USERID>' inside message text.
        at_mention = f'<@{self._bot_id}>'
        return at_mention in text

    def get_channels(self):
        # Return the channel list, or None (implicitly) when the call fails.
        result = self.api_call("channels.list")
        if result.get('ok'):
            return result['channels']

    def list_channels(self):
        # Print a one-line summary per channel to stdout.
        channels = self.get_channels()
        for channel in channels:
            # print(channel)
            print('{id} "{name}": Purpose - {purpose}'.format(**channel))

    def send_message(self, channel_id, message):
        # Post `message` to `channel_id` under the bot's display name and
        # emoji; returns the raw API response dict.
        result = self._slack_client.api_call(
            "chat.postMessage",
            channel=channel_id,
            text=message,
            username=f'{self._bot_name}',
            icon_emoji=':robot_face:'
        )
        return result

    def parse_commands(self, events, LEM=None, QAE=None):
        # Handle a batch of RTM events: for each plain user message (no
        # subtype) that @-mentions the bot, reply with entity extraction
        # (LEM) and/or a question answer (QAE) when those engines are given.
        for event in events:
            if event['type'] == 'message' and not 'subtype' in event:
                user_id = event['user']
                msg = event['text']
                channel = event['channel']
                print(f'User: {user_id}@{channel}, msg="{msg}"')
                if not self.contains_at_mention(msg):
                    continue
                if LEM is not None:
                    txt = LEM.get_entities(msg)
                    print('LEM.get_entities(msg):')
                    print(f'"{txt}"')
                    self.send_message(channel, txt)
                if QAE is not None:
                    txt = QAE.respond_to_question(msg)
                    if txt is not None:
                        self.send_message(channel, txt)

    def run(self, LEM=None, QAE=None, delay=1):
        # Main loop: connect to the Slack RTM stream and poll for events
        # every `delay` seconds; never returns (exits only via quit() above
        # or an unhandled exception).
        connection_check = self._slack_client.rtm_connect(with_team_state=False)
        if connection_check is False:
            print('Connection failed')
            quit()
        print(f'Bot (ID={self._bot_id}) is now running ', end='')
        print(f'on `slackclient` version {slackclient.version.__version__}')
        while True:
            events = self._slack_client.rtm_read()
            print(events)
            if len(events) > 0:
                self.parse_commands(events, LEM, QAE)
            time.sleep(delay)
if __name__ == "__main__":
    # CLI entry point. Exactly one mode flag is honoured per run, checked in
    # this order: --api_test, --channels, --message, --userid, --run.
    parser = argparse.ArgumentParser()
    parser.add_argument('--api_test', action='store_true')
    parser.add_argument('--channels', action='store_true')
    parser.add_argument('--message', default='')
    parser.add_argument('--run', action='store_true')
    parser.add_argument('--userid', action='store_true')
    args = parser.parse_args()
    SC = MySlackClass()
    if args.api_test is True:
        # python3 bot.py --api_test
        result = SC.api_call('api.test')
        txt = json.dumps(result, indent=4)
        print(txt)
    elif args.channels is True:
        # python3 bot.py --channels
        # NOTE(review): list_channels() prints and returns None, so this
        # prints an extra "None" line.
        txt = SC.list_channels()
        print(txt)
    elif len(args.message) > 0:
        # python3 bot.py --message "Hello"
        my_channel = 'CGRBJFKPH'  # hard-coded target channel id
        result = SC.send_message(my_channel, args.message)
    elif args.userid is True:
        # python3 bot.py --userid
        print(f'Bot userid = {SC.bot_userid()}')
    elif args.run is True:
        # python3 bot.py --run
        # Medical entity extraction + Q&A engines feed the bot's replies.
        LEM = ac.LanguageEngineMedical('us-west-2')
        QAE = qa.QnA_Engine('../data/q-n-a.csv')
        SC.run(LEM, QAE)
| StarcoderdataPython |
4834538 | <filename>panelapp/panels/views/panels.py
##
## Copyright (c) 2016-2019 Genomics England Ltd.
##
## This file is part of PanelApp
## (see https://panelapp.genomicsengland.co.uk).
##
## Licensed to the Apache Software Foundation (ASF) under one
## or more contributor license agreements. See the NOTICE file
## distributed with this work for additional information
## regarding copyright ownership. The ASF licenses this file
## to you under the Apache License, Version 2.0 (the
## "License"); you may not use this file except in compliance
## with the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing,
## software distributed under the License is distributed on an
## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
## KIND, either express or implied. See the License for the
## specific language governing permissions and limitations
## under the License.
##
import csv
from datetime import datetime
from django.db.models import Q
from django.contrib import messages
from django.http import HttpResponse
from django.core.exceptions import ValidationError
from django.views.generic import ListView
from django.views.generic import CreateView
from django.views.generic import DetailView
from django.views.generic import UpdateView
from django.views.generic import TemplateView
from django.views.generic import FormView
from django.views.generic import RedirectView
from django.views.generic.base import View
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from django.urls import reverse_lazy
from django.urls import reverse
from django.utils import timezone
from django.http import StreamingHttpResponse
from panelapp.mixins import GELReviewerRequiredMixin
from accounts.models import User
from panels.forms import PromotePanelForm
from panels.forms import ComparePanelsForm
from panels.forms import PanelForm
from panels.forms import UploadGenesForm
from panels.forms import UploadPanelsForm
from panels.forms import UploadReviewsForm
from panels.forms import ActivityFilterForm
from panels.mixins import PanelMixin
from panels.models import ProcessingRunCode
from panels.models import Activity
from panels.models import GenePanel
from panels.models import GenePanelSnapshot
from .entities import EchoWriter
class PanelsIndexView(ListView):
    """Landing page listing active panels.

    GEL curators additionally see internal/hidden panels; an optional
    ``gene`` query parameter narrows the list to panels containing that gene.
    """
    template_name = "panels/genepanel_list.html"
    model = GenePanelSnapshot
    context_object_name = "panels"
    # NOTE(review): mutable class attribute, overwritten per-request in
    # get_queryset(); relies on Django instantiating a fresh view per request.
    objects = []
    def get_queryset(self, *args, **kwargs):
        # GEL reviewers get the unrestricted view (all + internal panels).
        if self.request.user.is_authenticated and self.request.user.reviewer.is_GEL():
            if self.request.GET.get("gene"):
                self.objects = GenePanelSnapshot.objects.get_gene_panels(
                    self.request.GET.get("gene"), all=True, internal=True
                )
            else:
                self.objects = GenePanelSnapshot.objects.get_active_annotated(
                    all=True, internal=True
                )
        else:
            if self.request.GET.get("gene"):
                self.objects = GenePanelSnapshot.objects.get_gene_panels(
                    self.request.GET.get("gene")
                )
            else:
                self.objects = GenePanelSnapshot.objects.get_active_annotated()
        return self.panels
    @cached_property
    def panels(self):
        # Cached so the template and get_queryset() share one evaluation.
        return self.objects
    @cached_property
    def compare_panels_form(self):
        return ComparePanelsForm()
    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        return ctx
class CreatePanelView(GELReviewerRequiredMixin, CreateView):
    """Create a new panel"""
    template_name = "panels/genepanel_create.html"
    form_class = PanelForm
    def get_form_kwargs(self, *args, **kwargs):
        """Pass the GEL-curator flag and request through to PanelForm."""
        res = super().get_form_kwargs(*args, **kwargs)
        res["gel_curator"] = (
            self.request.user.is_authenticated and self.request.user.reviewer.is_GEL()
        )
        res["request"] = self.request
        return res
    def form_valid(self, form):
        # Keep a handle on the created snapshot for get_success_url().
        self.instance = form.instance
        ret = super().form_valid(form)
        messages.success(self.request, "Successfully added a new panel")
        return ret
    def get_success_url(self):
        """Redirect to the detail page of the newly created panel."""
        return reverse_lazy("panels:detail", kwargs={"pk": self.instance.panel.pk})
class UpdatePanelView(GELReviewerRequiredMixin, PanelMixin, UpdateView):
    """Update panel information"""
    template_name = "panels/genepanel_create.html"
    form_class = PanelForm
    def get_form_kwargs(self, *args, **kwargs):
        """Pass the GEL-curator flag and request through to PanelForm."""
        res = super().get_form_kwargs(*args, **kwargs)
        res["gel_curator"] = (
            self.request.user.is_authenticated and self.request.user.reviewer.is_GEL()
        )
        res["request"] = self.request
        return res
    def form_valid(self, form):
        self.instance = form.instance
        ret = super().form_valid(form)
        messages.success(self.request, "Successfully updated the panel")
        return ret
class GenePanelView(DetailView):
    """Panel detail page, built around the panel's current active snapshot."""
    model = GenePanel
    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        ctx["panel"] = self.object.active_panel
        # Inline edit form pre-populated from the active snapshot.
        ctx["edit"] = PanelForm(
            initial=ctx["panel"].get_form_initial(),
            instance=ctx["panel"],
            gel_curator=self.request.user.is_authenticated
            and self.request.user.reviewer.is_GEL(),
            request=self.request,
        )
        ctx["contributors"] = ctx["panel"].contributors
        # Form used by the "promote to new version" action on the same page.
        ctx["promote_panel_form"] = PromotePanelForm(
            instance=ctx["panel"],
            request=self.request,
            initial={"version_comment": None},
        )
        return ctx
class AdminContextMixin:
    """Adds the three admin bulk-upload forms to the template context."""
    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        ctx["gene_form"] = UploadGenesForm()
        ctx["panel_form"] = UploadPanelsForm()
        ctx["review_form"] = UploadReviewsForm()
        return ctx
class AdminView(GELReviewerRequiredMixin, AdminContextMixin, TemplateView):
    """Admin landing page with the bulk-upload forms (GEL reviewers only)."""
    template_name = "panels/admin.html"
class ImportToolMixin(GELReviewerRequiredMixin, AdminContextMixin, FormView):
    """Shared POST handler for the admin bulk-import forms.

    Subclasses set ``form_class``; the form's ``process_file`` performs the
    import and may hand off to a background job for large files.
    """
    template_name = "panels/admin.html"
    success_url = reverse_lazy("panels:admin")
    def form_valid(self, form):
        ret = super().form_valid(form)
        try:
            res = form.process_file(user=self.request.user)
            if res is ProcessingRunCode.PROCESS_BACKGROUND:
                # Deferred processing: user is notified by email when done.
                # (messages.error is used for visual prominence, not failure.)
                messages.error(
                    self.request,
                    "Import started in the background."
                    " You will get an email once it has"
                    " completed.",
                )
            else:
                messages.success(self.request, "Import successful")
        except ValidationError as errors:
            # Surface each validation problem as its own flash message.
            for error in errors:
                messages.error(self.request, error)
        return ret
class AdminUploadGenesView(ImportToolMixin, AdminContextMixin):
    """Bulk gene upload; GET requests bounce back to the admin page."""
    form_class = UploadGenesForm
    def get(self, request, *args, **kwargs):
        return redirect(reverse_lazy("panels:admin"))
class AdminUploadPanelsView(ImportToolMixin, AdminContextMixin):
    """Bulk panel upload; GET requests bounce back to the admin page."""
    form_class = UploadPanelsForm
    def get(self, request, *args, **kwargs):
        return redirect(reverse_lazy("panels:admin"))
class AdminUploadReviewsView(ImportToolMixin, AdminContextMixin):
    """Bulk review upload; GET requests bounce back to the admin page."""
    form_class = UploadReviewsForm
    def get(self, request, *args, **kwargs):
        return redirect(reverse_lazy("panels:admin"))
class PromotePanelView(GELReviewerRequiredMixin, GenePanelView, UpdateView):
    """Promote a panel to a new version (GEL reviewers only)."""
    template_name = "panels/genepanel_detail.html"
    form_class = PromotePanelForm
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs["request"] = self.request
        # Bind the form to the panel's current (active) snapshot.
        kwargs["instance"] = self.object.active_panel
        return kwargs
    def form_valid(self, form):
        ret = super().form_valid(form)
        self.instance = form.instance.panel
        # Promotion runs asynchronously, hence "in a few moments".
        messages.success(
            self.request,
            "Panel {} will be promoted in a few moments.".format(
                self.get_object().name
            ),
        )
        return ret
    def get_success_url(self):
        return self.get_object().get_absolute_url()
class ActivityListView(ListView):
    """Site-wide activity feed with panel/version/entity/date filters.

    GEL reviewers see GEL-only activity and may export the (filtered) feed
    as CSV via ``?format=csv``; everyone else sees the public feed.
    """
    model = Activity
    context_object_name = "activities"
    paginate_by = 3000
    def get(self, request, *args, **kwargs):
        """Serve a CSV export for authenticated GEL users, else the HTML list."""
        if (
            request.GET.get("format", "").lower() == "csv"
            and request.user.is_authenticated
            and request.user.reviewer.is_GEL
        ):
            response = HttpResponse(content_type="text/csv")
            response[
                "Content-Disposition"
            ] = 'attachment; filename="export-panelapp-activities-{}.csv"'.format(
                timezone.now()
            )
            writer = csv.writer(response)
            writer.writerow(
                [
                    "Created",
                    "Panel",
                    "Panel ID",
                    "Panel Version",
                    "Entity Type",
                    "Entity Name",
                    "User",
                    "Activity",
                ]
            )
            # Reuse the ListView machinery so the export honours the same
            # filters (and pagination bounds) as the HTML page.
            self.object_list = self.get_queryset()
            context = self.get_context_data()
            for activity in context["activities"]:
                writer.writerow(
                    [
                        activity.created,
                        activity.extra_data.get("panel_name"),
                        activity.extra_data.get("panel_id"),
                        activity.extra_data.get("panel_version"),
                        activity.extra_data.get("entity_type"),
                        activity.extra_data.get("entity_name"),
                        activity.extra_data.get("user_name"),
                        activity.text,
                    ]
                )
            return response
        return super().get(request, *args, **kwargs)
    def _filter_queryset_kwargs(self):
        """Visibility kwargs: GEL reviewers also see all/deleted/internal."""
        filters = {}
        if self.request.user.is_authenticated and self.request.user.reviewer.is_GEL:
            filters = {"all": True, "deleted": True, "internal": True}
        return filters
    def available_panels(self):
        """Panels offered in the filter dropdown."""
        return GenePanelSnapshot.objects.get_panels_active_panels(
            **self._filter_queryset_kwargs()
        )
    def available_panel_versions(self):
        """Versions of the currently selected panel (empty when none selected)."""
        if self.request.GET.get("panel"):
            return GenePanelSnapshot.objects.get_panel_versions(
                self.request.GET.get("panel"), **self._filter_queryset_kwargs()
            )
        return []
    def available_panel_entities(self):
        """Entity choices for the selected panel+version, or the explicit entity."""
        if self.request.GET.get("panel") and self.request.GET.get("version"):
            try:
                major_version, minor_version = self.request.GET.get("version").split(
                    "."
                )
                return GenePanelSnapshot.objects.get_panel_entities(
                    self.request.GET.get("panel"),
                    major_version,
                    minor_version,
                    **self._filter_queryset_kwargs()
                )
            except ValueError:
                # Malformed version string (not "major.minor").
                return []
        elif self.request.GET.get("panel") and self.request.GET.get("entity"):
            return [(self.request.GET.get("entity"), self.request.GET.get("entity"))]
        return []
    def get_context_data(self, **kwargs):
        """Add the filter form (with dependent choice lists) to the context."""
        ctx = super().get_context_data()
        ctx["filter_active"] = True if self.request.GET else False
        form_kwargs = {
            "panels": self.available_panels(),
            "versions": self.available_panel_versions()
            if self.request.GET.get("panel")
            else None,
            "entities": self.available_panel_entities()
            if self.request.GET.get("version") or self.request.GET.get("entity")
            else None,
        }
        ctx["filter_form"] = ActivityFilterForm(
            self.request.GET if self.request.GET else None, **form_kwargs
        )
        return ctx
    def get_queryset(self):
        """Visibility-scoped activity queryset narrowed by the GET filters."""
        if self.request.user.is_authenticated and self.request.user.reviewer.is_GEL():
            qs = self.model.objects.visible_to_gel()
        else:
            qs = self.model.objects.visible_to_public()
        filter_kwargs = {}
        if self.request.GET.get("panel", "").isdigit():
            filter_kwargs["extra_data__panel_id"] = int(self.request.GET.get("panel"))
        if self.request.GET.get("version"):
            filter_kwargs["extra_data__panel_version"] = self.request.GET.get("version")
        if self.request.GET.get("date_from"):
            filter_kwargs["created__gte"] = self.request.GET.get("date_from")
        if self.request.GET.get("date_to"):
            filter_kwargs["created__lte"] = self.request.GET.get("date_to")
        qs = qs.filter(**filter_kwargs)
        if self.request.GET.get("entity"):
            entity = self.request.GET.get("entity")
            # Match structured fields or free text mentioning the entity.
            qs = qs.filter(
                Q(extra_data__entity_name=entity)
                | Q(entity_name=entity)
                | Q(text__icontains=entity)
            )
        return qs.prefetch_related("user", "panel", "user__reviewer")
class DownloadAllPanels(GELReviewerRequiredMixin, View):
    """Stream a TSV summary of every panel (including internal) to GEL users."""
    def panel_iterator(self, request):
        """Yield the header tuple, then one summary tuple per active panel."""
        yield (
            "Level 4 title",
            "Level 3 title",
            "Level 2 title",
            "URL",
            "Current Version",
            "Version time stamp",
            "# rated genes/total genes",
            "#reviewers",
            "Reviewer name and affiliation (;)",
            "Reviewer emails (;)",
            "Status",
            "Relevant disorders",
            "Types",
        )
        # Prefetch related rows up-front to avoid N+1 queries while iterating.
        panels = (
            GenePanelSnapshot.objects.get_active_annotated(all=True, internal=True)
            .prefetch_related(
                "panel",
                "level4title",
                "panel__types",
                "genepanelentrysnapshot_set",
                "genepanelentrysnapshot_set__evaluation",
                "genepanelentrysnapshot_set__evaluation__user",
                "genepanelentrysnapshot_set__evaluation__user__reviewer",
                "str_set__evaluation",
                "str_set__evaluation__user",
                "str_set__evaluation__user__reviewer",
            )
            .all()
            .iterator()
        )
        for panel in panels:
            rate = "{} of {} genes reviewed".format(
                panel.stats.get("number_of_evaluated_genes"),
                panel.stats.get("number_of_genes"),
            )
            reviewers = panel.contributors
            # "First Last (email)" when a real name exists, else the username.
            contributors = [
                "{} {} ({})".format(user.first_name, user.last_name, user.email)
                if user.first_name
                else user.username
                for user in reviewers
            ]
            yield (
                panel.level4title.name,
                panel.level4title.level3title,
                panel.level4title.level2title,
                request.build_absolute_uri(
                    reverse("panels:detail", args=(panel.panel.id,))
                ),
                panel.version,
                panel.created,
                rate,
                len(reviewers),
                ";".join(contributors),  # aff
                ";".join([user.email for user in reviewers if user.email]),  # email
                panel.panel.status.upper(),
                ";".join(panel.old_panels),
                ";".join(panel.panel.types.values_list("name", flat=True)),
            )
    def get(self, request, *args, **kwargs):
        """Stream rows as produced so large exports never buffer in memory."""
        pseudo_buffer = EchoWriter()
        writer = csv.writer(pseudo_buffer, delimiter="\t")
        response = StreamingHttpResponse(
            (writer.writerow(row) for row in self.panel_iterator(request)),
            content_type="text/tab-separated-values",
        )
        attachment = "attachment; filename=All_panels_{}.tsv".format(
            datetime.now().strftime("%Y%m%d-%H%M")
        )
        response["Content-Disposition"] = attachment
        return response
class OldCodeURLRedirect(RedirectView):
    """Redirect old code URLs to the new pks"""
    permanent = True
    def dispatch(self, request, *args, **kwargs):
        # Look up the panel by its legacy primary key, then rebuild the new
        # URL, preserving any trailing path captured in ``uri``.
        panel = get_object_or_404(GenePanel, old_pk=kwargs.get("pk"))
        self.url = reverse("panels:detail", args=(panel.id,)) + kwargs.get("uri", "")
        return super().dispatch(request, *args, **kwargs)
| StarcoderdataPython |
1654654 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 3 22:26:25 2021
@author: zrowl
"""
import types
from geographiclib.geodesic import Geodesic
from paths import linear
def coords2path(coords=(), geo=None, interp_func_name='linear'):
    """Build piecewise lat/lon interpolation functions from a waypoint list.

    For each consecutive pair of (lat, lon) waypoints the geodesic azimuth
    and distance are computed, and source code for two functions of the form
    ``f(d, _)`` is generated: each returns
    ``interp_func_name(d, {'ordinate': ..., 'aziDeg': ...})`` for the
    segment containing distance ``d``, and 0 beyond the route.

    Args:
        coords: sequence of (lat, lon) pairs. Not mutated. (The original
            used a mutable ``[]`` default — replaced with an immutable one.)
        geo: geodesic solver exposing ``Inverse(lat1, lon1, lat2, lon2)``
            returning at least ``'azi1'`` and ``'s12'``. Defaults to
            ``Geodesic.WGS84``, resolved lazily at call time so importing
            this module does not require geographiclib to be loadable here.
        interp_func_name: name referenced by the generated code; it must be
            resolvable in this module's globals when the returned functions
            are called.

    Returns:
        Tuple ``(lat_func, lon_func, lat_source, lon_source, total_distance)``.
    """
    if geo is None:
        geo = Geodesic.WGS84
    lat_lines = ["def lat_path(d, _):"]
    lon_lines = ["def lon_path(d, _):"]
    d = 0.
    for i in range(len(coords) - 1):
        g = geo.Inverse(coords[i][0], coords[i][1],
                        coords[i + 1][0], coords[i + 1][1])
        azi = g['azi1']
        d = d + g['s12']
        branch = "if" if i == 0 else "elif"
        lat_lines.append(
            f"\t{branch} d < {d}: return {interp_func_name}"
            f"(d, {{'ordinate': 'lat', 'aziDeg': {azi}}})")
        lon_lines.append(
            f"\t{branch} d < {d}: return {interp_func_name}"
            f"(d, {{'ordinate': 'lon', 'aziDeg': {azi}}})")
    if len(lat_lines) == 1:
        # Fewer than two waypoints: the original emitted a dangling `else`
        # and failed to compile. Emit constant-zero paths instead.
        lat_lines.append("\treturn 0")
        lon_lines.append("\treturn 0")
    else:
        lat_lines.append("\telse: return 0")
        lon_lines.append("\telse: return 0")
    lat_funcstr = "\n".join(lat_lines)
    lon_funcstr = "\n".join(lon_lines)
    # The def's code object is the first constant of the compiled module
    # code. Binding globals() lets the generated bodies resolve
    # `interp_func_name` from this module at call time.
    lat_funcobj = compile(lat_funcstr, '<string>', 'exec')
    lon_funcobj = compile(lon_funcstr, '<string>', 'exec')
    lat_functype = types.FunctionType(lat_funcobj.co_consts[0], globals())
    lon_functype = types.FunctionType(lon_funcobj.co_consts[0], globals())
    return lat_functype, lon_functype, lat_funcstr, lon_funcstr, d
| StarcoderdataPython |
1685997 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass
from typing import Optional
from rfmt.blocks import LineBlock as LB
from rfmt.blocks import TextBlock as TB
from rfmt.blocks import ChoiceBlock as CB
from rfmt.blocks import WrapBlock as WB
from .utils import with_commas
from .ident import SQLIdentifier
from .query import SQLQuery
from .node import SQLNodeList
from .node import SQLNode
from .const import SQLString
from .types import SQLType
from .expr import SQLExpr
@dataclass(frozen=True)
class SQLFuncExpr(SQLExpr):
    """Generic (possibly dotted) function call, e.g. ``a.b.fn(x, y)``."""
    names: SQLNodeList[SQLIdentifier]  # dotted name components
    args: SQLNodeList[SQLExpr]         # call arguments
    def sqlf(self, compact):
        """Render the call; in non-compact mode offer a wrapped-args layout."""
        name = LB([x.sqlf(True) for x in self.names])
        compact_sql = LB([name, TB('(')] +
                         with_commas(True, self.args) +
                         [TB(')')])
        if compact:
            return compact_sql
        if not self.args:
            return compact_sql
        # Let the layout engine choose between one line and wrapped arguments.
        return CB([
            compact_sql,
            LB([
                LB([name, TB('(')]),
                WB(with_commas(False, self.args)),
                TB(')')
            ])
        ])
@dataclass(frozen=True)
class SQLCustomFuncs(SQLExpr):
    """Base for functions that need custom (non-generic) parse rules."""
    @staticmethod
    def consume(lex) -> 'Optional[SQLCustomFuncs]':
        """Try each special-form parser in turn; None when none matches."""
        # TODO(scannell) - add DATE, TIME, DATETIME, TIMESTAMP literals
        return (SQLCAST.consume(lex) or
                SQLDate.consume(lex) or
                SQLCount.consume(lex) or
                SQLExists.consume(lex) or
                SQLInterval.consume(lex) or
                SQLAnalyticNavigation.consume(lex) or
                SQLExtract.consume(lex))
@dataclass(frozen=True)
class SQLExists(SQLCustomFuncs):
    """``EXISTS(subquery)`` expression."""
    sql: SQLNode  # the subquery
    def sqlf(self, compact):
        # The subquery is always rendered compactly inside the parentheses.
        return LB([
            TB('EXISTS('),
            self.sql.sqlf(True),
            TB(')'),
        ])
    @staticmethod
    def consume(lex) -> 'Optional[SQLExists]':
        """Parse ``EXISTS ( query )``; None when EXISTS is absent."""
        if not lex.consume('EXISTS'):
            return None
        lex.expect('(')
        query = SQLQuery.parse(lex)
        lex.expect(')')
        return SQLExists(query)
@dataclass(frozen=True)
class SQLCount(SQLCustomFuncs):
    """``COUNT([DISTINCT] expr)`` aggregate call."""
    isdistinct: bool
    expr: SQLExpr

    def sqlf(self, compact):
        """Render the call; DISTINCT is folded into the opening token."""
        opener = 'COUNT(' + ('DISTINCT ' if self.isdistinct else '')
        parts = [TB(opener), self.expr.sqlf(compact), TB(')')]
        return LB(parts)

    @staticmethod
    def consume(lex) -> 'Optional[SQLCount]':
        """Parse ``COUNT([DISTINCT] expr)``; None when COUNT is absent."""
        if lex.consume('COUNT'):
            lex.expect('(')
            distinct = bool(lex.consume('DISTINCT'))
            inner = SQLExpr.parse(lex)
            lex.expect(')')
            return SQLCount(distinct, inner)
        return None
@dataclass(frozen=True)
class SQLInterval(SQLCustomFuncs):
    """``INTERVAL`` string expression (parentheses optional on input)."""
    sql_node: SQLNode  # the interval string literal
    def sqlf(self, compact):
        # Always rendered with parentheses, regardless of input form.
        return LB([
            TB('INTERVAL('),
            self.sql_node.sqlf(compact),
            TB(')'),
        ])
    @staticmethod
    def consume(lex) -> 'Optional[SQLInterval]':
        """Parse ``INTERVAL '...'`` or ``INTERVAL('...')``."""
        if not lex.consume('INTERVAL'):
            return None
        if lex.consume('('):
            sql_node = (SQLString.consume(lex) or
                        lex.error('expected string'))
            lex.expect(')')
        else:
            sql_node = (SQLString.consume(lex) or
                        lex.error('expected string'))
        return SQLInterval(sql_node)
@dataclass(frozen=True)
class SQLExtract(SQLCustomFuncs):
    """``EXTRACT(part FROM expr)`` expression."""
    name: str            # always 'EXTRACT'
    part: SQLIdentifier  # date/time part identifier
    expr: SQLExpr        # source date/time expression
    def sqlf(self, compact):
        return LB([
            TB(self.name), TB('('), self.part.sqlf(compact),
            TB(' '), TB('FROM'), TB(' '),
            self.expr.sqlf(compact), TB(')')
        ])
    @staticmethod
    def consume(lex) -> 'Optional[SQLExtract]':
        """Parse ``EXTRACT ( part FROM expr )``; None when EXTRACT is absent."""
        if not lex.consume('EXTRACT'):
            return None
        lex.expect('(')
        daypart = SQLIdentifier.parse(lex)
        lex.expect('FROM')
        date_expr = SQLExpr.parse(lex)
        lex.expect(')')
        return SQLExtract('EXTRACT', daypart, date_expr)
@dataclass(frozen=True)
class SQLCAST(SQLCustomFuncs):
    """``CAST(expr AS type)`` / ``SAFE_CAST(expr AS type)`` expression."""
    name: str      # 'CAST' or 'SAFE_CAST'
    expr: SQLExpr  # value being converted
    type: SQLType  # target type
    def sqlf(self, compact):
        return LB([
            TB(self.name), TB('('), self.expr.sqlf(compact),
            TB(' '), TB('AS'), TB(' '),
            self.type.sqlf(compact), TB(')')
        ])
    @staticmethod
    def consume(lex) -> 'Optional[SQLCAST]':
        """Parse a CAST/SAFE_CAST call; None when neither keyword matches."""
        name = None
        if lex.consume('CAST'):
            name = 'CAST'
        elif lex.consume('SAFE_CAST'):
            name = 'SAFE_CAST'
        else:
            return None
        lex.expect('(')
        value_expr = SQLExpr.parse(lex)
        lex.expect('AS')
        new_type = SQLType.parse(lex)
        lex.expect(')')
        return SQLCAST(name, value_expr, new_type)
@dataclass(frozen=True)
class SQLAnalyticNavigation(SQLCustomFuncs):
    """Analytic navigation / percentile call with an optional NULLS modifier.

    Covers FIRST_VALUE, LAST_VALUE, NTH_VALUE, PERCENTILE_CONT and
    PERCENTILE_DISC, e.g. ``FIRST_VALUE(x IGNORE NULLS)``.
    """
    name: str          # function keyword, upper-case
    args: SQLNodeList  # one or two argument expressions
    opt: str           # rendered NULLS clause (with leading space) or ''

    def sqlf(self, compact):
        """Render ``NAME(arg[, arg2][ RESPECT|IGNORE NULLS])``."""
        if len(self.args) == 1:
            return LB([
                TB(self.name + '('),
                self.args[0].sqlf(compact),
                TB(self.opt + ')')
            ])
        return LB([
            TB(self.name + '('),
            self.args[0].sqlf(compact),
            TB(', '),
            self.args[1].sqlf(compact),
            TB(self.opt + ')')
        ])

    @staticmethod
    def consume(lex) -> 'Optional[SQLAnalyticNavigation]':
        """Parse a navigation/percentile call; None when no keyword matches."""
        name = (lex.consume('FIRST_VALUE') or
                lex.consume('LAST_VALUE') or
                lex.consume('NTH_VALUE') or
                # Fixed: was 'PERCENTILE_COUNT', which is not a SQL function;
                # the continuous-percentile function is PERCENTILE_CONT.
                lex.consume('PERCENTILE_CONT') or
                lex.consume('PERCENTILE_DISC'))
        if not name:
            return None
        lex.expect('(')
        args = []
        while True:
            args.append(SQLExpr.parse(lex))
            if not lex.consume(','):
                break
        opt = (lex.consume('RESPECT') or
               lex.consume('IGNORE'))
        if opt:
            lex.expect('NULLS')
            opt = ' ' + opt + ' NULLS'
        elif lex.consume('NULLS'):
            opt = ' NULLS'
        else:
            opt = ''
        lex.expect(')')
        return SQLAnalyticNavigation(name, SQLNodeList(args), opt)
@dataclass(frozen=True)
class SQLDate(SQLCustomFuncs):
    """``DATE_ADD/DATE_SUB(date_expr, INTERVAL n part)`` expression."""
    name: str          # 'DATE_ADD' or 'DATE_SUB'
    args: SQLNodeList  # (date_expr, count, date_part)
    def sqlf(self, compact):
        return LB([
            TB(self.name), TB('('), self.args[0].sqlf(True),
            TB(', '), TB('INTERVAL'), TB(' '),
            self.args[1].sqlf(True), TB(' '), self.args[2].sqlf(True),
            TB(')')
        ])
    @staticmethod
    def consume(lex) -> 'Optional[SQLDate]':
        """Parse DATE_ADD/DATE_SUB with an INTERVAL argument; None otherwise."""
        name = lex.consume('DATE_ADD') or lex.consume('DATE_SUB')
        if not name:
            return None
        lex.expect('(')
        date_expr = SQLExpr.parse(lex)
        lex.expect(',')
        lex.expect('INTERVAL')
        count = SQLExpr.parse(lex)
        date_part = SQLIdentifier.parse(lex)
        lex.expect(')')
        return SQLDate(name, SQLNodeList((date_expr, count, date_part)))
| StarcoderdataPython |
164345 | import pickle
from sklearn.decomposition import PCA
import numpy as np
class PCA_reduction:
    """Dimensionality reduction using a pre-fitted, pickled scikit-learn PCA."""

    def __init__(self, pca_path):
        """Load a pickled PCA model from *pca_path*.

        WARNING: ``pickle.load`` can execute arbitrary code from the file —
        only load model files from trusted sources.
        """
        # Context manager closes the file deterministically (the original
        # left the handle open until garbage collection).
        with open(pca_path, 'rb') as f:
            self.pca_reload = pickle.load(f)

    def reduce_size(self, vector):
        """Project a single 1-D *vector* into the PCA space and return it."""
        return self.pca_reload.transform([vector])[0]

    @staticmethod
    def create_new_pca_model(vectors, path_to_save, percentage_variance):
        """Fit a PCA on min-max-scaled *vectors* keeping *percentage_variance*
        (0-1) of the variance, and pickle the fitted model to *path_to_save*."""
        from sklearn.preprocessing import MinMaxScaler
        scaler = MinMaxScaler()
        data_rescaled = scaler.fit_transform(vectors)
        pca = PCA(n_components=percentage_variance)
        pca.fit(data_rescaled)  # fit in place; unused `result` binding dropped
        with open(path_to_save, "wb") as f:
            pickle.dump(pca, f)

    @staticmethod
    def plot_variance_nbComponents(vectors, percentage_variance, figsize=(15, 5)):
        """Plot cumulative explained variance against the component count,
        with a horizontal line at the target *percentage_variance*."""
        import matplotlib.pyplot as plt
        pca = PCA().fit(vectors)
        plt.figure(figsize=figsize)  # unused `fig` binding dropped
        plt.plot(np.cumsum(pca.explained_variance_ratio_), marker='o')
        plt.axhline(y=percentage_variance, color="red")
        plt.xlabel('No. of principal components')
        plt.ylabel('cumulative % variance retained')
        plt.grid(True)
        plt.title('Cumulative explained variance across the number of components ')
198628 | <filename>src/main.py
import numpy as np
from kaggle_environments import evaluate, make
from src import agents
from src.utils import render_game
def play_game(agent1, agent2, environment, configuration):
    """Play one episode of *environment* between the two agents and render it."""
    env = make(environment=environment, configuration=configuration, debug=True)
    env.run([agent1, agent2])
    render_game(env)
def get_win_rate(
    agent1, agent2, environment: str, configuration=None, episodes=100
):
    """Evaluate *agent1* vs *agent2* over *episodes* games and print statistics.

    Half the episodes are played with the seats swapped so neither agent
    keeps a first-move advantage; rewards from the swapped games are flipped
    back into (agent1, agent2) order before counting. Prints each agent's
    win rate (rounded to 2 dp) and the number of games each invalidated.
    """
    # Avoid the shared mutable-default-argument pitfall (was configuration={}).
    if configuration is None:
        configuration = {}
    rewards = evaluate(
        environment=environment,
        agents=[agent1, agent2],
        configuration=configuration,
        num_episodes=episodes // 2,
    )
    rewards += [
        [b, a]
        for [a, b] in evaluate(
            environment=environment,
            agents=[agent2, agent1],
            configuration=configuration,
            num_episodes=episodes - episodes // 2,
        )
    ]
    agent_1_win_rate = np.round(
        rewards.count([1, -1]) / len(rewards), decimals=2
    )
    print(f"Agent 1 Win Rate: {agent_1_win_rate}")
    agent_2_win_rate = np.round(
        rewards.count([-1, 1]) / len(rewards), decimals=2
    )
    print(f"Agent 2 Win Rate: {agent_2_win_rate}")
    # A None reward marks the agent that made an invalid move.
    agent_1_invalid_games = rewards.count([None, 0])
    print(f"Agent 1 Invalid games: {agent_1_invalid_games}")
    agent_2_invalid_games = rewards.count([0, None])
    print(f"Agent 2 Invalid games: {agent_2_invalid_games}")
def main():
    """Run a quick head-to-head evaluation of two example connectx agents."""
    get_win_rate(
        agent1=agents.agent_random_check_winning_move,
        agent2=agents.agent_leftmost,
        environment="connectx",
    )


if __name__ == "__main__":
    # Guard added: the original called main() unconditionally, so merely
    # importing src.main triggered a full evaluation run.
    main()
| StarcoderdataPython |
3329737 | <gh_stars>0
import tensorflow as tf
import datetime
from datetime import timedelta
from timeit import default_timer as timer
from estimation.config import get_default_configuration
# --- COCO dataset locations --------------------------------------------------
# Commented-out Windows paths kept for local development reference.
# base_dir = "D://coco-dataset"
# annot_path_train = base_dir + "/annotations/person_keypoints_train2017.json"
# annot_path_val = base_dir + "/annotations/person_keypoints_val2017.json"
# img_dir_train = base_dir + "/train2017/train2017/"
# img_dir_val = base_dir + "/val2017/val2017"
base_dir = "/root/coco-dataset"
annot_path_train = base_dir + "/annotations/person_keypoints_train2017.json"
annot_path_val = base_dir + "/annotations/person_keypoints_val2017.json"
img_dir_train = base_dir + "/train2017/"
img_dir_val = base_dir + "/val2017"
# model save path
checkpoints_folder = "./model/ckpt/"
output_weights = './model/weights/singlenet'
output_model = './model/structs/struct_model'
# --- Running-mean metrics, one per tracked loss term (train and validation) --
train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
train_loss_heatmap = tf.keras.metrics.Mean('train_loss_heatmap', dtype=tf.float32)
train_loss_paf0 = tf.keras.metrics.Mean('train_loss_paf_0', dtype=tf.float32)
train_loss_paf1 = tf.keras.metrics.Mean('train_loss_paf_1', dtype=tf.float32)
train_loss_paf2 = tf.keras.metrics.Mean('train_loss_paf_2', dtype=tf.float32)
val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)
val_loss_heatmap = tf.keras.metrics.Mean('val_loss_heatmap', dtype=tf.float32)
val_loss_paf0 = tf.keras.metrics.Mean('val_loss_paf_0', dtype=tf.float32)
val_loss_paf1 = tf.keras.metrics.Mean('val_loss_paf_1', dtype=tf.float32)
val_loss_paf2 = tf.keras.metrics.Mean('val_loss_paf_2', dtype=tf.float32)
# --- TensorBoard summary writers ---------------------------------------------
# NOTE(review): current_time is computed but all log dirs below are fixed
# strings — confirm whether timestamped run directories were intended.
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs_singlenet/gradient_tape/train/train'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
val_log_dir = 'logs_singlenet/gradient_tape/train/val'
val_summary_writer = tf.summary.create_file_writer(val_log_dir)
t_paf0_log = 'logs_singlenet/gradient_tape/train/paf0'
t_paf1_log = 'logs_singlenet/gradient_tape/train/paf1'
t_paf2_log = 'logs_singlenet/gradient_tape/train/paf2'
t_paf0_sw = tf.summary.create_file_writer(t_paf0_log)
t_paf1_sw = tf.summary.create_file_writer(t_paf1_log)
t_paf2_sw = tf.summary.create_file_writer(t_paf2_log)
v_paf0_log = 'logs_singlenet/gradient_tape/val/paf0'
v_paf1_log = 'logs_singlenet/gradient_tape/val/paf1'
v_paf2_log = 'logs_singlenet/gradient_tape/val/paf2'
v_paf0_sw = tf.summary.create_file_writer(v_paf0_log)
v_paf1_sw = tf.summary.create_file_writer(v_paf1_log)
v_paf2_sw = tf.summary.create_file_writer(v_paf2_log)
graph_log_dir = "logs_singlenet/func/record"
graph_summary_writer = tf.summary.create_file_writer(graph_log_dir)
# --- Training hyper-parameters -----------------------------------------------
cfg=get_default_configuration()
plot_update_steps = 120
batch_size = 80
lr = 3e-4
train_epoch = 100
fn_epoch = 5
# NOTE(review): presumably indices into the model's output tensors — confirm
# against the training loop that consumes them.
output_paf_idx = 2
output_heatmap_idx = 3
# @profiler
def update_scalar(epoch, step_per_epoch, cur_step):
    """Write the running training-loss metrics to TensorBoard scalars.

    The global summary step is derived from 1-based (epoch, cur_step).
    NOTE(review): unlike update_val_scalar, this does not reset the metric
    accumulators — confirm the training loop resets them elsewhere.
    """
    summary_step = (epoch - 1) * step_per_epoch + cur_step - 1
    with train_summary_writer.as_default():
        with tf.name_scope('tloss_a'):
            tf.summary.scalar('tloss_a', train_loss.result(), step=summary_step)
            tf.summary.scalar('tloss_a_heatmap', train_loss_heatmap.result(), step=summary_step)
        with tf.name_scope('tloss_paf'):
            tf.summary.scalar('tloss_paf_stage_2', train_loss_paf2.result(), step=summary_step)
            tf.summary.scalar('tloss_paf_stage_0', train_loss_paf0.result(), step=summary_step)
            tf.summary.scalar('tloss_paf_stage_1', train_loss_paf1.result(), step=summary_step)
    # Per-stage writers let TensorBoard overlay all PAF stages on one chart.
    with t_paf0_sw.as_default():
        with tf.name_scope('tloss_paf'):
            tf.summary.scalar('tloss_paf_all', train_loss_paf0.result(), step=summary_step)
    with t_paf1_sw.as_default():
        with tf.name_scope('tloss_paf'):
            tf.summary.scalar('tloss_paf_all', train_loss_paf1.result(), step=summary_step)
    with t_paf2_sw.as_default():
        with tf.name_scope('tloss_paf'):
            tf.summary.scalar('tloss_paf_all', train_loss_paf2.result(), step=summary_step)
def update_val_scalar(epoch):
    """Log validation losses for *epoch* to TensorBoard, print a summary,
    and reset the validation metric accumulators for the next epoch."""
    val_loss_res = val_loss.result()
    val_loss_heatmap_res = val_loss_heatmap.result()
    val_loss_paf_res2 = val_loss_paf2.result()
    val_loss_paf_res1 = val_loss_paf1.result()
    val_loss_paf_res0 = val_loss_paf0.result()
    print(f'Validation losses for epoch: {epoch} : Loss paf {val_loss_paf_res2}, Loss heatmap '
          f'{val_loss_heatmap_res}, Total loss {val_loss_res}')
    with val_summary_writer.as_default():
        with tf.name_scope("val_loss_a"):
            tf.summary.scalar('val_loss_a', val_loss_res, step=epoch)
            tf.summary.scalar('val_loss_a_heatmap', val_loss_heatmap_res, step=epoch)
        with tf.name_scope("val_loss_paf"):
            tf.summary.scalar('val_loss_paf2', val_loss_paf_res2, step=epoch)
            tf.summary.scalar('val_loss_paf1', val_loss_paf_res1, step=epoch)
            tf.summary.scalar('val_loss_paf0', val_loss_paf_res0, step=epoch)
    # Per-stage writers let TensorBoard overlay all PAF stages on one chart.
    with v_paf0_sw.as_default():
        with tf.name_scope('val_loss_paf'):
            tf.summary.scalar('val_loss_paf_all', val_loss_paf_res0, step=epoch)
    with v_paf1_sw.as_default():
        with tf.name_scope('val_loss_paf'):
            tf.summary.scalar('val_loss_paf_all', val_loss_paf_res1, step=epoch)
    with v_paf2_sw.as_default():
        with tf.name_scope('val_loss_paf'):
            tf.summary.scalar('val_loss_paf_all', val_loss_paf_res2, step=epoch)
    # Reset so the next epoch's running means start clean.
    val_loss.reset_states()
    val_loss_heatmap.reset_states()
    val_loss_paf2.reset_states()
    val_loss_paf0.reset_states()
    val_loss_paf1.reset_states()
3360999 | import sys
import numpy as np
import cv2
import scipy.io as sio
import matplotlib.pyplot as plt
from scipy import ndimage
from numpy import *
# Side-scan sonar visualisation: load port/starboard intensity matrices from
# two .mat files (argv[1], argv[2]), stitch them side by side and display.
port_side_data = sys.argv[1]
stbd_side_data = sys.argv[2]
port_mat = sio.loadmat(port_side_data)
port_matrix = np.array(port_mat['port_intensity_matrix'])
stbd_mat = sio.loadmat(stbd_side_data)
stbd_matrix= np.array(stbd_mat['stbd_intensity_matrix'])
# Flip vertically so both channels share one orientation, then stitch.
port_image = cv2.flip(port_matrix, 0)
stbd_image = cv2.flip(stbd_matrix, 0)
image_matrix = np.concatenate((port_image, stbd_image), axis = 1)
m_row = image_matrix.shape[0]
m_col = image_matrix.shape[1]
if image_matrix.max() > 255:
    # High-bit-depth data: histogram the intensities, find the value covering
    # 88% of pixels, then linearly rescale [0, max_intensity] to [0, 255] and
    # clip above. NOTE(review): the pixel-by-pixel Python loops below are
    # O(rows*cols) and could be vectorised with numpy.
    x_t = np.arange(0,65536,1)
    intensity_num = np.zeros(65536)
    for i in range(0, m_row):
        for j in range(0, m_col):
            # NOTE(review): `math.ceil` relies on `from numpy import *`
            # exporting a `math` name — verify this resolves with the numpy
            # version in use (an explicit `import math` would be safer).
            intensity_num[math.ceil(image_matrix[i,j])] += 1
    cur_intensity_num = 0
    total_intensity_num = m_row * m_col
    intensity_threst_hold = 0.88
    max_intensity = 0
    for i in range(0,intensity_num.size):
        cur_intensity_num += intensity_num[i]
        if (cur_intensity_num/total_intensity_num) > intensity_threst_hold :
            break
        max_intensity = i
    for i in range(0, m_row):
        for j in range(0, m_col):
            if image_matrix[i,j] < max_intensity:
                image_matrix[i,j] = image_matrix[i,j] * 255 / max_intensity
            else:
                image_matrix[i,j] = 255
    plt.figure("image")
    plt.imshow(image_matrix, cmap = plt.cm.gray)
    plt.show()
else:
    # Data already fits the 8-bit display range: show as-is.
    plt.figure("image")
    plt.imshow(image_matrix, cmap = plt.cm.gray)
    plt.show()
581 | <reponame>kirmerzlikin/intellij-community
r'''
This module provides utilities to get the absolute filenames so that we can be sure that:
- The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
- Providing means for the user to make path conversions when doing a remote debugging session in
one machine and debugging in another.
To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths.
@note:
in this context, the server is where your python process is running
and the client is where eclipse is running.
E.g.:
If the server (your python process) has the structure
/user/projects/my_project/src/package/module1.py
and the client has:
c:\my_project\src\package\module1.py
the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be:
PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')]
alternatively, this can be set with an environment variable from the command line:
set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\my_project\src','/user/projects/my_project/src']]
@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations
@note: the case of the paths is important! Note that this can be tricky to get right when one machine
uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being
debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON).
@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server)
@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation)
import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)
see parameter docs on pydevd.py
@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target
machine for the paths that'll actually have breakpoints).
'''
from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
import json
import os.path
import sys
import traceback
_os_normcase = os.path.normcase
basename = os.path.basename
exists = os.path.exists
join = os.path.join
# Prefer realpath (resolves symlinks); fall back to abspath where unsupported.
try:
    rPath = os.path.realpath  # @UndefinedVariable
except:
    # jython does not support os.path.realpath
    # realpath is a no-op on systems without islink support
    rPath = os.path.abspath

# defined as a list of tuples where the 1st element of the tuple is the path in the client machine
# and the 2nd element is the path in the server machine.
# see module docstring for more details.
try:
    PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]'))
except Exception:
    # Bad JSON in the environment variable: warn and fall back to no mappings.
    sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\n')
    traceback.print_exc()
    PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
    if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list):
        sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.\n')
        PATHS_FROM_ECLIPSE_TO_PYTHON = []
    else:
        # Converting json lists to tuple
        PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON]

# example:
# PATHS_FROM_ECLIPSE_TO_PYTHON = [
#     (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy',
#      r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx')
# ]
# Filesystem-case helpers: on case-insensitive filesystems breakpoints must
# match the on-disk casing. Defaults are identity functions; real
# implementations are bound below only when the platform supports them.
convert_to_long_pathname = lambda filename:filename
convert_to_short_pathname = lambda filename:filename
get_path_with_real_case = lambda filename:filename

if sys.platform == 'win32':
    try:
        import ctypes
        from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD

        GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
        GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
        GetLongPathName.restype = DWORD

        GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
        GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
        GetShortPathName.restype = DWORD

        def _convert_to_long_pathname(filename):
            # Expand 8.3 short components (e.g. PROGRA~1) to their long form.
            buf = ctypes.create_unicode_buffer(MAX_PATH)

            if IS_PY2 and isinstance(filename, str):
                filename = filename.decode(getfilesystemencoding())
            rv = GetLongPathName(filename, buf, MAX_PATH)
            if rv != 0 and rv <= MAX_PATH:
                # rv == 0 or rv > MAX_PATH means the API call failed/truncated.
                filename = buf.value

                if IS_PY2:
                    filename = filename.encode(getfilesystemencoding())
            return filename

        def _convert_to_short_pathname(filename):
            # Inverse of the above: collapse to the 8.3 short form.
            buf = ctypes.create_unicode_buffer(MAX_PATH)

            if IS_PY2 and isinstance(filename, str):
                filename = filename.decode(getfilesystemencoding())
            rv = GetShortPathName(filename, buf, MAX_PATH)
            if rv != 0 and rv <= MAX_PATH:
                filename = buf.value

                if IS_PY2:
                    filename = filename.encode(getfilesystemencoding())
            return filename

        def _get_path_with_real_case(filename):
            # A short->long round trip makes Windows return on-disk casing.
            ret = convert_to_long_pathname(convert_to_short_pathname(filename))

            # This doesn't handle the drive letter properly (it'll be unchanged).
            # Make sure the drive letter is always uppercase.
            if len(ret) > 1 and ret[1] == ':' and ret[0].islower():
                return ret[0].upper() + ret[1:]
            return ret

        # Check that it actually works
        _get_path_with_real_case(__file__)
    except:
        # Something didn't quite work out, leave no-op conversions in place.
        if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
            traceback.print_exc()
    else:
        convert_to_long_pathname = _convert_to_long_pathname
        convert_to_short_pathname = _convert_to_short_pathname
        get_path_with_real_case = _get_path_with_real_case

elif IS_JYTHON and IS_WINDOWS:

    def get_path_with_real_case(filename):
        # Java's File.getCanonicalPath yields the path with on-disk casing.
        from java.io import File
        f = File(filename)
        ret = f.getCanonicalPath()
        if IS_PY2 and not isinstance(ret, str):
            return ret.encode(getfilesystemencoding())
        return ret

if IS_WINDOWS:
    if IS_JYTHON:

        def normcase(filename):
            return filename.lower()

    else:

        def normcase(filename):
            # `normcase` doesn't lower case on Python 2 for non-English locale, but Java
            # side does it, so we should do it manually.
            if '~' in filename:
                filename = convert_to_long_pathname(filename)

            filename = _os_normcase(filename)
            return filename.lower()

else:

    def normcase(filename):
        return filename  # no-op
# Which OS conventions the IDE (client) side uses; defaults to this host's.
_ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX'

def set_ide_os(os):
    '''
    We need to set the IDE os because the host where the code is running may be
    actually different from the client (and the point is that we want the proper
    paths to translate from the client to the server).

    :param os:
        'UNIX' or 'WINDOWS'

    Note: the parameter deliberately shadows the ``os`` module inside this
    function; only the string value is used here.
    '''
    global _ide_os
    prev = _ide_os
    if os == 'WIN':  # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116)
        os = 'WINDOWS'

    assert os in ('WINDOWS', 'UNIX')

    if prev != os:
        _ide_os = os
        # We need to (re)setup how the client <-> server translation works to provide proper separators.
        setup_client_server_paths(_last_client_server_paths_set)

# Verbose logging of every client<->server path translation (opt-in via env).
DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true')

# Caches filled as requested during the debug session.
NORM_PATHS_CONTAINER = {}
NORM_PATHS_AND_BASE_CONTAINER = {}
def _NormFile(filename):
    """Shortcut for the real-path half of _NormPaths."""
    return _NormPaths(filename)[1]
def _AbsFile(filename):
    """Shortcut for the absolute-path half of _NormPaths."""
    return _NormPaths(filename)[0]
# Returns tuple of absolute path and real path for given filename
def _NormPaths(filename):
    """Return (abs_path, real_path) for filename, memoized in NORM_PATHS_CONTAINER."""
    cached = NORM_PATHS_CONTAINER.get(filename)
    if cached is not None:
        return cached
    if filename.__class__ != str:
        raise AssertionError('Paths passed to _NormPaths must be str. Found: %s (%s)' % (filename, type(filename)))
    normalized = (_NormPath(filename, os.path.abspath), _NormPath(filename, rPath))
    # cache it for fast access later
    NORM_PATHS_CONTAINER[filename] = normalized
    return normalized
def _NormPath(filename, normpath):
    """Apply normpath + normcase, keeping .zip/.egg inner member paths intact."""
    result = normpath(filename)

    archive_end = result.find('.zip')
    if archive_end == -1:
        archive_end = result.find('.egg')

    if archive_end != -1:
        archive_end += 4
        archive = result[:archive_end]
        inner = result[archive_end:]
        if inner.startswith('!'):
            # Kept for '.zip!'/'.egg!'-suffixed archives (see the matching
            # handling in `exists`).
            inner = inner[1:]
            archive = archive + '!'
        if inner.startswith(('/', '\\')):
            inner = inner[1:]
        if inner:
            # Only the archive part is case-normalized; the inner path must
            # match the archive member name exactly.
            return join(normcase(archive), inner)

    return normcase(result)
_ZIP_SEARCH_CACHE = {}  # zip/egg path -> opened ZipFile (or the sentinel)
_NOT_FOUND_SENTINEL = object()

def exists(file):
    """Existence check that also looks inside .zip/.egg archives.

    NOTE: this shadows the ``exists = os.path.exists`` alias defined above,
    and the return value is mixed: the path (truthy) when found, ``False`` or
    ``None`` otherwise -- callers must treat it as a boolean.
    """
    if os.path.exists(file):
        return file

    ind = file.find('.zip')
    if ind == -1:
        ind = file.find('.egg')

    if ind != -1:
        ind += 4
        zip_path = file[:ind]
        inner_path = file[ind:]
        if inner_path.startswith("!"):
            # Note (fabioz): although I can replicate this by creating a file ending as
            # .zip! or .egg!, I don't really know what's the real-world case for this
            # (still kept as it was added by @jetbrains, but it should probably be reviewed
            # later on).
            # Note 2: it goes hand-in-hand with '_NormPath'.
            inner_path = inner_path[1:]
            zip_path = zip_path + '!'

        zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL)
        if zip_file_obj is None:
            # NOTE(review): failures below are cached as the sentinel, never
            # as None, so this branch looks unreachable and failed opens are
            # retried on every call -- confirm whether None was intended.
            return False
        elif zip_file_obj is _NOT_FOUND_SENTINEL:
            try:
                import zipfile
                zip_file_obj = zipfile.ZipFile(zip_path, 'r')
                _ZIP_SEARCH_CACHE[zip_path] = zip_file_obj
            except:
                _ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL
                return False

        try:
            if inner_path.startswith('/') or inner_path.startswith('\\'):
                inner_path = inner_path[1:]

            # getinfo raises KeyError when the member is absent.
            _info = zip_file_obj.getinfo(inner_path.replace('\\', '/'))

            return join(zip_path, inner_path)
        except KeyError:
            return None
    return None
# Now, let's do a quick test to see if we're working with a version of python that has no problems
# related to the names generated...
try:
    try:
        code = rPath.func_code  # Python 2 attribute name
    except AttributeError:
        code = rPath.__code__  # Python 3 attribute name

    if not exists(_NormFile(code.co_filename)):
        sys.stderr.write('-------------------------------------------------------------------------------\n')
        sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n')
        sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n')
        sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n')
        sys.stderr.write('-------------------------------------------------------------------------------\n')
        sys.stderr.flush()

        NORM_SEARCH_CACHE = {}

        initial_norm_paths = _NormPaths

        def _NormPaths(filename):  # Let's redefine _NormPaths to work with paths that may be incorrect
            try:
                return NORM_SEARCH_CACHE[filename]
            except KeyError:
                abs_path, real_path = initial_norm_paths(filename)
                if not exists(real_path):
                    # We must actually go on and check if we can find it as if it was a relative path for some of the paths in the pythonpath
                    for path in sys.path:
                        abs_path, real_path = initial_norm_paths(join(path, filename))
                        if exists(real_path):
                            break
                    else:
                        # for..else: no sys.path entry resolved it either.
                        sys.stderr.write('pydev debugger: Unable to find real location for: %s\n' % (filename,))
                        abs_path = filename
                        real_path = filename

                NORM_SEARCH_CACHE[filename] = abs_path, real_path
                return abs_path, real_path
except:
    # Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that
    traceback.print_exc()
# Note: as these functions may be rebound, users should always import
# pydevd_file_utils and then use:
#
# pydevd_file_utils.norm_file_to_client
# pydevd_file_utils.norm_file_to_server
#
# instead of importing any of those names to a given scope.
def _original_file_to_client(filename, cache={}):
    """Absolute path of filename with the on-disk casing, memoized.

    The ``cache={}`` default is a deliberate process-lifetime memo dict,
    not the accidental mutable-default bug.
    """
    try:
        return cache[filename]
    except KeyError:
        cache[filename] = get_path_with_real_case(_AbsFile(filename))
        return cache[filename]

_original_file_to_server = _NormFile

# Rebindable translation entry points (see the note above): always access
# these via the module (pydevd_file_utils.norm_file_to_client), never import
# the names directly.
norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server
def _fix_path(path, sep):
if path.endswith('/') or path.endswith('\\'):
path = path[:-1]
if sep != '/':
path = path.replace('/', sep)
return path
# Last mappings passed in, kept so set_ide_os can re-apply them.
_last_client_server_paths_set = []

def setup_client_server_paths(paths):
    '''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON'''
    global norm_file_to_client
    global norm_file_to_server
    global _last_client_server_paths_set
    _last_client_server_paths_set = paths[:]

    # Work on the client and server slashes.
    python_sep = '\\' if IS_WINDOWS else '/'
    eclipse_sep = '\\' if _ide_os == 'WINDOWS' else '/'

    # Per-direction memo dicts, captured by the closures below.
    norm_filename_to_server_container = {}
    norm_filename_to_client_container = {}
    initial_paths = list(paths)  # original casing (used for client output)
    paths_from_eclipse_to_python = initial_paths[:]  # normcased (for matching)

    # Apply normcase to the existing paths to follow the os preferences.
    for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]):
        if IS_PY2:
            if isinstance(path0, unicode):
                path0 = path0.encode(sys.getfilesystemencoding())
            if isinstance(path1, unicode):
                path1 = path1.encode(sys.getfilesystemencoding())

        path0 = _fix_path(path0, eclipse_sep)
        path1 = _fix_path(path1, python_sep)
        initial_paths[i] = (path0, path1)

        paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1))

    if not paths_from_eclipse_to_python:
        # no translation step needed (just inline the calls)
        norm_file_to_client = _original_file_to_client
        norm_file_to_server = _original_file_to_server
        return

    # only setup translation functions if absolutely needed!
    def _norm_file_to_server(filename, cache=norm_filename_to_server_container):
        # Eclipse will send the passed filename to be translated to the python process
        # So, this would be 'NormFileFromEclipseToPython'
        try:
            return cache[filename]
        except KeyError:
            if eclipse_sep != python_sep:
                # Make sure that the separators are what we expect from the IDE.
                filename = filename.replace(python_sep, eclipse_sep)

            # used to translate a path from the client to the debug server
            translated = normcase(filename)
            for eclipse_prefix, server_prefix in paths_from_eclipse_to_python:
                if translated.startswith(eclipse_prefix):
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: replacing to server: %s\n' % (translated,))
                    translated = translated.replace(eclipse_prefix, server_prefix)
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: sent to server: %s\n' % (translated,))
                    break
            else:
                # for..else: no configured prefix matched.
                if DEBUG_CLIENT_SERVER_TRANSLATION:
                    sys.stderr.write('pydev debugger: to server: unable to find matching prefix for: %s in %s\n' % \
                        (translated, [x[0] for x in paths_from_eclipse_to_python]))

            # Note that when going to the server, we do the replace first and only later do the norm file.
            if eclipse_sep != python_sep:
                translated = translated.replace(eclipse_sep, python_sep)
            translated = _NormFile(translated)

            cache[filename] = translated
            return translated

    def _norm_file_to_client(filename, cache=norm_filename_to_client_container):
        # The result of this method will be passed to eclipse
        # So, this would be 'NormFileFromPythonToEclipse'
        try:
            return cache[filename]
        except KeyError:
            # used to translate a path from the debug server to the client
            translated = _NormFile(filename)

            # After getting the real path, let's get it with the path with
            # the real case and then obtain a new normalized copy, just in case
            # the path is different now.
            translated_proper_case = get_path_with_real_case(translated)
            translated = _NormFile(translated_proper_case)

            if IS_WINDOWS:
                if translated.lower() != translated_proper_case.lower():
                    translated_proper_case = translated
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write(
                            'pydev debugger: _NormFile changed path (from: %s to %s)\n' % (
                                translated_proper_case, translated))

            for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python):
                if translated.startswith(python_prefix):
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: replacing to client: %s\n' % (translated,))

                    # Note: use the non-normalized version.
                    eclipse_prefix = initial_paths[i][0]
                    translated = eclipse_prefix + translated_proper_case[len(python_prefix):]
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: sent to client: %s\n' % (translated,))
                    break
            else:
                if DEBUG_CLIENT_SERVER_TRANSLATION:
                    sys.stderr.write('pydev debugger: to client: unable to find matching prefix for: %s in %s\n' % \
                        (translated, [x[1] for x in paths_from_eclipse_to_python]))
                translated = translated_proper_case

            if eclipse_sep != python_sep:
                translated = translated.replace(python_sep, eclipse_sep)

            # The resulting path is not in the python process, so, we cannot do a _NormFile here,
            # only at the beginning of this method.
            cache[filename] = translated
            return translated

    norm_file_to_server = _norm_file_to_server
    norm_file_to_client = _norm_file_to_client

setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON)
def _is_int(filename):
# isdigit() doesn't support negative numbers
try:
int(filename)
return True
except:
return False
def is_real_file(filename):
    """False for pseudo-files such as Jupyter/IPython cell identifiers."""
    # Cell frames show up as plain integers or '<ipython-input-...>' names.
    return not (_is_int(filename) or filename.startswith("<ipython-input"))
# For given file f returns tuple of its absolute path, real path and base name
def get_abs_path_real_path_and_base_from_file(f):
    """Return (abs_path, real_path, basename) for f, cached.

    Pseudo-files (see is_real_file) are passed through unchanged in all three
    slots; .pyc/$py.class names are mapped back to their .py source first.
    """
    try:
        return NORM_PATHS_AND_BASE_CONTAINER[f]
    except:
        if _NormPaths is None:  # Interpreter shutdown
            return f

        if f is not None:
            if f.endswith('.pyc'):
                # Compiled file: point at the matching source file.
                f = f[:-1]
            elif f.endswith('$py.class'):
                # Jython compiled class: map back to the .py file.
                f = f[:-len('$py.class')] + '.py'

        if not is_real_file(f):
            abs_path, real_path, base = f, f, f
        else:
            abs_path, real_path = _NormPaths(f)
            base = basename(real_path)

        ret = abs_path, real_path, base
        NORM_PATHS_AND_BASE_CONTAINER[f] = ret
        return ret
def get_abs_path_real_path_and_base_from_frame(frame):
    """Like ..._from_file, but keyed by the frame's co_filename (cached)."""
    try:
        return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
    except:
        # This one is just internal (so, does not need any kind of client-server translation)
        f = frame.f_code.co_filename
        if f is not None and f.startswith(('build/bdist.', 'build\\bdist.')):
            # files from eggs in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg>
            f = frame.f_globals['__file__']

        if get_abs_path_real_path_and_base_from_file is None:  # Interpreter shutdown
            return f

        ret = get_abs_path_real_path_and_base_from_file(f)
        # Also cache based on the frame.f_code.co_filename (if we had it inside build/bdist it can make a difference).
        NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret
        return ret
def get_fullname(mod_name):
    """Return the filename for module mod_name via its loader, or None."""
    if IS_PY3K:
        import pkgutil
    else:
        from _pydev_imps import _pydev_pkgutil_old as pkgutil
    try:
        loader = pkgutil.get_loader(mod_name)
    except:
        return None
    if loader is None:
        return None
    # Loaders expose the filename under one of these two attribute names.
    for attr in ("get_filename", "_get_filename"):
        meth = getattr(loader, attr, None)
        if meth is not None:
            return meth(mod_name)
    return None
def get_package_dir(mod_name):
    """Return the first sys.path entry containing package mod_name as a
    directory, or None if no entry does."""
    subdir = mod_name.replace('.', '/')
    for entry in sys.path:
        candidate = join(entry, subdir)
        if os.path.isdir(candidate):
            return candidate
    return None
| StarcoderdataPython |
1704576 | <reponame>annacarbery/VS_ECFP
from sklearn.tree import DecisionTreeClassifier
import json
import numpy as np
from sklearn.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
def make_input(list1, list2, n_bits=2048):
    """Build a feature matrix and labels from two groups of samples.

    Each sample is a list of ECFP bit-vectors; its feature vector counts,
    per bit position, how many of the sample's fingerprints set that bit.

    Args:
        list1: samples labelled 1.
        list2: samples labelled 0.
        n_bits: fingerprint length (default 2048, the originally
            hard-coded size -- now a backward-compatible parameter).

    Returns:
        (X, y): list of per-sample count vectors and the parallel 0/1 labels.
    """
    X = []
    y = []
    # Pairing each group with its label fixes the original's `l == list1`
    # VALUE comparison, which mislabelled every sample as 1 whenever the two
    # groups happened to compare equal.
    for label, group in ((1, list1), (0, list2)):
        for ECFPs in group:
            vec = [0] * n_bits
            for ECFP in ECFPs:
                for v in range(n_bits):
                    if ECFP[v] > 0:
                        vec[v] += 1
            X.append(vec)
            y.append(label)
    return X, y
def make_long_input(list1, list2):
    """Concatenate each sample's first three ECFP vectors into one flat row.

    Returns (X, y) where y is 1 for list1 samples and 0 for list2 samples.
    Only the first three fingerprints per sample are used, preserving the
    original's ``ECFPs[:3]`` truncation.
    """
    X = []
    y = []
    # (label, group) pairing replaces the original's `l == list1` VALUE
    # comparison, which mislabelled everything as 1 when list1 == list2.
    for label, group in ((1, list1), (0, list2)):
        for ECFPs in group:
            flat = []
            for ECFP in ECFPs[:3]:
                flat += ECFP
            X.append(flat)
            y.append(label)
    return X, y
# ---- train a decision tree on precomputed fingerprint splits and save a
# ---- confusion-matrix plot (expects the pairs_study/*.json files on disk).
X = json.load(open('pairs_study/x_train.json', 'r'))
y = json.load(open('pairs_study/y_train.json', 'r'))
X_test = json.load(open('pairs_study/x_test.json', 'r'))
y_test = json.load(open('pairs_study/y_test.json', 'r'))

print(len(X), len(X_test))
print(y.count(1), y_test.count(1))  # positive-class counts per split

# scaler = StandardScaler()
# scaler.fit(X)
# X = scaler.transform(X)
# X_test = scaler.transform(X_test)

clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y)
# NOTE(review): `res` is never used afterwards -- confirm whether the
# predictions were meant to be reported or saved.
res = clf.predict(X_test)
plot_confusion_matrix(clf, X_test, y_test)
plt.savefig('confusion.png')
| StarcoderdataPython |
2656 | <filename>DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py<gh_stars>1-10
import argparse
from PIL import Image, ImageStat
import math
parser = argparse.ArgumentParser()
parser.add_argument('fname')  # input image path
# NOTE(review): `pref` is parsed but never used below -- confirm intent.
parser.add_argument('pref', default="", nargs="?")
args = parser.parse_args()

im = Image.open(args.fname)
RGB = im.convert('RGB')
imWidth, imHeight = im.size

# Green-pixel classification thresholds (channel-ratio tests below).
ratg = 1.2    # G must exceed R by this factor
ratgb = 1.66  # G must exceed B by this factor
ming = 10     # NOTE(review): unused -- presumably a minimum-green cutoff; confirm
ratr = 2      # R must exceed B by this factor
speed = 8     # sample every 8th pixel in each direction

leafcount = 0
total = 0
# Sample the image on a coarse grid and count pixels that look leaf-green.
for i in range(0, int(imWidth/speed)):
    for j in range(0, int(imHeight/speed)):
        R,G,B = RGB.getpixel((i*speed,j*speed))
        if R*ratg < G and B*ratgb < G and B*ratr < R:
            leafcount = leafcount + 1
        total = total+1

# "LAI" as reported here is simply the sampled green-pixel fraction.
print("LAI="+str(float(leafcount)/total))
| StarcoderdataPython |
# Given a binary search tree with non-negative values,
# find the minimum absolute difference between values of any two nodes.
# Example:
# Input:
# 1
# \
# 3
# /
# 2
# Output:
# 1
# Explanation:
# The minimum absolute difference is 1,
# which is the difference between 2 and 1 (or between 2 and 3).
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        """Create a node holding value x; children are attached afterwards."""
        self.val = x
        self.left = None
        self.right = None
class Solution:
    # Minimum absolute difference in a BST (original comments translated from
    # Chinese): since left < root < right, an inorder traversal visits values
    # in sorted order, so the answer is the smallest gap between consecutive
    # values of that traversal.
    def getMinimumDifference(self, root: "TreeNode") -> int:
        """Return the minimum absolute difference between any two node values.

        The TreeNode annotation is a forward-reference string so this class no
        longer requires TreeNode to exist at definition time (backward
        compatible). An empty tree still raises IndexError and a single-node
        tree still returns 0, exactly as before.
        """
        values = []
        self.helper(values, root)
        # values[-1] - values[0] is an upper bound on every adjacent gap, so
        # using it as `default` keeps the single-node result at 0 while the
        # original's dead `m is None` branch is dropped.
        widest = values[-1] - values[0]
        return min((b - a for a, b in zip(values, values[1:])), default=widest)

    def helper(self, ret, root):
        """Append the subtree's values to ret in ascending (inorder) order."""
        if root:
            self.helper(ret, root.left)
            ret.append(root.val)
            self.helper(ret, root.right)
| StarcoderdataPython |
3261180 | <reponame>SMAPPNYU/smappdragon
import os
import pymongo
import unittest
from test.config import config
from smappdragon import MongoCollection
from smappdragon.tools.tweet_parser import TweetParser
class TestMongoCollection(unittest.TestCase):
    """Integration tests for MongoCollection.

    These hit the live MongoDB instance described in test.config; they are
    not isolated unit tests.
    """

    @staticmethod
    def _make_collection():
        """Build a MongoCollection from the shared test configuration.

        Extracted so each test does not repeat the six-argument constructor
        boilerplate.
        """
        return MongoCollection(
            config['mongo']['host'],
            config['mongo']['port'],
            config['mongo']['user'],
            config['mongo']['password'],
            config['mongo']['database'],
            config['mongo']['collection']
        )

    def test_iterator_returns_tweets(self):
        """A plain limited iterator should yield at least one tweet."""
        collection = self._make_collection()
        self.assertTrue(len(list(collection.set_limit(10).get_iterator())) > 0)

    # special test because custom logic is different on mongo
    def test_mongo_collection_custom_filter_filters(self):
        """Retweets plus non-retweets must account for every tweet."""
        collectionone = self._make_collection()
        full_collection_len = len(list(collectionone.set_limit(10).get_iterator()))

        def is_tweet_a_retweet(tweet):
            if 'retweeted' in tweet and tweet['retweeted']:
                return True
            else:
                return False

        num_retweets = len(list(collectionone.set_limit(10).set_custom_filter(is_tweet_a_retweet).get_iterator()))

        collectiontwo = self._make_collection()

        def is_not_a_retweet(tweet):
            if 'retweeted' in tweet and tweet['retweeted']:
                return False
            else:
                return True

        num_non_retweets = len(list(collectiontwo.set_limit(10).set_custom_filter(is_not_a_retweet).get_iterator()))

        # the number of retweets and non retweets should add up to the whole collection
        self.assertEqual(num_retweets + num_non_retweets, full_collection_len)

    def test_strip_tweets_keeps_fields(self):
        """strip_tweets should keep exactly the requested (nested) fields."""
        tweet_parser = TweetParser()
        collection = self._make_collection()
        self.maxDiff = None
        it = collection.set_limit(10).strip_tweets(['id', 'entities.user_mentions', 'user.profile_image_url_https']).get_iterator()

        def tweets_have_right_keys(iterator, fields):
            # every requested flattened key-path must survive stripping
            for tweet in iterator:
                keys = [key for key, value in tweet_parser.flatten_dict(tweet)]
                for elem in fields:
                    if elem not in keys:
                        return False
            return True

        self.assertTrue(tweets_have_right_keys(it, [['id'], ['entities', 'user_mentions'], ['user', 'profile_image_url_https']]))

    def test_pass_in_mongo(self):
        """A pre-built pymongo client can be handed in via passed_mongo."""
        mongo_to_pass = pymongo.MongoClient(config['mongo']['host'], int(config['mongo']['port']))
        # NOTE(review): unlike the other tests, host/port are not passed
        # positionally here -- presumably passed_mongo supersedes them;
        # confirm against MongoCollection's constructor signature.
        collection = MongoCollection(
            config['mongo']['user'],
            config['mongo']['password'],
            config['mongo']['database'],
            config['mongo']['collection'],
            passed_mongo=mongo_to_pass
        )
        self.assertTrue(len(list(collection.set_limit(10).get_iterator())) > 0)
if __name__ == '__main__':
    unittest.main()

# The string below is a no-op module-level literal, kept as an informal
# author credit.
'''
author @yvan
'''
| StarcoderdataPython |
"""
watches the remote status file (on s3) and updates a local status file
infinite loop like a deamon
if you want sms control this must be running
"""
from __future__ import print_function
from time import sleep
import urllib2
print('ok watching https://s3.amazonaws.com/blackcatsensor/status')
while True:
    try:
        # First line of the remote file is the status token.
        # NOTE(review): an empty remote file would raise IndexError here,
        # which is not caught -- unchanged from the original; confirm intent.
        status = urllib2.urlopen('https://s3.amazonaws.com/blackcatsensor/status').readlines()[0]
        # 'with' closes the file even if the write raises (the original
        # leaked the handle in that case) and the stray trailing comma that
        # made the print call part of a throwaway tuple is dropped.
        with open('/home/pi/blackcat/status.txt', 'w') as f:
            print(status, file=f)
    except urllib2.URLError:
        # Network hiccup: keep the previous local status and retry.
        pass
    sleep(60)  # check every minute
| StarcoderdataPython |
4815624 | <reponame>jaimiles23/Multiplication_Medley<filename>2_interaction_model/sample_utterance_generation/UserNameIntent_utterances.py
"""/**
* @author [<NAME>]
* @email [<EMAIL>]
* @create date 2020-05-06 10:59:24
* @modify date 2020-05-06 10:59:24
* @desc [
Script to generate sample utterances for UserNameIntent.
]
*/
"""
# ---- slot placeholders -------------------------------------------------
# Raw strings so the literal braces survive into the printed utterances.
slots = (
    r"{first_name}",
    r"{us_first_name}",
    r"{gb_first_name}",
)

# ---- utterance templates -----------------------------------------------
utterance_format = (
    "{}",
    "My name is {}",
    "Call me {}",
    "I'm {}",
)

# ---- emit every template/slot combination ------------------------------
for template in utterance_format:
    for slot_name in slots:
        print(template.format(slot_name))
| StarcoderdataPython |
import json
from topojson.core.extract import Extract
from shapely import geometry
import geopandas
import geojson
# extract copies coordinates sequentially into a buffer
def test_extract_linestring():
    data = {
        "foo": {"type": "LineString", "coordinates": [[0, 0], [1, 0], [2, 0]]},
        "bar": {"type": "LineString", "coordinates": [[0, 0], [1, 0], [2, 0]]},
    }
    topo = Extract(data).to_dict()

    # two input LineStrings -> two extracted linestrings
    assert len(topo["linestrings"]) == 2


# assess if a multipolygon with hole is processed into the right number of rings
def test_extract_multipolygon():
    # multipolygon with hole
    data = {
        "foo": {
            "type": "MultiPolygon",
            "coordinates": [
                [
                    [[0, 0], [20, 0], [10, 20], [0, 0]],  # CCW
                    [[3, 2], [10, 16], [17, 2], [3, 2]],  # CW
                ],
                [[[6, 4], [14, 4], [10, 12], [6, 4]]],  # CCW
                [[[25, 5], [30, 10], [35, 5], [25, 5]]],
            ],
        }
    }
    topo = Extract(data).to_dict()

    # 3 polygons in the multipolygon, 4 rings (the first polygon has a hole)
    assert len(topo["bookkeeping_geoms"]) == 3
    assert len(topo["linestrings"]) == 4


# a LineString without coordinates is an empty polygon geometry
def test_extract_empty_linestring():
    data = {"empty_ls": {"type": "LineString", "coordinates": None}}
    topo = Extract(data).to_dict()

    assert topo["objects"]["empty_ls"]["arcs"] == None


# invalid polygon geometry
def test_extract_invalid_polygon():
    data = {
        "wrong": {"type": "Polygon", "coordinates": [[[0, 0], [1, 0], [2, 0], [0, 0]]]},
        "valid": {"type": "Polygon", "coordinates": [[[0, 0], [2, 0], [1, 1], [0, 0]]]},
    }
    topo = Extract(data).to_dict()

    # the degenerate (collinear, zero-area) "wrong" ring is dropped
    assert len(topo["linestrings"]) == 1


# test multilinestring
def test_extract_multilinestring():
    data = {
        "foo": {
            "type": "MultiLineString",
            "coordinates": [
                [[0.0, 0.0], [1, 1], [3, 3]],
                [[1, 1], [0, 1]],
                [[3, 3], [4, 4], [0, 1]],
            ],
        }
    }
    topo = Extract(data).to_dict()

    assert len(topo["linestrings"]) == 3
# test nested geojson geometrycollection collection
def test_extract_nested_geometrycollection():
    data = {
        "foo": {
            "type": "GeometryCollection",
            "geometries": [
                {
                    "type": "GeometryCollection",
                    "geometries": [
                        {"type": "LineString", "coordinates": [[0.1, 0.2], [0.3, 0.4]]}
                    ],
                },
                {
                    "type": "Polygon",
                    "coordinates": [[[0.5, 0.6], [0.7, 0.8], [0.9, 1.0]]],
                },
            ],
        }
    }
    topo = Extract(data).to_dict()

    # the inner collection's LineString keeps its nested position
    assert len(topo["objects"]["foo"]["geometries"][0]["geometries"][0]["arcs"]) == 1


# test geometry collection + polygon
def test_extract_geometrycollection_polygon():
    data = {
        "bar": {"type": "Polygon", "coordinates": [[[0, 0], [1, 1], [2, 0]]]},
        "foo": {
            "type": "GeometryCollection",
            "geometries": [
                {"type": "LineString", "coordinates": [[0.1, 0.2], [0.3, 0.4]]}
            ],
        },
    }
    topo = Extract(data).to_dict()

    assert len(topo["linestrings"]) == 2


# test feature type
def test_extract_features():
    data = {
        "foo": {
            "type": "Feature",
            "geometry": {"type": "LineString", "coordinates": [[0.1, 0.2], [0.3, 0.4]]},
        },
        "bar": {
            "type": "Feature",
            "geometry": {
                "type": "Polygon",
                "coordinates": [[[0.5, 0.6], [0.7, 0.8], [0.9, 1.0]]],
            },
        },
    }
    topo = Extract(data).to_dict()

    assert len(topo["linestrings"]) == 2


# test feature collection including geometry collection
def test_extract_featurecollection():
    data = {
        "collection": {
            "type": "FeatureCollection",
            "features": [
                {
                    "type": "Feature",
                    "geometry": {
                        "type": "LineString",
                        "coordinates": [[0.1, 0.2], [0.3, 0.4]],
                    },
                },
                {
                    "type": "Feature",
                    "geometry": {
                        "type": "GeometryCollection",
                        "geometries": [
                            {
                                "type": "Polygon",
                                "coordinates": [[[0.5, 0.6], [0.7, 0.8], [0.9, 1.0]]],
                            }
                        ],
                    },
                },
            ],
        }
    }
    topo = Extract(data).to_dict()

    # features are unpacked into per-feature objects
    assert len(topo["objects"]) == 2
    assert len(topo["bookkeeping_geoms"]) == 2
    assert len(topo["linestrings"]) == 2
    assert topo["objects"]["feature_0"]["geometries"][0]["type"] == "LineString"
    assert (
        topo["objects"]["feature_1"]["geometries"][0]["geometries"][0]["type"]
        == "Polygon"
    )
# test to parse feature collection from a geojson file through geojson library
def test_extract_geojson_feat_col_geom_col():
    with open("tests/files_geojson/feature_collection.geojson") as f:
        data = geojson.load(f)
    topo = Extract(data).to_dict()

    assert len(topo["objects"]) == 1
    assert len(topo["bookkeeping_geoms"]) == 3
    assert len(topo["linestrings"]) == 3


# test to parse a feature from a geojson file through geojson library
def test_extract_geojson_feature_geom_col():
    with open("tests/files_geojson/feature.geojson") as f:
        data = geojson.load(f)
    topo = Extract(data).to_dict()

    assert len(topo["objects"]) == 1
    assert len(topo["bookkeeping_geoms"]) == 3
    assert len(topo["linestrings"]) == 3


# test geopandas GeoSeries input
def test_extract_geopandas_geoseries():
    data = geopandas.GeoSeries(
        [
            geometry.Polygon([(0, 0), (1, 0), (1, 1)]),
            geometry.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
            geometry.Polygon([(2, 0), (3, 0), (3, 1), (2, 1)]),
        ]
    )
    topo = Extract(data).to_dict()

    assert len(topo["objects"]) == 3
    assert len(topo["bookkeeping_geoms"]) == 3
    assert len(topo["linestrings"]) == 3


# TEST FAILS because of https://github.com/geopandas/geopandas/issues/1070
# test shapely geometry collection.
def test_extract_shapely_geometrycollection():
    data = geometry.GeometryCollection(
        [
            geometry.Polygon([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]),
            geometry.Polygon([[1, 0], [2, 0], [2, 1], [1, 1], [1, 0]]),
        ]
    )
    topo = Extract(data).to_dict()

    assert len(topo["objects"]) == 1
    assert len(topo["bookkeeping_geoms"]) == 2
    assert len(topo["linestrings"]) == 2


# plain dicts in a list are consumed via the __geo_interface__ path
def test_extract_geo_interface_from_list():
    data = [
        {"type": "LineString", "coordinates": [[0, 0], [1, 0], [2, 0]]},
        {"type": "LineString", "coordinates": [[0, 0], [1, 0], [2, 0]]},
    ]
    topo = Extract(data).to_dict()

    assert len(topo["linestrings"]) == 2


# shapely objects in a plain list work too
def test_extract_shapely_geo_interface_from_list():
    data = [
        geometry.Polygon([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]),
        geometry.Polygon([[1, 0], [2, 0], [2, 1], [1, 1], [1, 0]]),
    ]
    topo = Extract(data).to_dict()

    assert len(topo["linestrings"]) == 2
    assert isinstance(topo["objects"][0], dict)


# duplicate rotated geometry bar with hole interior in geometry foo
def test_extract_geomcol_multipolygon_polygon():
    data = {
        "foo": {
            "type": "GeometryCollection",
            "geometries": [
                {
                    "type": "MultiPolygon",
                    "coordinates": [
                        [
                            [[10, 20], [20, 0], [0, 0], [3, 13], [10, 20]],
                            [[3, 2], [10, 16], [17, 2], [3, 2]],
                        ],
                        [[[10, 4], [14, 4], [10, 12], [10, 4]]],
                    ],
                },
                {
                    "type": "Polygon",
                    "coordinates": [[[20, 0], [35, 5], [10, 20], [20, 0]]],
                },
            ],
        }
    }
    topo = Extract(data).to_dict()

    assert len(topo["linestrings"]) == 4


# objects exposing __geo_interface__ (pyshp Reader) are accepted directly
def test_extract_geo_interface_shapefile():
    import shapefile

    data = shapefile.Reader("tests/files_shapefile/southamerica.shp")
    topo = Extract(data).to_dict()

    assert len(topo["linestrings"]) == 15
def test_extract_points():
data = [
{"type": "Polygon", "coordinates": [[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]]},
{"type": "Point", "coordinates": [0.5, 0.5]},
]
topo = Extract(data).to_dict()
assert len(topo["bookkeeping_coords"]) == 1
assert len(topo["bookkeeping_geoms"]) == 1
assert topo["coordinates"][0].wkt == "POINT (0.5 0.5)"
assert "coordinates" in topo["objects"][1].keys()
def test_extract_single_polygon():
data = geometry.Polygon([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])
topo = Extract(data).to_dict()
assert len(topo["bookkeeping_geoms"]) == 1
def test_extract_single_linestring():
data = geometry.LineString([[0, 0], [1, 0], [1, 1], [0, 1]])
topo = Extract(data).to_dict()
assert len(topo["bookkeeping_geoms"]) == 1
def test_extract_single_multilinestring():
data = geometry.MultiLineString([[[0, 0], [1, 1]], [[-1, 0], [1, 0]]])
topo = Extract(data).to_dict()
assert len(topo["bookkeeping_geoms"]) == 2
def test_extract_single_multilinestring_list():
    """Wrapping the MultiLineString in a list must not change the result."""
    wrapped = [geometry.MultiLineString([[[0, 0], [1, 1]], [[-1, 0], [1, 0]]])]
    result = Extract(wrapped).to_dict()
    assert len(result["bookkeeping_geoms"]) == 2
def test_extract_geopandas_geodataframe():
    """Extract should consume a geopandas GeoDataFrame directly."""
    frame = geopandas.read_file(
        "tests/files_geojson/naturalearth_alb_grc.geojson", driver="GeoJSON"
    )
    result = Extract(frame).to_dict()
    assert len(result["bookkeeping_geoms"]) == 3
# dict should have a valued key:geom_object; otherwise the key:value pair is removed
def test_extract_invalid_dict_item():
    """A MultiPolygon whose coordinates are not nested as polygons is
    rejected, so no bookkeeping geometries remain."""
    malformed = {
        "type": "MultiPolygon",
        "coordinates": [[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]],
    }
    result = Extract(malformed).to_dict()
    assert len(result["bookkeeping_geoms"]) == 0
| StarcoderdataPython |
1720611 | <reponame>craigatron/parse-tle<gh_stars>0
from setuptools import setup
# Package metadata collected in one mapping so it is easy to scan and extend.
_METADATA = dict(
    name='parsetle',
    version='0.1',
    description='Parses two-line element set files',
    url='https://github.com/craigatron/parse-tle',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=['parsetle'],
    zip_safe=False,
)

setup(**_METADATA)
class Solution:
    """LeetCode 27: remove all occurrences of a value from a list in place."""

    def removeElement(self, nums, val):
        """Move every element equal to ``val`` to the tail of ``nums`` by
        swapping, and return the length of the prefix free of ``val``.

        :param nums: list of integers, reordered in place
        :param val: integer value to remove
        :return: number of kept elements at the front of ``nums``
        """
        removed = 0
        cursor = 0
        # Scan only the not-yet-examined prefix; each match is exchanged
        # with the last unexamined element instead of shifting the list.
        while cursor < len(nums) - removed:
            if nums[cursor] != val:
                cursor += 1
            else:
                removed += 1
                nums[cursor], nums[-removed] = nums[-removed], nums[cursor]
        return len(nums) - removed
| StarcoderdataPython |
3393373 | <reponame>gdikov/MNIST_Challenge
from models.model import AbstractModel
from models.nn.layers import *
from numerics.softmax import softmax
import config as cfg
import os
import cPickle
import numpy as np
# from utils.vizualiser import plot_filters
class ConvolutionalNeuralNetwork(AbstractModel):
    """A small LeNet-style CNN for MNIST built from the project's layer
    primitives (models.nn.layers).

    Architecture: conv(5x5, 20) -> ReLU -> max-pool(2x2) ->
    conv(5x5, 50) -> ReLU -> max-pool(2x2) -> linear(500) -> ReLU ->
    dropout(0.5) -> linear(10).
    """

    def __init__(self, convolution_mode='scipy'):
        """Build the network and initialise training bookkeeping.

        :param convolution_mode: convolution implementation used by the
            Conv layers; only 'scipy' and 'naive' are accepted.
        :raises ValueError: for any other convolution mode.
        """
        super(ConvolutionalNeuralNetwork, self).__init__('ConvNet')
        self.batch_size = cfg.batch_size
        if convolution_mode in ['scipy', 'naive']:
            self.conv_mode = convolution_mode
        else:
            raise ValueError
        self._build_network()
        # Per-minibatch training losses and per-epoch validation accuracy.
        self.train_history = {'train_loss': [],
                              'val_acc': []}

    def _build_network(self):
        """
        Build a modified version of LeNet and store the layers, in forward
        order, in ``self.layers``.
        :return:
        """
        inp_layer = Input()
        filter_size = 5
        # 'same' padding: (filter_size - 1) / 2 keeps the spatial size.
        conv1 = Conv(incoming=inp_layer,
                     conv_params={'stride': 1, 'pad': (filter_size - 1) / 2, 'filter_size': filter_size},
                     num_filters=20,
                     conv_mode=self.conv_mode)
        relu1 = ReLU(incoming=conv1)
        pool1 = Pool(incoming=relu1,
                     pool_params={'pool_height': 2, 'pool_width': 2, 'stride': 2})
        conv2 = Conv(incoming=pool1,
                     conv_params={'stride': 1, 'pad': (filter_size - 1) / 2, 'filter_size': filter_size},
                     num_filters=50,
                     conv_mode=self.conv_mode)
        relu2 = ReLU(incoming=conv2)
        pool2 = Pool(incoming=relu2, pool_params={'pool_height': 2, 'pool_width': 2, 'stride': 2})
        linear1 = Linear(incoming=pool2, num_units=500)
        lrelu1 = ReLU(incoming=linear1)
        dropout1 = Dropout(incoming=lrelu1, p=0.5)
        out_layer = Linear(incoming=dropout1, num_units=10)
        self.layers = (inp_layer,
                       conv1, relu1, pool1,
                       conv2, relu2, pool2,
                       linear1, lrelu1,
                       dropout1,
                       out_layer)

    def save_trainable_params(self):
        """Pickle every layer's parameters to ``pretrained/layer_<i>.npy``
        next to this module; layers without parameters are skipped."""
        path_to_params = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained')
        if not os.path.exists(path_to_params):
            os.makedirs(path_to_params)
        for layer_id, layer in enumerate(self.layers):
            if layer.params is not None:
                with open(os.path.join(path_to_params, 'layer_{0}.npy'.format(layer_id)), 'wb') as f:
                    cPickle.dump(layer.params, f)

    def load_trainable_params(self):
        """Load pickled per-layer parameters saved by
        :meth:`save_trainable_params`.

        :raises IOError: if the ``pretrained`` directory does not exist.
        """
        path_to_params = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained')
        if not os.path.exists(path_to_params):
            os.makedirs(path_to_params)
            print("Path to pre-computed parameters not found at: {}".format(path_to_params))
            raise IOError
        for layer_id, layer in enumerate(self.layers):
            if layer.params is not None:
                print("\tLoading pre-computed parameters for layer {0}".format(layer_id))
                with open(os.path.join(path_to_params, 'layer_{0}.npy'.format(layer_id)), 'rb') as f:
                    layer.params = cPickle.load(f)

    def _compute_forward_pass(self, inp_data_batch, mode):
        """Run a batch through every layer and return the class scores.

        :param inp_data_batch: batch of flattened images
        :param mode: 'train' or 'test' (controls e.g. dropout behaviour)
        """
        out_data_batch = self.layers[0].forward(inp_data_batch)
        for layer_id in xrange(1, len(self.layers)):
            out_data_batch = self.layers[layer_id].forward(out_data_batch, mode=mode)
        return out_data_batch

    def _compute_backward_pass(self, end_derivatives):
        """Backpropagate from the output layer to the input, updating each
        layer's trainable parameters along the way."""
        # update the last layer manually
        upstream_derivatives = self.layers[-1].backward(end_derivatives)
        self.layers[-1].update_trainable_params()
        for layer_id in xrange(len(self.layers)-2, 0, -1):
            upstream_derivatives = self.layers[layer_id].backward(upstream_derivatives)
            self.layers[layer_id].update_trainable_params()
        return upstream_derivatives

    def _compute_loss(self, scores, targets):
        """Softmax cross-entropy loss and its gradient w.r.t. the scores.

        :param scores: (N, 10) class scores
        :param targets: (N,) integer class labels
        :return: (mean loss, dscores) tuple
        """
        num_train = scores.shape[0]
        probabilities = softmax(scores)
        loss = -np.sum(np.log(probabilities[np.arange(num_train), targets])) / num_train
        # Gradient of the mean cross-entropy: p - 1 at the target class.
        probabilities[np.arange(num_train), targets] -= 1
        dsoftmax = probabilities / num_train
        return loss, dsoftmax

    def _batch_idx(self, data_size, shuffle=True):
        """Yield index arrays covering ``data_size`` samples in chunks of
        roughly ``self.batch_size``.

        :param data_size: total number of samples
        :param shuffle: iterate the samples in random order when True
        """
        if shuffle:
            # maybe this is unnecessary because they are already shuffled
            # but it doesn't harm much to do it again
            shuffled_order = np.random.permutation(np.arange(data_size))
        else:
            shuffled_order = np.arange(data_size)
        # NOTE(review): data_size // self.batch_size must be >= 1, or
        # np.array_split raises -- confirm callers never pass fewer
        # samples than one batch.
        for x in np.array_split(shuffled_order, data_size // self.batch_size):
            yield x

    def fit(self, train_data, **kwargs):
        """Train with minibatch gradient descent, validating each epoch and
        saving weights whenever validation accuracy improves.

        :param train_data: dict with 'x_train', 'y_train', 'x_val', 'y_val';
            images are (N, 28, 28) arrays
        :param kwargs: supports ``num_epochs`` (default 100)
        """
        # NOTE(review): this flattens each image into a 28*28 row vector
        # (no channel dimension is added, despite the old comment) and it
        # mutates the caller's dict in place -- confirm intended.
        self.data = train_data
        # reshape the images into row vectors of 28*28 elements
        num_samples, dim_x, dim_y = self.data['x_train'].shape
        self.data['x_train'] = self.data['x_train'].reshape(num_samples, dim_x * dim_y)
        num_epochs = kwargs.get('num_epochs', 100)
        best_val_acc = 0.0
        for i in xrange(num_epochs):
            epoch_losses = []
            for idx in self._batch_idx(num_samples):
                scores = self._compute_forward_pass(self.data['x_train'][idx], mode='train')
                loss, dscores = self._compute_loss(scores, self.data['y_train'][idx])
                self._compute_backward_pass(dscores)
                self.train_history['train_loss'].append(loss)
                epoch_losses.append(loss)
                print("\t\tMinibatch train loss: {}".format(loss))
            # validate
            val_predictions = self.predict(self.data['x_val'])
            val_acc = np.sum(val_predictions == self.data['y_val']) / float(val_predictions.shape[0]) * 100.
            self.train_history['val_acc'].append(val_acc)
            if val_acc > best_val_acc:
                print("\t\tSaving weights")
                self.save_trainable_params()
                best_val_acc = val_acc
            print("\t\tEpoch: {0}, mean loss: {1}, validation accuracy: {2}".format(i, np.mean(epoch_losses), val_acc))

    def predict(self, new_data, **kwargs):
        """Return the predicted class (argmax over 10 logits) for each
        image in ``new_data`` (shape (N, 28, 28))."""
        # Flatten the images into row vectors of 28*28 elements.
        num_samples, dim_x, dim_y = new_data.shape
        new_data = new_data.reshape(num_samples, dim_x * dim_y)
        scores_all = []
        # Batches are taken in order (shuffle=False) so predictions line
        # up with the input rows; `i` is unused.
        for i, idx in enumerate(self._batch_idx(num_samples, shuffle=False)):
            scores = self._compute_forward_pass(new_data[idx], mode='test')
            scores_all.append(scores)
        scores_all = np.concatenate(scores_all)
        return np.argmax(scores_all, axis=1)
if __name__ == "__main__":
    # Smoke test: load the pre-trained weights and report accuracy on the
    # validation split.
    from utils.data_utils import load_MNIST

    data_train, data_test = load_MNIST()
    model = ConvolutionalNeuralNetwork()
    model.load_trainable_params()
    # plot_filters(model.layers[1].params['W'], plot_shape=(2,10), channel=1)
    # model.fit(data, num_epochs=100)
    predictions = model.predict(data_train['x_val'])
    test_acc = np.sum(predictions == data_train['y_val']) / float(predictions.shape[0]) * 100.
    print("Validation accuracy: {0}"
          .format(test_acc))
    #
    # miscalssified_idx = predictions != data['y_val'][:100]
    # from utils.vizualiser import plot_digits
    # #
    # plot_digits(data['x_val'][:100][miscalssified_idx][:64], predictions[miscalssified_idx][:64], plot_shape=(8, 8))
3300107 | <gh_stars>10-100
#! /usr/bin/env python3
###
# KINOVA (R) KORTEX (TM)
#
# Copyright (c) 2018 Kinova inc. All rights reserved.
#
# This software may be modified and distributed
# under the terms of the BSD 3-Clause license.
#
# Refer to the LICENSE file for details.
#
###
import sys
import os
import time
from kortex_api.autogen.client_stubs.DeviceConfigClientRpc import DeviceConfigClient
from kortex_api.autogen.client_stubs.BaseClientRpc import BaseClient
from kortex_api.autogen.messages import DeviceConfig_pb2, Session_pb2, Base_pb2
from kortex_api.Exceptions.KException import KException
from google.protobuf import json_format
def example_notification(base):
    """Subscribe to ConfigurationChange notifications, trigger one by
    creating a user profile, then unsubscribe and delete the profile.

    :param base: connected ``BaseClient`` used both for the notification
        subscription and for user-profile management
    """
    def notification_callback(data):
        # Called by the API client whenever a configuration change
        # notification arrives; dumps the protobuf payload as JSON.
        print("****************************")
        print("* Callback function called *")
        print(json_format.MessageToJson(data))
        print("****************************")

    # Subscribe to ConfigurationChange notifications
    print("Subscribing to ConfigurationChange notifications")
    try:
        notif_handle = base.OnNotificationConfigurationChangeTopic(notification_callback, Base_pb2.NotificationOptions())
    except KException as k_ex:
        print("Error occured: {}".format(k_ex))
    except Exception:
        print("Error occured")

    # ... miscellaneous tasks
    time.sleep(3)

    # Create a user profile to trigger a notification
    full_user_profile = Base_pb2.FullUserProfile()
    full_user_profile.user_profile.username = 'jcash'
    full_user_profile.user_profile.firstname = 'Johnny'
    full_user_profile.user_profile.lastname = 'Cash'
    full_user_profile.user_profile.application_data = "Custom Application Stuff"
    full_user_profile.password = "<PASSWORD>"

    user_profile_handle = Base_pb2.UserProfileHandle()
    try:
        print("Creating user profile to trigger notification")
        user_profile_handle = base.CreateUserProfile(full_user_profile)
    except KException:
        print("User profile creation failed")

    # Following the creation of the user profile, we should receive the ConfigurationChange notification (notification_callback() should be called)
    print("User {0} created".format(full_user_profile.user_profile.username))

    # Give time for the notification to arrive
    time.sleep(3)

    print("Now unsubscribing from ConfigurationChange notifications")
    # NOTE(review): if the subscription call above raised, `notif_handle`
    # was never bound and the next line raises NameError -- confirm
    # whether the example should bail out earlier instead.
    base.Unsubscribe(notif_handle)

    try:
        print("Deleting previously created user profile ({0})".format(full_user_profile.user_profile.username))
        base.DeleteUserProfile(user_profile_handle) # Should not have received notification about this modification
    except KException:
        print("User profile deletion failed")

    # Sleep to confirm that ConfigurationChange notification is not raised anymore after the unsubscribe
    time.sleep(3)
def main():
    """Parse connection arguments, open a TCP connection to the device and
    run the notification example against its Base service."""
    # Import the utilities helper module (lives one directory up).
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
    import utilities

    # Parse arguments
    args = utilities.parseConnectionArguments()

    # Create connection to the device and get the router
    with utilities.DeviceConnection.createTcpConnection(args) as router:
        # Create required services
        base = BaseClient(router)

        # Example core
        example_notification(base)
if __name__ == "__main__":  # script entry point
    main()
| StarcoderdataPython |
188864 | """
Author: ArchieYoung <<EMAIL>>
Time: Thu Jul 5 09:24:07 CST 2018
"""
import sys
import argparse
import os
from multiprocessing import Pool
from glob import iglob
from sv_vcf import SV
def vcf_to_db_bed(_args):
    """Convert one sample's SV VCF into a BED file for the local database.

    ``_args`` is a single tuple (packed so this function can be used with
    ``multiprocessing.Pool.map``) of:

        vcf: path to the input VCF file
        min_support_reads: minimum supporting-read count to keep a record
        out_dir: directory receiving ``<sv_id_prefix>.sv.database.bed``
        sv_id_prefix: sample prefix prepended to every SV ID

    Records on non-primary chromosomes or with too few supporting reads
    are dropped. Intra-chromosomal SVs produce one BED line; inter-
    chromosomal ones (TRA) produce two lines, one per breakpoint,
    suffixed ``_1``/``_2``.

    :raises RuntimeError: if an SV ID occurs twice in the VCF.
    """
    vcf, min_support_reads, out_dir, sv_id_prefix = _args
    with open(vcf, "r") as io:
        lines = io.readlines()
    # chromosomes to be kept
    main_chr = ([str(i) for i in range(1, 23)] + ["X", "Y", "MT", "M", "chrX", "chrY",
                "chrM", "chrMT"] + ["chr" + str(i) for i in range(1, 23)])
    # output bedlines
    bed_lines = []
    # check if id is unique
    id_dict = {}
    # chrom1,pos1,chrom2,pos2 of the previous record; used to drop the
    # mirrored duplicate adjacency records picky emits for TRA/INV
    previous_sv_breakpoint = ["NA", "NA", "NA", "NA"]
    for line in lines:
        # skip comment lines
        if line.strip()[0] == "#":
            continue
        sv = SV(line)
        # filter
        if sv.chrom1 not in main_chr or sv.chrom2 not in main_chr:
            continue
        if int(sv.re) < min_support_reads:
            continue
        # remove 'chr' in chromosome id
        sv.chrom1 = sv.chrom1.replace("chr", "")
        sv.chrom2 = sv.chrom2.replace("chr", "")
        # rename sv id
        if sv_id_prefix:
            sv.id = "_".join([sv_id_prefix, sv.id])
        if sv.id not in id_dict:
            id_dict[sv.id] = 1
        else:
            raise RuntimeError("Duplicated SV ID in you VCF "
                               "file {}".format(sv.id))
        sv_breakpoint = [sv.chrom1, sv.pos1, sv.chrom2, sv.pos2]
        # remove duplicate adjacency BND record in picky vcf
        # Exactly the same
        if ((sv.svtype == "TRA" or sv.svtype == "INV") and
                sv_breakpoint[:4] == previous_sv_breakpoint[:4]):
            continue
        # just swap breakpoint1 and breakpoint2, still the same
        if ((sv.svtype == "TRA" or sv.svtype == "INV") and
                sv_breakpoint[:2] == previous_sv_breakpoint[2:] and
                sv_breakpoint[2:] == previous_sv_breakpoint[:2]):
            previous_sv_breakpoint = sv_breakpoint
            continue
        previous_sv_breakpoint = sv_breakpoint
        # convert to bed format
        # chrom,start,end,svtype,id,svlen,re,info
        if sv.chrom1 == sv.chrom2:
            # BED is 0-based, half-open: shift the start left by one.
            if int(sv.pos1) > 1:
                sv.pos1 = str(int(sv.pos1)-1)
            bed_line = "\t".join([sv.chrom1,sv.pos1,sv.pos2,sv.svtype,
                                  sv.id,sv.svlen,sv.re])+"\n"
        else: #TRA
            if int(sv.pos1) > 1:
                pos1_1 = str(int(sv.pos1)-1)
                pos1_2 = sv.pos1
            elif int(sv.pos1) == 1:
                pos1_1 = "1"
                pos1_2 = "1"
            else:
                continue # invalid position
            if int(sv.pos2) > 1:
                pos2_1 = str(int(sv.pos2)-1)
                pos2_2 = sv.pos2
            elif int(sv.pos2) == 1:
                pos2_1 = "1"
                pos2_2 = "1"
            else:
                continue # invalid position
            # One BED line per breakpoint of the translocation.
            bed_line1 = "\t".join([sv.chrom1,pos1_1,pos1_2,sv.svtype,
                                   sv.id+"_1",sv.svlen,sv.re])+"\n"
            bed_line2 = "\t".join([sv.chrom2,pos2_1,pos2_2,sv.svtype,
                                   sv.id+"_2",sv.svlen,sv.re])+"\n"
            bed_line = bed_line1+bed_line2
        bed_lines.append(bed_line)
    out_bed_path = os.path.join(out_dir,
                                "{}.sv.database.bed".format(sv_id_prefix))
    with open(out_bed_path, "w") as out_hd:
        out_hd.writelines(bed_lines)
def db_check(vcf_list, db_bed_list):
    """Return the VCF paths that have not yet been added to the database.

    Sample identity is the file basename up to the first dot, e.g.
    ``sampleA`` for both ``sampleA.vcf`` and ``sampleA.sv.database.bed``.

    Args:
        vcf_list: iterable of candidate VCF paths.
        db_bed_list: iterable of BED paths already in the database.

    Returns:
        list of VCF paths whose sample id is absent from the database.

    Raises:
        RuntimeError: if two database BED files share one sample id.
    """
    def sample_id_of(path):
        # The basename up to the first dot identifies the sample.
        return os.path.basename(path).split(".")[0]

    # A set is the natural structure here (the original used a dict with
    # dummy values); duplicates in the database are still an error.
    db_sample_ids = set()
    for bed_path in db_bed_list:
        sample_id = sample_id_of(bed_path)
        if sample_id in db_sample_ids:
            raise RuntimeError("Duplicated sample {} in your database".format(sample_id))
        db_sample_ids.add(sample_id)
    return [vcf for vcf in vcf_list if sample_id_of(vcf) not in db_sample_ids]
def get_args():
    """Parse command-line arguments for the SV-database builder.

    Returns:
        argparse.Namespace with ``vcf_dir``, ``db_dir``, ``min_re`` and
        ``threads``. When no argument is given, prints the help text and
        exits with status 1.
    """
    parser = argparse.ArgumentParser(
        description="Prepare Local SV Database",
        usage="usage: %(prog)s [options]")
    parser.add_argument("--vcf_dir",
                        help="vcf file directory [default %(default)s]", metavar="STR")
    parser.add_argument("--db_dir",
                        help="database directory [default %(default)s]", metavar="STR")
    # NOTE(review): a supporting-read count is integral; float is kept for
    # backward compatibility with existing command lines.
    parser.add_argument("--min_re",
                        help="minimum support reads number [default %(default)s]", type=float,
                        default=2, metavar="INT")
    parser.add_argument("--threads",
                        help="number of threads [default %(default)s]", type=int,
                        default=4, metavar="INT")
    if len(sys.argv) <= 1:
        parser.print_help()
        # Fix: use sys.exit with a non-zero status instead of the bare
        # `exit()` builtin (provided by the site module, not guaranteed,
        # and it reported success for a usage error).
        sys.exit(1)
    return parser.parse_args()
def main():
    """Build or refresh the local SV database from a directory of VCFs.

    VCFs whose sample id is not yet represented in the database directory
    are converted to BED in parallel; existing samples are left alone.
    """
    args = get_args()
    # vcf file list
    vcfs = iglob(os.path.join(args.vcf_dir, "*.vcf"))
    db_beds = []
    if not os.path.exists(args.db_dir):
        os.mkdir(args.db_dir)
    else:
        db_beds = iglob(os.path.join(args.db_dir, "*.bed"))
    vcf_tobe_add = db_check(vcfs, db_beds)
    #for i in vcf_tobe_add:
    #    print("add {} to local SV database".format(i))
    if len(vcf_tobe_add) == 0:
        print("database is the newest.")
        sys.exit(0)
    # Sample prefix is the VCF basename without its ".vcf" extension.
    work_args_list = [(i, args.min_re, args.db_dir,
                       os.path.basename(i)[:-4]) for i in vcf_tobe_add]
    with Pool(processes=args.threads) as pool:
        pool.map(vcf_to_db_bed, work_args_list)
main()
| StarcoderdataPython |
3232120 | from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
import copy
import os
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from catalyst.core.engine import IEngine
from catalyst.typing import (
Device,
Model,
Optimizer,
RunnerCriterion,
RunnerModel,
RunnerOptimizer,
RunnerScheduler,
)
from catalyst.utils.distributed import ddp_reduce
from catalyst.utils.torch import (
any2device,
load_checkpoint,
pack_checkpoint,
save_checkpoint,
unpack_checkpoint,
)
class DeviceEngine(IEngine):
    """Single training device engine.

    Args:
        device: use device, default is `"cpu"`.

    Examples:

    .. code-block:: python

        from catalyst import dl

        runner = dl.SupervisedRunner()
        runner.train(
            engine=dl.DeviceEngine("cuda:1"),
            ...
        )

    .. code-block:: python

        from catalyst import dl

        class MyRunner(dl.IRunner):
            # ...
            def get_engine(self):
                return dl.DeviceEngine("cuda:1")
            # ...

    .. code-block:: yaml

        args:
            logs: ...

        model:
            _target_: ...
            ...

        engine:
            _target_: DeviceEngine
            device: cuda:1

        stages:
            ...

    """

    def __init__(self, device: str = None):
        """Init."""
        # Default to the generic "cuda" device when CUDA is available,
        # otherwise fall back to CPU.
        device = device or ("cuda" if torch.cuda.is_available() else "cpu")
        self._device = device

    def __repr__(self) -> str:  # noqa: D105
        return f"{self.__class__.__name__}(device='{self._device}')"

    @property
    def device(self) -> Device:
        """Pytorch device."""
        return self._device

    @property
    def rank(self) -> int:
        """Process rank for distributed training (-1: not distributed)."""
        return -1

    @property
    def world_size(self) -> int:
        """Process world size for distributed training (always 1 here)."""
        return 1

    @property
    def backend(self) -> Optional[str]:
        """String identifier for distributed backend (None: no backend)."""
        return None

    def sync_device(
        self, tensor_or_module: Union[Dict, List, Tuple, np.ndarray, torch.Tensor, nn.Module]
    ) -> Union[Dict, List, Tuple, torch.Tensor, nn.Module]:
        """Moves ``tensor_or_module`` to Engine's device."""
        return any2device(tensor_or_module, device=self.device)

    def sync_tensor(self, tensor: torch.Tensor, mode: str) -> torch.Tensor:
        """Syncs ``tensor`` over ``world_size`` in distributed mode."""
        # Single process: nothing to synchronize.
        return tensor

    def sync_metrics(self, metrics: Dict) -> Dict:
        """Syncs ``metrics`` over ``world_size`` in the distributed mode."""
        # Single process: metrics are already final.
        return metrics

    def init_components(
        self, model_fn=None, criterion_fn=None, optimizer_fn=None, scheduler_fn=None
    ):
        """Inits the run's components from the given factories.

        NOTE(review): here every factory is called without arguments,
        while ``DistributedDataParallelEngine.init_components`` calls
        ``optimizer_fn(model)`` and ``scheduler_fn(optimizer)`` -- confirm
        which factory signature runners are expected to provide.
        """
        # model
        model = model_fn()
        model = self.sync_device(model)
        # criterion
        criterion = criterion_fn()
        criterion = self.sync_device(criterion)
        # optimizer
        optimizer = optimizer_fn()
        optimizer = self.sync_device(optimizer)
        # scheduler
        scheduler = scheduler_fn()
        scheduler = self.sync_device(scheduler)
        return model, criterion, optimizer, scheduler

    def deinit_components(self, runner=None):
        """Deinits the run's components. No-op for a single device."""
        pass

    def zero_grad(self, loss: torch.Tensor, model: Model, optimizer: Optimizer) -> None:
        """Abstraction over ``model.zero_grad()`` step."""
        model.zero_grad()

    def backward_loss(self, loss: torch.Tensor, model: Model, optimizer: Optimizer) -> None:
        """Abstraction over ``loss.backward()`` step."""
        loss.backward()

    def optimizer_step(self, loss: torch.Tensor, model: Model, optimizer: Optimizer) -> None:
        """Abstraction over ``optimizer.step()`` step."""
        optimizer.step()

    def pack_checkpoint(
        self,
        model: RunnerModel = None,
        criterion: RunnerCriterion = None,
        optimizer: RunnerOptimizer = None,
        scheduler: RunnerScheduler = None,
        **kwargs,
    ) -> Dict:
        """
        Packs ``model``, ``criterion``, ``optimizer``, ``scheduler``
        and some extra info ``**kwargs`` to torch-based checkpoint.

        Args:
            model: torch model
            criterion: torch criterion
            optimizer: torch optimizer
            scheduler: torch scheduler
            **kwargs: some extra info to pack

        Returns:
            torch-based checkpoint with ``model_state_dict``,
            ``criterion_state_dict``, ``optimizer_state_dict``,
            ``scheduler_state_dict`` keys.
        """
        return pack_checkpoint(
            model=model, criterion=criterion, optimizer=optimizer, scheduler=scheduler, **kwargs
        )

    def unpack_checkpoint(
        self,
        checkpoint: Dict,
        model: RunnerModel = None,
        criterion: RunnerCriterion = None,
        optimizer: RunnerOptimizer = None,
        scheduler: RunnerScheduler = None,
        **kwargs,
    ) -> None:
        """Load checkpoint from file and unpack the content to a model
        (if not None), criterion (if not None), optimizer (if not None),
        scheduler (if not None).

        Args:
            checkpoint: checkpoint to load
            model: model where should be updated state
            criterion: criterion where should be updated state
            optimizer: optimizer where should be updated state
            scheduler: scheduler where should be updated state
            kwargs: extra arguments
        """
        unpack_checkpoint(
            checkpoint=checkpoint,
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            scheduler=scheduler,
        )

    def save_checkpoint(self, checkpoint: Mapping[str, Any], path: str):
        """Saves checkpoint to a file.

        Args:
            checkpoint: data to save.
            path: filepath where checkpoint should be stored.
        """
        save_checkpoint(checkpoint=checkpoint, path=path)

    def load_checkpoint(self, path: str):
        """Load checkpoint from path.

        Args:
            path: checkpoint file to load

        Returns:
            loaded checkpoint
        """
        return load_checkpoint(path=path)
class DataParallelEngine(DeviceEngine):
    """MultiGPU training device engine.

    Examples:

    .. code-block:: python

        from catalyst import dl

        runner = dl.SupervisedRunner()
        runner.train(
            engine=dl.DataParallelEngine(),
            ...
        )

    .. code-block:: python

        from catalyst import dl

        class MyRunner(dl.IRunner):
            # ...
            def get_engine(self):
                return dl.DataParallelEngine()
            # ...

    .. code-block:: yaml

        args:
            logs: ...

        model:
            _target_: ...
            ...

        engine:
            _target_: DataParallelEngine

        stages:
            ...

    """

    def __init__(self):
        """Init"""
        # Anchor the engine on the currently selected CUDA device; inputs
        # are scattered from (and outputs gathered back to) this device.
        super().__init__(f"cuda:{torch.cuda.current_device()}")
        self.device_count = torch.cuda.device_count()

    def __repr__(self) -> str:  # noqa: D105
        return f"{self.__class__.__name__}(device_count={self.device_count})"

    def init_components(
        self, model_fn=None, criterion_fn=None, optimizer_fn=None, scheduler_fn=None
    ):
        """Inits the run's components, wrapping the model(s) with
        ``nn.DataParallel``.

        Raises:
            ValueError: if ``model_fn`` returns neither an ``nn.Module``
                nor a dict of modules.
        """
        model = model_fn()
        model = self.sync_device(model)
        # Wrap a single module, or every entry of a multi-model dict.
        if isinstance(model, nn.Module):
            model = nn.DataParallel(model)
        elif isinstance(model, dict):
            model = {k: nn.DataParallel(v) for k, v in model.items()}
        else:
            raise ValueError("Model should be ``nn.Module`` or ``dict``")
        # criterion
        criterion = criterion_fn()
        criterion = self.sync_device(criterion)
        # optimizer
        optimizer = optimizer_fn()
        optimizer = self.sync_device(optimizer)
        # scheduler
        scheduler = scheduler_fn()
        scheduler = self.sync_device(scheduler)
        return model, criterion, optimizer, scheduler
class DistributedDataParallelEngine(DeviceEngine):
    """Distributed MultiGPU training device engine.

    Args:
        address: master node (rank 0)'s address, should be either the IP address or the hostname
            of node 0, for single node multi-proc training, can simply be 127.0.0.1
        port: master node (rank 0)'s free port that needs to be used for communication
            during distributed training
        world_size: the number of processes to use for distributed training.
            Should be less or equal to the number of GPUs
        workers_dist_rank: the rank of the first process to run on the node.
            It should be a number between `number of initialized processes` and `world_size - 1`,
            the other processes on the node wiil have ranks `# of initialized processes + 1`,
            `# of initialized processes + 2`, ...,
            `# of initialized processes + num_node_workers - 1`
        num_node_workers: the number of processes to launch on the node.
            For GPU training, this is recommended to be set to the number of GPUs
            on the current node so that each process can be bound to a single GPU
        process_group_kwargs: parameters for `torch.distributed.init_process_group`.
            More info here:
            https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group
        sync_bn: boolean flag for batchnorm synchonization during disributed training.
            if True, applies PyTorch `convert_sync_batchnorm`_ to the model for native torch
            distributed only. Default, False.
        ddp_kwargs: parameters for `torch.nn.parallel.DistributedDataParallel`.
            More info here:
            https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel

    Examples:

    .. code-block:: python

        from catalyst import dl

        runner = dl.SupervisedRunner()
        runner.train(
            engine=dl.DistributedDataParallelEngine(),
            ...
        )

    .. code-block:: python

        from catalyst import dl

        class MyRunner(dl.IRunner):
            # ...
            def get_engine(self):
                return dl.DistributedDataParallelEngine(
                    address="0.0.0.0",
                    port=23234,
                    ddp_kwargs={"find_unused_parameters": False},
                    process_group_kwargs={"backend": "nccl"},
                )
            # ...

    .. code-block:: yaml

        args:
            logs: ...

        model:
            _target_: ...
            ...

        engine:
            _target_: DistributedDataParallelEngine
            address: 0.0.0.0
            port: 23234
            ddp_kwargs:
                find_unused_parameters: false
            process_group_kwargs:
                backend: nccl

        stages:
            ...

    .. _convert_sync_batchnorm:
        https://pytorch.org/docs/stable/generated/torch.nn.SyncBatchNorm.html#
        torch.nn.SyncBatchNorm.convert_sync_batchnorm
    """

    def __init__(
        self,
        address: str = "127.0.0.1",
        port: Union[str, int] = 2112,
        world_size: Optional[int] = None,
        workers_dist_rank: int = 0,
        num_node_workers: Optional[int] = None,
        process_group_kwargs: Dict[str, Any] = None,
        sync_bn: bool = False,
        ddp_kwargs: Dict[str, Any] = None,
    ):
        """Init."""
        super().__init__()
        self.address = address
        self.port = port
        self.workers_global_rank = workers_dist_rank
        self.num_local_workers = num_node_workers
        # Default to one worker per visible GPU on this node.
        if not self.num_local_workers and torch.cuda.is_available():
            self.num_local_workers = torch.cuda.device_count()
        self._world_size = world_size or self.num_local_workers
        self._rank = -1  # defined in `setup_process(...)`
        self._device = None  # defined in `setup_process(...)`
        # User kwargs are deep-copied so later mutation by the caller
        # cannot affect the engine; NCCL is the default backend.
        process_group_kwargs = copy.deepcopy(process_group_kwargs) or {}
        self.process_group_kwargs = {
            "backend": "nccl",
            "world_size": self._world_size,
            **process_group_kwargs,
        }
        self._backend = self.process_group_kwargs["backend"]
        self._sync_bn = sync_bn
        ddp_kwargs = ddp_kwargs or {}
        self.ddp_kwargs = copy.deepcopy(ddp_kwargs)

    def __repr__(self):  # noqa: D105
        return (
            f"{self.__class__.__name__}(address={self.address}, "
            f"port={self.port}, "
            f"ddp_kwargs={self.ddp_kwargs}, "
            f"process_group_kwargs={self.process_group_kwargs})"
        )

    @property
    def rank(self) -> int:
        """Process rank for distributed training."""
        return self._rank

    @property
    def world_size(self) -> int:
        """Process world size for distributed training."""
        return self._world_size

    @property
    def backend(self) -> Optional[str]:
        """String identifier for distributed backend."""
        return self._backend

    def barrier(self) -> None:
        """
        Synchronizes all processes.

        This collective blocks processes until the all runs enter the function.
        """
        dist.barrier()

    def spawn(self, fn: Callable, *args: Any, **kwargs: Any) -> None:
        """Spawns abstraction for``nprocs`` creation with specified ``fn`` and ``args``/``kwargs``.

        NOTE(review): the extra ``*args``/``**kwargs`` are accepted but not
        forwarded -- ``fn`` is always invoked as ``fn(local_rank,
        world_size)``. Confirm whether forwarding was intended.

        Args:
            fn: function is called as the entrypoint of the spawned process.
                This function must be defined at the top level of a module
                so it can be pickled and spawned.
                This is a requirement imposed by multiprocessing.
                The function is called as ``fn(i, *args)``, where ``i`` is
                the process index and ``args`` is the passed through tuple
                of arguments.
            *args: arguments passed to spawn method
            **kwargs: keyword-arguments passed to spawn method

        Returns:
            wrapped function.
        """
        return torch.multiprocessing.spawn(
            fn, args=(self._world_size,), nprocs=self.num_local_workers, join=True
        )

    def setup_process(self, rank: int = -1, world_size: int = 1):
        """Initialize DDP variables and processes.

        Args:
            rank: local process rank
            world_size: number of devices in netwok to expect for train
        """
        # Global rank = node offset + local rank.
        self._rank = self.workers_global_rank + rank
        if torch.cuda.is_available():
            torch.cuda.set_device(int(rank))
            self._device = f"cuda:{int(rank)}"
        # The MASTER_*/RANK environment variables must be in place before
        # init_process_group is called with the env:// defaults.
        os.environ["MASTER_ADDR"] = str(self.address)
        os.environ["MASTER_PORT"] = str(self.port)
        os.environ["WORLD_SIZE"] = str(self._world_size)
        os.environ["RANK"] = str(self._rank)
        os.environ["LOCAL_RANK"] = str(rank)
        dist.init_process_group(**self.process_group_kwargs)

    def cleanup_process(self):
        """Clean DDP variables and processes."""
        # Wait for every worker before tearing the group down.
        self.barrier()
        dist.destroy_process_group()

    def sync_tensor(self, tensor: torch.Tensor, mode: str = "all") -> torch.Tensor:
        """Syncs ``tensor`` over ``world_size`` in distributed mode.

        Args:
            tensor: tensor to sync across the processes.
            mode: tensor synchronization type,
                should be one of ``'sum'``, ``'mean'``, or ``all``.

        Returns:
            torch.Tensor with synchronized values.
        """
        return ddp_reduce(tensor, mode, self._world_size)

    def sync_metrics(self, metrics: Dict) -> Dict:
        """Syncs ``metrics`` over ``world_size`` in the distributed mode."""
        # Every scalar is averaged across the workers.
        metrics = {
            k: self.sync_tensor(torch.tensor(v, device=self.device), "mean")
            for k, v in metrics.items()
        }
        return metrics

    def init_components(
        self, model_fn=None, criterion_fn=None, optimizer_fn=None, scheduler_fn=None
    ):
        """Inits the run's components and wraps the model(s) with
        ``DistributedDataParallel``.

        Unlike ``DeviceEngine``, the optimizer factory receives the
        (wrapped) model and the scheduler factory receives the optimizer.

        Raises:
            ValueError: if ``model_fn`` returns neither an ``nn.Module``
                nor a dict of modules.
        """
        # Pin DDP to this process's device unless the caller overrode it.
        if "device_ids" not in self.ddp_kwargs and self._device is not None:
            self.ddp_kwargs["device_ids"] = [self._device]

        # model
        model = model_fn()
        model = self.sync_device(model)
        if isinstance(model, DistributedDataParallel):
            # Already wrapped by the caller; leave as-is.
            pass
        elif isinstance(model, nn.Module):
            if self._sync_bn:
                model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
            model = DistributedDataParallel(model, **self.ddp_kwargs)
        elif isinstance(model, dict):
            if self._sync_bn:
                model = {k: nn.SyncBatchNorm.convert_sync_batchnorm(v) for k, v in model.items()}
            model = {k: DistributedDataParallel(v, **self.ddp_kwargs) for k, v in model.items()}
        else:
            raise ValueError("Model should be ``nn.Module`` or ``dict``")
        # criterion
        criterion = criterion_fn()
        criterion = self.sync_device(criterion)
        # optimizer
        optimizer = optimizer_fn(model)
        optimizer = self.sync_device(optimizer)
        # scheduler
        scheduler = scheduler_fn(optimizer)
        scheduler = self.sync_device(scheduler)
        # Ensure every worker finished component setup before training.
        self.barrier()
        return model, criterion, optimizer, scheduler
__all__ = ["DeviceEngine", "DataParallelEngine", "DistributedDataParallelEngine"]
| StarcoderdataPython |
137308 | <reponame>matthijsvk/convNets
# ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from builtins import str, zip
import os
import glob
import logging
from neon import NervanaObject
logger = logging.getLogger(__name__)
class Indexer(NervanaObject):
    """Build a CSV index file (``filename,label1``) for the dataloader.

    Each immediate subdirectory of ``path`` is one class; integer labels
    follow the alphabetical order of the class directory names.
    """

    def __init__(self, path, index_file, pattern='*'):
        # Root data directory, target index path and per-class file glob.
        self.path = path
        self.index_file = index_file
        self.pattern = pattern

    def run(self):
        """Write the index file, unless it already exists."""
        if os.path.exists(self.index_file):
            return
        logger.warning('%s not found. Attempting to create...' % self.index_file)
        assert os.path.exists(self.path)
        class_dirs = [entry for entry in glob.iglob(os.path.join(self.path, '*'))
                      if os.path.isdir(entry)]
        # Alphabetical class order fixes the integer label assignment.
        sorted_names = sorted(os.path.basename(d) for d in class_dirs)
        class_map = {name: label for label, name in enumerate(sorted_names)}
        with open(self.index_file, 'w') as fd:
            fd.write('filename,label1\n')
            for class_dir in class_dirs:
                class_name = os.path.basename(class_dir)
                label = class_map[class_name]
                for filename in glob.iglob(os.path.join(class_dir, self.pattern)):
                    rel_path = os.path.join(class_name, os.path.basename(filename))
                    fd.write(rel_path + ',' + str(label) + '\n')
        logger.info('Created index file: %s' % self.index_file)
| StarcoderdataPython |
1708850 | #!/usr/bin/env python3
"""Supporting utilities.
Classes
-------
.. autosummary::
ProgressBar
OptionReader
Routines
--------
.. autosummary::
read_param
round_up
evaluate_ratio
humansize
humantime
----
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import math
import os
import re
import sys
import time
def read_param(params, key, default):
    """Read and return a parameter from a dict.

    If the key `key` is absent from the dict `params`, return the
    default value `default` instead.

    Parameters
    ----------
    params : dict
        A dict containing parameters.
    key : str
        Name of the parameter, i.e., its corresponding key in `params`.
    default
        Default value for the parameter, if `key` is absent from `params`.

    Returns
    -------
    value
        If `key` in `params`, return ``params[key]``; otherwise, return
        `default` instead.

    Raises
    ------
    ValueError
        If `key` is not a string.
    """
    if not isinstance(key, str):
        raise ValueError('invalid parameter name %s' % str(key))
    # dict.get does one lookup instead of the membership-test-then-index
    # pair in `params[key] if key in params else default`.
    return params.get(key, default)
def round_up(number, ndigits=0):
    """Round a float *upward* to `ndigits` decimal places.

    Unlike the builtin ``round``, the result is always the smallest float
    at the requested precision that is greater than or equal to `number`.

    Parameters
    ----------
    number : float
        Number to be rounded up.
    ndigits : int, optional
        Number of decimal digits in the result. Default is 0.

    Returns
    -------
    float

    Examples
    --------
    >>> round_up(3.14159)
    4.0
    >>> round_up(3.14159, ndigits=2)
    3.15
    >>> round_up(-3.14159, ndigits=4)
    -3.1415
    """
    scale = 10 ** ndigits
    return math.ceil(number * scale) / scale
# patterns numerator:denominator and numerator/denominator
_NUM_COLON_DEN = re.compile(r'^([1-9][0-9]*):([1-9][0-9]*)$')
_NUM_SLASH_DEN = re.compile(r'^([1-9][0-9]*)/([1-9][0-9]*)$')


def evaluate_ratio(ratio_str):
    """Evaluate a ratio written as ``num:den`` or ``num/den``.

    Numerator and denominator must both be positive integers (no leading
    zeros).

    Parameters
    ----------
    ratio_str : str
        The ratio as a string, either ``'num:den'`` or ``'num/den'``.

    Returns
    -------
    ratio : float or None
        The ratio as a float, or ``None`` if `ratio_str` is malformed.

    Examples
    --------
    >>> evaluate_ratio('16:9')
    1.7777777777777777
    >>> evaluate_ratio('16/9')
    1.7777777777777777
    >>> print(evaluate_ratio('0/9'))
    None
    """
    for pattern in (_NUM_COLON_DEN, _NUM_SLASH_DEN):
        match = pattern.match(ratio_str)
        if match:
            return int(match.group(1)) / int(match.group(2))
    return None
def humansize(size):
    """Return a human readable string for the given size in bytes.

    Uses binary (1024-based) unit prefixes. Precision shrinks as the
    mantissa grows: two decimals below 10, one below 100, none at or
    above 100 (each rounded upward via ``round_up``).

    Bug fix: the original loop contained a stray ``break`` after the
    returning branches; as written it could leave the loop after the first
    division without returning (yielding ``None`` for sizes >= 1 MiB) and
    made the ``for``-``else`` fallback dead. The loop now returns from
    within the matching unit's iteration, with an explicit fallback for
    sizes beyond the ZiB range.
    """
    multiplier = 1024.0
    if size < multiplier:
        return "%dB" % size
    for unit in ['Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        size /= multiplier
        if size < multiplier:
            if size < 10:
                return "%.2f%sB" % (round_up(size, 2), unit)
            elif size < 100:
                return "%.1f%sB" % (round_up(size, 1), unit)
            else:
                return "%.0f%sB" % (round_up(size, 0), unit)
    # Larger than 1024 ZiB: keep the largest known unit rather than fail.
    return "%.1f%sB" % (round_up(size, 1), 'Zi')
def humantime(seconds, ndigits=2, one_hour_digit=False):
    """Format a duration as a human readable ``HH:MM:SS.frac`` string.

    Parameters
    ----------
    seconds : float
        Duration in seconds; must be nonnegative.
    ndigits : int, optional
        Number of digits after the decimal point for the seconds part.
        Default is 2. If 0, the decimal point is suppressed.
    one_hour_digit : bool, optional
        If ``True``, print only one hour digit for durations below ten
        hours (e.g. ``9:00:00.00``). Default is ``False``.

    Returns
    -------
    human_readable_duration : str

    Raises
    ------
    ValueError
        If `seconds` is negative.

    Examples
    --------
    >>> humantime(10.55)
    '00:00:10.55'
    >>> humantime(10.55, ndigits=0)
    '00:00:11'
    >>> humantime(86400, one_hour_digit=True)
    '24:00:00.00'
    """
    if seconds < 0:
        raise ValueError("seconds=%f is negative, "
                         "expected nonnegative value" % seconds)
    whole = int(seconds)
    hours = whole // 3600
    minutes = (whole // 60) % 60
    # Keep the fractional part attached to the seconds component.
    secs = seconds - (whole // 60) * 60
    hour_fmt = "%01d" if one_hour_digit else "%02d"
    parts = [hour_fmt % hours, "%02d" % minutes]
    if ndigits == 0:
        parts.append("%02d" % round(secs))
    else:
        sec_fmt = "%0{0}.{1}f".format(ndigits + 3, ndigits)
        parts.append(sec_fmt % secs)
    return ":".join(parts)
# default progress bar update interval, in seconds
_PROGRESS_UPDATE_INTERVAL = 1.0

# the format string for a progress bar line
#
# 0: processed size, e.g., 2.02GiB
# 1: elapsed time (7 chars), e.g., 0:00:04
# 2: current processing speed, e.g., 424MiB (/s is already hardcoded)
# 3: the bar, in the form "=====>   "
# 4: number of percent done, e.g., 99
# 5: estimated time remaining (11 chars), in the form "ETA H:MM:SS"; if
#    finished, fill with space
_FORMAT_STRING = '\r{0:>7s} {1} [{2:>7s}/s] [{3}] {4:>3s}% {5}'


class ProgressBar(object):
    """Progress bar for file processing, written to stderr.

    Construct with the total size, call :meth:`update` with the size of
    each newly processed chunk (or :meth:`force_update` to overwrite the
    processed size), and call :meth:`finish` exactly once at the end.
    Any method call after :meth:`finish` raises ``RuntimeError``.
    The output format is inspired by ``pv(1)``.

    Parameters
    ----------
    totalsize : int
        Total size, in bytes, of the file/stream to be processed.
    interval : float, optional
        Update (refresh) interval of the progress bar, in seconds.
        Default is 1.0.

    Attributes
    ----------
    totalsize : int
        Total size of file/stream, in bytes. Available throughout.
    processed : int
        Processed size. Available only during processing (deleted by
        `finish`).
    start : float
        Starting time (absolute time from ``time.time()``). Available
        throughout.
    interval : float
        Refresh interval, in seconds. Deleted by `finish`.
    elapsed : float
        Total elapsed time, in seconds. Only available after `finish`.

    Notes
    -----
    Private attributes: ``_last`` and ``_last_processed`` hold the time
    and processed size at the last refresh (deleted by `finish`);
    ``_barlen`` is the length of the bar portion; ``__finished`` tracks
    whether `finish` has been called.
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, totalsize, interval=_PROGRESS_UPDATE_INTERVAL):
        """Initialize the ProgressBar; see class docstring for parameters."""
        self.totalsize = totalsize
        self.processed = 0
        self.start = time.time()
        self.interval = interval
        self._last = self.start
        self._last_processed = 0
        self.__finished = False
        # Size the bar to the terminal width; assume 80 columns when it
        # cannot be determined (Python 2 has no os.get_terminal_size, and
        # it fails when stdout is redirected to a pipe -- relevant Python
        # bug: https://bugs.python.org/issue14841).
        try:
            ncol, _ = os.get_terminal_size()
        except (AttributeError, OSError):
            ncol = 80
        self._barlen = (ncol - 48) if ncol >= 58 else 10

    def update(self, chunk_size):
        """Register a newly processed chunk of `chunk_size` bytes.

        The processed size is clamped to `totalsize`. The display is only
        refreshed once `interval` seconds have passed since the last
        refresh (handled automatically).

        Raises
        ------
        RuntimeError
            If `finish` has already been called.
        """
        if self.__finished:
            raise RuntimeError('operation on finished progress bar')
        self.processed += chunk_size
        if self.processed > self.totalsize:
            self.processed = self.totalsize
        self._update_output()

    def force_update(self, processed_size):
        """Overwrite the processed size with `processed_size` (clamped).

        Raises
        ------
        RuntimeError
            If `finish` has already been called.
        """
        if self.__finished:
            raise RuntimeError('operation on finished progress bar')
        self.processed = processed_size
        if self.processed > self.totalsize:
            self.processed = self.totalsize
        self._update_output()

    def finish(self):
        """Finalize the progress bar; call exactly once after processing.

        Draws the 100% line, deletes the in-progress attributes
        (`processed`, `interval`, `_last`, `_last_processed`) and sets
        `elapsed`. Afterwards the instance is read-only: any further
        method call raises ``RuntimeError``.

        Raises
        ------
        RuntimeError
            If `finish` has already been called.
        """
        # pylint: disable=attribute-defined-outside-init
        if self.__finished:
            raise RuntimeError('operation on finished progress bar')
        self.elapsed = time.time() - self.start
        if self.elapsed < 0.001:
            self.elapsed = 0.001  # avoid division by zero
        del self.processed
        del self.interval
        del self._last
        del self._last_processed
        self.__finished = True
        processed_s = humansize(self.totalsize)
        elapsed_s = self._humantime(self.elapsed)
        speed_s = humansize(self.totalsize / self.elapsed)
        bar_s = '=' * (self._barlen - 1) + '>'
        percent_s = '100'
        eta_s = ' ' * 11
        sys.stderr.write(_FORMAT_STRING.format(
            processed_s, elapsed_s, speed_s, bar_s, percent_s, eta_s
        ))
        sys.stderr.write("\n")
        sys.stderr.flush()

    def _update_output(self):
        """Refresh the progress line if the refresh interval has passed.

        Raises
        ------
        RuntimeError
            If `finish` has already been called.
        """
        if self.__finished:
            raise RuntimeError('operation on finished progress bar')
        elapsed_since_last = time.time() - self._last
        if elapsed_since_last < self.interval:
            return
        if elapsed_since_last < 0.001:
            elapsed_since_last = 0.001  # avoid division by zero
        # speed since the last refresh, in bytes per second
        speed = ((self.processed - self._last_processed) / elapsed_since_last)
        # remember stats for the next refresh
        self._last = time.time()
        self._last_processed = self.processed
        # _s suffix stands for string
        processed_s = humansize(self.processed)
        elapsed_s = self._humantime(time.time() - self.start)
        speed_s = humansize(speed)
        percentage = self.processed / self.totalsize  # absolute
        percent_s = str(int(percentage * 100))
        # generate bar
        length = int(round(self._barlen * percentage))
        fill = self._barlen - length
        if length == 0:
            bar_s = " " * self._barlen
        else:
            bar_s = '=' * (length - 1) + '>' + ' ' * fill
        # ETA estimated from the current speed. Bug fix: when nothing was
        # processed since the last refresh the speed is zero, and the
        # original divided by zero here; show an unknown ETA instead.
        remaining = self.totalsize - self.processed
        if speed > 0:
            eta_s = "ETA %s" % self._humantime(remaining / speed)
        else:
            eta_s = "ETA ?:??:??"
        sys.stderr.write(_FORMAT_STRING.format(
            processed_s, elapsed_s, speed_s, bar_s, percent_s, eta_s
        ))
        sys.stderr.flush()

    @staticmethod
    def _humantime(seconds):
        """Customized humantime for ProgressBar (no fraction, 1 hour digit)."""
        return humantime(seconds, ndigits=0, one_hour_digit=True)
class OptionReader(object):
    """Resolve option values from CLI arguments, config files and defaults.

    The lookup order used by :meth:`opt` is: parsed CLI arguments, the
    requested section of the config files, the supplied defaults, and
    finally ``None``.

    Parameters
    ----------
    cli_args : argparse.Namespace, optional
        Result of ``argparse.ArgumentParser.parse_args()``; ignored when
        ``None`` (default).
    config_files : str or list, optional
        Path(s) of configuration file(s) to read; ignored when ``None``
        (default).
    section : str, optional
        Config file section to read. ``'DEFAULT'`` is a reserved name and
        is rejected. Ignored when ``None`` (default).
    defaults : dict, optional
        Default values for one or more options; ignored when ``None``
        (default).

    Raises
    ------
    configparser.Error
        If a supplied configuration file is malformed.

    Notes
    -----
    For developers: ``_cli_opts``, ``_cfg_opts`` and ``_default_opts`` are
    plain dicts holding the three option sources.
    """

    def __init__(self, cli_args=None, config_files=None, section=None,
                 defaults=None):
        """Initialize the OptionReader; see class docstring for parameters."""
        if section == 'DEFAULT':
            raise ValueError("section name DEFAULT is not allowed")
        # CLI arguments: keep only options that were actually supplied.
        self._cli_opts = {}
        if cli_args is not None:
            for key, value in cli_args.__dict__.items():
                if value is not None:
                    self._cli_opts[key] = value
        # Config files: read the requested section when both are given.
        self._cfg_opts = {}
        if config_files is not None and section is not None:
            parser = configparser.ConfigParser()
            parser.read(config_files)
            if parser.has_section(section):
                self._cfg_opts = dict(parser.items(section))
        # Defaults (kept by reference, as before).
        self._default_opts = defaults if defaults is not None else {}

    def cli_opt(self, name):
        """Return the CLI value for option `name`, or ``None`` if absent."""
        return self._cli_opts.get(name)

    def cfg_opt(self, name, opttype=None):
        """Return the config-file value for option `name`.

        Parameters
        ----------
        name : str
            Name of the option.
        opttype : {None, str, int, float, bool}
            Type to convert the raw value to. ``None`` performs no
            conversion (the result is a ``str``). For ``bool``, yes/on/1
            map to ``True`` and no/off/0 to ``False`` (case insensitive),
            like ``configparser.ConfigParser.getboolean``.

        Returns
        -------
        value
            The (converted) option value, or ``None`` if absent.

        Raises
        ------
        ValueError
            If the raw value cannot be converted to `opttype`, or if
            `opttype` is not one of the supported types.
        """
        if name not in self._cfg_opts:
            return None
        rawopt = self._cfg_opts[name]
        if opttype is None or opttype is str:
            return rawopt
        if opttype is int or opttype is float:
            return opttype(rawopt)
        if opttype is bool:
            lowered = rawopt.lower()
            if lowered in ('yes', 'on', '1'):
                return True
            if lowered in ('no', 'off', '0'):
                return False
            raise ValueError("not a boolean: %s" % rawopt)
        raise ValueError("unrecognized opttype %s" % str(opttype))

    def default_opt(self, name):
        """Return the default value for option `name`, or ``None`` if absent."""
        return self._default_opts.get(name)

    def opt(self, name, opttype=None):
        """Return the value of option `name` using the fallback chain.

        Order: CLI argument, config files (converted per `opttype`, see
        :meth:`cfg_opt`), defaults, then ``None``.

        Raises
        ------
        ValueError
            If a config value cannot be converted to `opttype`, or
            `opttype` is unsupported.
        """
        if name in self._cli_opts:
            return self._cli_opts[name]
        if name in self._cfg_opts:
            return self.cfg_opt(name, opttype)
        return self._default_opts.get(name)
| StarcoderdataPython |
35795 | <reponame>NCGThompson/damgard-jurik
#!/usr/bin/env python3
from damgard_jurik.crypto import EncryptedNumber, PrivateKeyRing, PrivateKeyShare, PublicKey, keygen
| StarcoderdataPython |
1668744 | <filename>notebooks/utils.py
import numpy as np
def _epsilon(i, j, k):
"""
Levi-Civita tensor
"""
assert i>=0 and i<3, "Index i goes from 0 to 2 included"
assert j>=0 and j<3, "Index j goes from 0 to 2 included"
assert k>=0 and k<3, "Index k goes from 0 to 2 included"
if (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]:
return +1
if (i, j, k) in [(2, 1, 0), (0, 2, 1), (1, 0, 2)]:
return -1
return 0
def _delta(i, j):
assert i!=0 or i!=1, "i index can only be 0 or 1"
assert j!=0 or j!=1, "j index can only be 0 or 1"
if i==j:
return 1
return 0
def RotationMatrix(angle: float, axis: np.array = np.array([0.0, 0.0, 1.0], dtype=np.float64)):
    """Return the 3x3 matrix rotating by `angle` (radians) about `axis`.

    Implements the Rodrigues formula componentwise:
    ``R_ij = cos(a) d_ij + (1 - cos(a)) n_i n_j - sin(a) eps_ijk n_k``.

    Parameters
    ----------
    angle : float
        Rotation angle in radians.
    axis : np.ndarray, optional
        Unit 3-vector; defaults to the z axis. (The default array is
        shared across calls but is never mutated here.)

    Fixes: removed the unused ``p1, p2, p3`` unpacking and hoisted the
    loop-invariant ``np.cos``/``np.sin`` calls out of the 3x3(x3) loops.
    """
    assert axis.dot(axis) < 1.0 + 0.0001, "axis has to be unitary vector"
    assert axis.dot(axis) > 1.0 - 0.0001, "axis has to be unitary vector"
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    one_minus_cos = 1 - cos_a
    R = np.zeros((3, 3), dtype=np.float64)
    for i in range(3):
        for j in range(3):
            R[i][j] = cos_a * _delta(i, j) + one_minus_cos * axis[i] * axis[j]
            for k in range(3):
                R[i][j] -= sin_a * _epsilon(i, j, k) * axis[k]
    return R
21942 | <filename>pythonVersion/interpolateMetm.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 21:36:32 2021
@author: rachel
"""
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
def interpolateMetm(uxsg, ufsg, uxl, ufl, m, M, fact1, fact2):
    """Resample two curves onto a common abscissa window.

    NOTE(review): variable semantics are inferred from names only --
    confirm against callers. Both (uxsg, ufsg) and (uxl, ufl) samples are
    densely PCHIP-resampled; the window [m, M] is then recomputed from
    fact1/fact2 (the incoming m, M arguments are overwritten), and the
    first curve is re-evaluated at the second curve's x positions inside
    that window.

    Returns
    -------
    y1, y2
        First-curve values interpolated at the windowed second-curve x
        positions, and the windowed second-curve values themselves.
    """
    #%%%%% interpolate on [m,M];
    xss = uxsg
    yss = ufsg
    src = [0,0]; #% (0,0) -- unused; kept from the original MATLAB port
    dest = [max(m,M),1]; #% (m,1) -- appended terminal point (max(m,M), 1)
    xss = np.hstack((xss,dest[0]))
    yss = np.hstack((yss,dest[1]))
    # Parametrize by a uniform t in [0, 1] and resample densely (5000 pts).
    t = np.linspace(0, 1, np.size(xss))
    mn=5000
    tt = np.linspace(0, 1, mn);
    xx = interpolate.PchipInterpolator(t,xss)(tt)
    yy = interpolate.PchipInterpolator(t,yss)(tt)
    # Same dense PCHIP resampling for the second curve.
    xll = uxl
    yll = ufl;
    t = np.linspace(0, 1, np.size(xll))
    mn=5000
    ttl = np.linspace(0, 1, mn);
    xxl = interpolate.PchipInterpolator(t,xll)(ttl)
    yyl = interpolate.PchipInterpolator(t,yll)(ttl)
    # Recompute the overlap window from the curve extents; the parameters
    # m and M are overwritten here.
    M_ = max(uxsg)/fact2;
    m_ = min(uxl[1:])*fact1;
    m = min(m_,M_); M = max(m_,M_);
    # Boolean mask of second-curve samples strictly inside (m, M).
    inddm = np.transpose(np.logical_and([xxl>m],[xxl<M])[0])
    y2 = yyl[inddm];xt = xxl[inddm]
    # Slice of the first curve covering [m, M] (one extra sample on the
    # left so the evaluation points are bracketed).
    indxx1 = np.where(xx>=m)[0];
    indxx1 = indxx1[0]-1;
    indxx2 = np.where(xx<=M)[0];
    indxx2 = indxx2[-1];
    ytest = yy[indxx1:indxx2+1];
    x1 = xx[indxx1:indxx2+1];
    # PCHIP requires strictly increasing x: drop duplicate abscissae.
    ux1, ux1i = np.unique(x1, return_index=True)
    x1 = x1[ux1i];
    ytest = ytest[ux1i];
    # Evaluate the first curve at the second curve's windowed x positions.
    y1 = interpolate.PchipInterpolator(x1, ytest)(xt)
    return y1, y2
4825047 | from __future__ import absolute_import, division, print_function
from cctbx.geometry_restraints.auto_linking_types import origin_ids
class linking_class(dict):
    """Lookup table mapping a linking/origin label (str) to its integer
    origin id, built from ``origin_ids`` (cctbx auto_linking_types).

    The dict itself maps ``label -> origin id``; ``self.data``
    additionally keeps the full record tuple for each label, which
    drives the ``get_*_origin_id_labels`` filters and
    ``get_geo_file_header``.
    """

    def __init__(self):
        self.data = {}
        origin_id = 0
        for oi in origin_ids:
            for i, item in oi.items():
                # First table wins: skip labels already registered.
                if item[0] in self: continue
                self[item[0]] = i #origin_id
                self.data[item[0]] = item
            origin_id+=1
        # NOTE(review): ``origin_id`` is incremented but never used; the
        # stored id is the key ``i`` of each table -- confirm before
        # removing.

    def __repr__(self):
        """Return a multi-line listing of all known origin labels."""
        outl = 'links\n'
        for i, item in sorted(self.items()):
            if type(i)==type(0) and 0:
                # Dead branch (``and 0``): per-id listing is disabled.
                outl += ' %3d\n' % (i)
                for j in item:
                    outl += ' %s\n' % (j)
            else:
                outl += ' %-20s : %s\n' % (i, item)
        return outl

    def __getitem__(self, key):
        # On a missing key, print the available labels before re-raising,
        # so the user can pick a valid one.
        try:
            return dict.__getitem__(self, key)
        except KeyError as e:
            print('''
Look for a key in the list below
%s
''' % self)
            raise e

    def get_origin_id(self, key):
        """Return the origin id for `key`; assert with a message if absent."""
        rc = self.get(key, None)
        assert rc is not None, 'linking origin id not found for "%s"' % key
        return rc

    def _get_origin_id_labels(self, internals=None):
        """Return labels sorted by origin id, filtered to those that apply
        to one internal-coordinate kind ('bonds', 'angles', ...).

        NOTE(review): with ``internals=None`` the filter raises KeyError
        (the ``ptr`` lookup happens before the None check); every public
        caller passes a concrete kind, so the default is effectively
        unused -- confirm before relying on it.
        """
        keys = list(self.keys())
        def _sort_on_values(k1, k2):
            # Order labels by their origin id values.
            if self[k1]<self[k2]: return -1
            return 1
        def _filter_on_internals(k1):
            # Map the internal kind to its index in ``item.internals``.
            ptr = {'bonds':0,
                   'angles':1,
                   'dihedrals':2,
                   'planes':3,
                   'chirals':4,
                   'parallelity':5,
                   }[internals]
            if internals is None: return True
            if ptr in self.data[k1].internals: return True
            return False
        from functools import cmp_to_key
        keys.sort(key = cmp_to_key(_sort_on_values))
        keys = filter(_filter_on_internals, keys)
        return keys

    def get_bond_origin_id_labels(self):
        return self._get_origin_id_labels(internals='bonds')

    def get_angle_origin_id_labels(self):
        return self._get_origin_id_labels(internals='angles')

    def get_dihedral_origin_id_labels(self):
        return self._get_origin_id_labels(internals='dihedrals')

    def get_chiral_origin_id_labels(self):
        return self._get_origin_id_labels(internals='chirals')

    def get_plane_origin_id_labels(self):
        return self._get_origin_id_labels(internals='planes')

    def get_parallelity_origin_id_labels(self):
        return self._get_origin_id_labels(internals='parallelity')

    def get_geo_file_header(self, origin_id_label, internals=None):
        """Return the .geo header string for a label.

        Records with a fourth element carry a per-internal list of
        headers; pick the entry matching `internals` (``None`` falls back
        to the bonds entry). Otherwise return the label's name field.
        """
        info = self.data.get(origin_id_label, None)
        assert info
        if len(info)>=4:
            rc = info[3]
            assert type(rc)==type([])
            if internals in [None, 'bonds']: return rc[0]
            elif internals in ['angles']: return rc[1]
            elif internals in ['dihedrals']: return rc[2]
            elif internals in ['chirals']: return rc[3]
            elif internals in ['planes']: return rc[4]
            elif internals in ['parallelities']: return rc[5]
            else: assert 0
        else: return info[0]
# Smoke test: build the table from the available origin ids and dump it.
if __name__=='__main__':
    lc = linking_class()
    print(lc)
| StarcoderdataPython |
3291438 | <gh_stars>1-10
"""Constants for Plum ecoMAX test suite."""
from custom_components.plum_ecomax.const import (
CONF_CAPABILITIES,
CONF_CONNECTION_TYPE,
CONF_DEVICE,
CONF_HOST,
CONF_MODEL,
CONF_PORT,
CONF_SOFTWARE,
CONF_UID,
CONF_UPDATE_INTERVAL,
CONNECTION_TYPE_SERIAL,
CONNECTION_TYPE_TCP,
)
# Config entry data for a TCP connection.
MOCK_CONFIG_DATA = {
    CONF_CONNECTION_TYPE: CONNECTION_TYPE_TCP,
    CONF_DEVICE: "/dev/ttyUSB0",
    CONF_HOST: "example.com",
    CONF_PORT: 8899,
    CONF_UPDATE_INTERVAL: 10,
}

# Config entry data for a serial connection (no host).
MOCK_CONFIG_DATA_SERIAL = {
    CONF_CONNECTION_TYPE: CONNECTION_TYPE_SERIAL,
    CONF_DEVICE: "/dev/ttyUSB0",
    CONF_PORT: 8899,
    CONF_UPDATE_INTERVAL: 10,
}

# Device data that is added when the config entry is created.
MOCK_DEVICE_DATA = {
    CONF_UID: "D251PAKR3GCPZ1K8G05G0",
    CONF_MODEL: "EM350P2",
    CONF_SOFTWARE: "1.13.5.Z1",
    CONF_CAPABILITIES: ["fuel_burned", "heating_temp"],
}

# Complete mock config entry data (connection data merged with device data).
MOCK_CONFIG = dict(MOCK_CONFIG_DATA, **MOCK_DEVICE_DATA)
| StarcoderdataPython |
1693328 | <reponame>tarmstrong/nbdiff<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'The NBDiff Team'
__email__ = '<EMAIL>'
__version__ = '1.0.4'
| StarcoderdataPython |
4825490 | <gh_stars>10-100
"""Defines prop default values."""
# Primitive-shape props: extents/radii and masses. Presumably SI units
# (meters / kilograms) -- TODO confirm against the consuming simulator.
_BOX_WIDTH = 0.04
_BOX_HEIGHT = 0.04
_BOX_DEPTH = 0.04
_BOX_MASS = 0.02

_SPHERE_RADIUS = 0.02
_SPHERE_MASS = 0.02

_CYLINDER_RADIUS = 0.02
_CYLINDER_LENGTH = 0.06
_CYLINDER_MASS = 0.02

_CAPSULE_RADIUS = 0.02
_CAPSULE_LENGTH = 0.06
_CAPSULE_MASS = 0.02

# Mesh-based props: overall scale, base mass, and per-axis size
# multipliers (sx/sy/sz) applied to the base mesh.
_TOTE_SCALE = 1.0
_TOTE_BASE_MASS = 1.3608
_TOTE_BASE_SX = 1.7
_TOTE_BASE_SY = 1.7
_TOTE_BASE_SZ = 1.1

_CUP_SCALE = 1.0
_CUP_BASE_MASS = 0.1
_CUP_BASE_SX = 1.0
_CUP_BASE_SY = 1.0
_CUP_BASE_SZ = 1.0
164195 | <gh_stars>1-10
from output.models.nist_data.atomic.positive_integer.schema_instance.nistschema_sv_iv_atomic_positive_integer_min_inclusive_5_xsd.nistschema_sv_iv_atomic_positive_integer_min_inclusive_5 import NistschemaSvIvAtomicPositiveIntegerMinInclusive5
# Re-export the generated schema class as this package's public API.
__all__ = [
    "NistschemaSvIvAtomicPositiveIntegerMinInclusive5",
]
| StarcoderdataPython |
3202644 | <reponame>unixfy/summercamp18<filename>Python/test.py
def print_name(name, loop):
    """Print the user's name `loop` times.

    Fix: the function used to be defined inside the ``__main__`` guard and
    closed over a global; it now takes the name explicitly and is defined
    at module level so it can be imported and tested.
    """
    for _ in range(loop):
        print("DID YOU KNOW? YOUR NAME IS %s" % (name))


if __name__ == "__main__":
    user_name = input("TELL ME YOUR NAME USER! ")
    print_name(user_name, 10)
3370200 | import os
import re
import locale
import sqlite3
import zipfile
import click
import arrow
from wtforms import Field
from jinja2 import Environment, StrictUndefined, FileSystemLoader
import ruamel.yaml
import IPython
from .app import create_app
from .submitter import sendmail
from .database import (
init_db, export as export_db, import_clean_json, add_user,
rehash_passwords as force_rehash_passwords)
from .models import Customer
# The ``currency`` template filter below relies on the US-English locale
# being active for dollar formatting.
locale.setlocale(locale.LC_ALL, 'en_US.utf8')

# Application instance the CLI commands and template filters attach to.
app = create_app()
@app.template_filter('currency')
def currency(value):
    """Jinja filter: format a number as localized currency (None -> 0)."""
    return locale.currency(value or 0, symbol=True, grouping=True, international=False)
@app.template_filter('billto')
def billto(customer_id):
    """Jinja filter: look up the billing name (name1) for a customer id."""
    customer = Customer.query.filter(Customer.id == customer_id).first()
    return customer.name1
@app.template_filter('isfield')
def is_field(item):
    """Returns `True` if the item is a WTForm Field object, False otherwise"""
    return isinstance(item, Field)
def last_backup():
    """Return the highest index of existing ``backup-NNNN.zip`` files.

    Scans ``BACKUP_DIR`` and returns the largest four-digit index found,
    or 0 when no backup exists yet.

    Fix: the original ran the same regex twice per file (once to filter,
    once to extract) and tracked the maximum by hand; compile the pattern
    once and collect the indexes in a single pass.
    """
    pattern = re.compile(r'backup-(\d{4}).zip', re.IGNORECASE)
    indexes = []
    for fname in os.listdir(app.config['BACKUP_DIR']):
        match = pattern.search(fname)
        if match:
            indexes.append(int(match.group(1)))
    return max(indexes) if indexes else 0
def create_backup(wait=True):
    """Create the next numbered zip backup of the SQLite database.

    Takes an immediate transaction so the database file cannot change
    while it is copied into ``backup-NNNN.zip`` under ``BACKUP_DIR``.

    :param wait: unused; kept for backward compatibility with callers.
    :raises Exception: when all 9999 backup slots are used.

    Fix: the sqlite connection was never closed (resource leak); it is
    now released in a ``finally`` block.
    """
    index = last_backup() + 1
    if index > 9999:
        raise Exception("No more room for backups!")
    fname = 'backup-{index:04d}.zip'.format(index=index)
    fpath = os.path.join(app.config['BACKUP_DIR'], fname)
    # Use a connection/cursor so we don't have to worry about in-progress
    # db function.
    conn = sqlite3.connect(app.config['DATABASE'])
    try:
        cursor = conn.cursor()
        cursor.execute('begin immediate')
        with zipfile.ZipFile(fpath, 'w', zipfile.ZIP_DEFLATED) as myzip:
            myzip.write(app.config['DATABASE'], '/invoicer.db')
        conn.rollback()
    finally:
        conn.close()
    click.echo(fname + " created")
def remove_older_backups(days=30):
    """
    Delete all backup files older than `days` days.

    Bug fix: the cutoff was hard-coded to ``shift(days=-30)``, silently
    ignoring the `days` parameter; it now honors the argument. Also
    compiles the filename pattern once and reports deletions through
    ``click.echo`` like the rest of the CLI (was a bare ``print``).
    """
    oldest = arrow.now().shift(days=-days).timestamp
    pattern = re.compile(r'backup-(\d{4}).zip', re.IGNORECASE)
    backup_dir = app.config['BACKUP_DIR']
    for name in os.listdir(backup_dir):
        if not pattern.search(name):
            continue
        fpath = os.path.join(backup_dir, name)
        if os.stat(fpath).st_ctime < oldest:
            click.echo("deleting " + fpath)
            os.unlink(fpath)
@app.cli.command('initdb')
@click.argument('force', default='n')
def initdb_command(force):
    """Initializes the database."""
    # A leading 'y'/'Y' in FORCE skips all prompts and seeds sample data.
    if force.lower().startswith('y'):
        init_db(True)
        click.echo('Sample data added to database.')
        click.echo('Initialized the database.')
        return
    click.echo("WARNING: Continue will delete all data in the databse")
    if not click.confirm('Do you want to continue?'):
        raise click.Abort()
    populate = click.confirm('Populate with sample data?')
    init_db(populate)
    if populate:
        click.echo('Sample data added to database.')
    click.echo('Initialized the database.')
@app.cli.command('rotate')
@click.argument('days', default=30)
def rotate(days):
    """
    Creates a new backup and possibly removes backups older than `days` days.

    NOTE: Backup creation will always happen; the code does not test for
    changes.
    """
    create_backup()
    remove_older_backups(days)
@app.cli.command('test-email')
def test_email():
    # Sends a one-off HTML message to the configured account itself to
    # verify the EMAIL_* settings end to end.
    sendmail(
        sender='<EMAIL>',  # NOTE(review): placeholder sender -- confirm the real address
        to=[app.config['EMAIL_USERNAME']],
        subject='Test email from Invoicer',
        body="<h1>Hello, World!</h1>",
        server=app.config['EMAIL_SERVER'],
        body_type="html",
        attachments=None,
        username=app.config['EMAIL_USERNAME'],
        password=app.config['EMAIL_PASSWORD'],
        starttls=True
    )
@app.cli.command('export-json')
@click.argument('path')
def export_json(path):
    """
    Export the database into JSON format.

    PATH is the destination file for the JSON dump.
    """
    export_db(path)
@app.cli.command('import-json')
@click.argument('path', type=click.Path(exists=True))
def import_json(path):
    """
    Import the JSON data into the database.

    Re-initializes the database (destroying all existing data) after an
    interactive confirmation, then loads the JSON file at PATH.
    """
    click.echo("WARNING: Continue will delete all data in the databse")
    if not click.confirm('Do you want to continue?'):
        raise click.Abort()
    init_db(False)
    import_clean_json(path)
    click.echo('JSON data has been imported')
@app.cli.command('add-user')
@click.option('--username', prompt=True)
@click.option('--password', prompt=True, hide_input=True,
              confirmation_prompt=True)
def new_user(username, password):
    # Prompts for credentials (password hidden and confirmed) and stores
    # the new user in the database.
    add_user(username=username, password=password)
    click.echo("User [%s] has been added to the database" % username)
@app.cli.command('cli')
def interactive():
    """
    Launch an interactive REPL
    """
    # Start IPython with an empty argv so it ignores the Flask CLI arguments.
    IPython.start_ipython(argv=[])
@app.cli.command('build')
def build():
    """
    Build the configuration files

    Renders each Jinja2 template in ``conf/`` with the options from
    ``instance/site.yaml`` into the ``_build`` directory.

    Fixes: the seven near-identical render sections are deduplicated into
    a single helper, and the options file handle is now closed.
    """
    conf_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'conf'))
    instance_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'instance'))
    outdir = os.path.join(conf_dir, '..', '_build')
    options_file = os.path.join(instance_dir, 'site.yaml')
    if not os.path.exists(options_file):
        click.echo('ERROR: Could not find %s' % options_file)
        click.echo('...a sample is located in `conf`')
        click.echo('...copy `conf/site.yaml` to your instance folder, and modify it as needed')
        raise click.Abort()
    with open(options_file) as fh:
        options = ruamel.yaml.safe_load(fh.read())
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    env = Environment(
        loader=FileSystemLoader(conf_dir),
        undefined=StrictUndefined)

    def _render(template_name, rel_parts):
        """Render one template into _build/<rel_parts>, creating subdirs."""
        click.echo('Creating `_build/%s' % '/'.join(rel_parts))
        dest_dir = os.path.join(outdir, *rel_parts[:-1])
        if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        content = env.get_template(template_name).render(**options)
        with open(os.path.join(dest_dir, rel_parts[-1]), 'w') as fh:
            fh.write(content)
        click.echo('...done')

    # (template under conf/, output location under _build/)
    _render('invoicer-uwsgi.ini.j2', ('invoicer-uwsgi.ini',))
    _render('invoicer-systemd.service.j2', ('invoicer-systemd.service',))
    _render('invoicer-upstream.nginx.j2', ('invoicer-upstream.nginx',))
    _render('invoicer-location.nginx.j2', ('invoicer-location.nginx',))
    _render('fail2ban/filter.d/invoicer.local.j2',
            ('fail2ban', 'filter.d', 'invoicer.local'))
    _render('fail2ban/jail.d/invoicer.local.j2',
            ('fail2ban', 'jail.d', 'invoicer.local'))
    _render('deploy.bash.j2', ('deploy.bash',))
@app.cli.command('rehash-passwords')
def rehash_passwords():
    '''
    App will rehash user passwords next time they log in
    '''
    # Flag all stored hashes for upgrade; errors are reported, not raised,
    # so the CLI exits cleanly either way.
    try:
        force_rehash_passwords()
    except Exception as exc:
        click.echo('Operation failed: %s' % exc)
    else:
        click.echo("User's passwords are set to be re-hashed")
| StarcoderdataPython |
3277263 | from builtins import object
import re
from orderedmultidict import omdict
class GroupNotExists(Exception):
    """Raised when an operation targets a group absent from the database."""

    def __str__(self):
        return "Group not exists"
class UserAlreadyInAGroup(Exception):
    """Raised when adding a user to a group they already belong to."""

    def __str__(self):
        return "User already in a group"
class UserNotInAGroup(Exception):
    """Raised when removing a user from a group they are not a member of."""

    def __str__(self):
        return "User not in a group"
class Group(object):
    """ Group object deals with group authorization files. It is passed the
    path to groupdb file.

    Intended for use as a context manager: the file is parsed on __enter__
    and written back on __exit__ only if the membership actually changed.
    File format: one ``groupname: user1 user2 ...`` line per group, with
    backslash-newline line continuations allowed.
    """
    def __init__(self, groupdb):
        # Path to the group database file; parsed lazily in __enter__.
        self.groupdb = groupdb
        # Ordered multidicts mapping group name -> member user names.
        # `initial_groups` keeps the on-disk state for change detection;
        # `new_groups` accumulates edits.
        self.initial_groups = omdict()
        self.new_groups = omdict()
    def __enter__(self):
        with open(self.groupdb, "r") as groupdb:
            # Join backslash-continued lines before splitting into records.
            groupdb = re.sub("\\\\\n", "", groupdb.read())
            for group in groupdb.splitlines():
                groupname, users = group.split(": ", 1)
                for user in users.split():
                    self.initial_groups.add(groupname, user)
        self.new_groups = self.initial_groups.copy()
        return self
    def __exit__(self, type, value, traceback):
        # Only rewrite the file when membership changed.
        if self.new_groups == self.initial_groups:
            return
        with open(self.groupdb, "w") as userdb:
            for group in self.new_groups:
                userdb.write("%s: %s\n" % (group, " ".join(self.new_groups.getlist(group))))
    def __contains__(self, group):
        return group in self.groups
    @property
    def groups(self):
        """ Returns groups in a tuple """
        # NOTE(review): despite the docstring, this returns a list -- confirm
        # callers do not rely on tuple semantics.
        return list(self.new_groups.keys())
    def is_user_in(self, user, group):
        """ Returns True if user is in a group """
        # getlist() returns [] for an unknown group, so this is safe to call
        # before checking group existence.
        return user in self.new_groups.getlist(group)
    def add_user(self, user, group):
        """ Adds user to a group """
        if self.is_user_in(user, group):
            raise UserAlreadyInAGroup
        self.new_groups.add(group, user)
    def delete_user(self, user, group):
        """ Deletes user from group """
        if not self.__contains__(group):
            raise GroupNotExists
        if not self.is_user_in(user, group):
            raise UserNotInAGroup
        self.new_groups.popvalue(group, user)
| StarcoderdataPython |
1626949 | from src.services.world_name_generators.base_world_name_generator import BaseWorldNameGenerator
from src.services.world_name_generators.txt_file_world_name_generator import TxtFileWorldNameGenerator
class WorldNameGeneratorSelector:
    """Chooses which world-name generator implementation the app uses."""
    def select_world_name_generator(self) -> BaseWorldNameGenerator:
        # Currently hard-wired to the txt-file-backed generator.
        return TxtFileWorldNameGenerator()
| StarcoderdataPython |
3370622 | <filename>coro/http/websocket.py
# -*- Mode: Python -*-
import base64
import struct
import coro
import os
import sys
import hashlib
W = coro.write_stderr
from coro.http.protocol import HTTP_Upgrade
from coro import read_stream
# RFC 6455
class WebSocketError (Exception):
    """Base class for errors raised by the WebSocket layer."""
class TooMuchData (WebSocketError):
    """Raised when a frame's declared payload length exceeds the 1 MiB cap."""
class UnknownOpcode (WebSocketError):
    """Raised when a frame carries an opcode this server does not handle."""
def do_mask (data, mask):
    """XOR *data* with the repeating 4-byte *mask* (RFC 6455 section 5.3).

    *data* is a bytes/str payload and *mask* a sequence of four integers,
    as produced by ``struct.unpack ('>BBBB', ...)``.  Returns bytes.
    """
    # bytearray(data) yields integer items on both Python 2 and 3, so the
    # XOR works without chr/ord round-trips.  (The previous implementation
    # assigned chr(...) -- a str -- into a bytearray index, which raises
    # TypeError on Python 3, where bytearray items are ints.)
    src = bytearray (data)
    masked = bytearray (len (src))
    i = 0
    while i < len (src):
        masked[i] = src[i] ^ mask[i % 4]
        i += 1
    return bytes (masked)
class ws_packet:
    """Mutable record describing one decoded WebSocket frame."""
    # Class-level defaults; read_packet() assigns per-instance values.
    fin = 0
    opcode = 0
    mask = 0
    plen = 0
    # NOTE(review): mutable class-level default, shared across instances
    # until reassigned -- read_packet() always overwrites it, so this is
    # harmless in the current call paths, but fragile.
    masking = []
    payload = ''
    def __repr__ (self):
        return '<fin=%r opcode=%r mask=%r plen=%r masking=%r payload=%d bytes>' % (
            self.fin,
            self.opcode,
            self.mask,
            self.plen,
            self.masking,
            len (self.payload),
        )
    def unpack (self):
        # Apply the client->server XOR mask if the frame carried one.
        if self.mask:
            return do_mask (self.payload, self.masking)
        else:
            return self.payload
class handler:
    """HTTP handler that upgrades matching GET requests to a WebSocket.

    Speaks both the RFC 6455 handshake and the obsolete hixie-76 one.  On a
    successful handshake the connection is handed to *factory* and
    HTTP_Upgrade is raised to pull the socket out of the HTTP server.
    """
    # Handshake GUID from RFC 6455 section 1.3, appended to the client key.
    magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
    def __init__ (self, path, factory):
        # URL path this handler answers on, and the websocket factory
        # invoked as factory(protocol, request, handler) after the handshake.
        self.path = path
        self.factory = factory
    def match (self, request):
        # try to catch both versions of the protocol
        return (
            request.path == self.path
            and request.method == 'get'
            and request['upgrade']
            and request['upgrade'].lower() == 'websocket'
        )
    def h76_frob (self, key):
        # hixie-76 key transform: the key's digits divided by its space count.
        # NOTE(review): relies on Python-2 integer division; on Python 3 this
        # yields a float and struct.pack('>L', ...) below would raise --
        # confirm the target runtime (has_key/encodestring elsewhere in this
        # file suggest Python 2).
        digits = int (''.join ([x for x in key if x in '0123456789']))
        spaces = key.count (' ')
        return digits / spaces
    def handle_request (self, request):
        """Perform the opening handshake, then hand the socket to the factory."""
        rh = request.request_headers
        key = rh.get_one ('sec-websocket-key')
        conn = request.client.conn
        if key:
            # RFC 6455: reply with base64(SHA1(key + magic)).
            d = hashlib.new ('sha1')
            d.update (key + self.magic)
            # NOTE(review): base64.encodestring was removed in Python 3.9
            # (use encodebytes there).
            reply = base64.encodestring (d.digest()).strip()
            r = [
                'HTTP/1.1 101 Switching Protocols',
                'Upgrade: websocket',
                'Connection: Upgrade',
                'Sec-WebSocket-Accept: %s' % (reply,),
            ]
            if rh.has_key ('sec-websocket-protocol'):
                # XXX verify this
                r.append (
                    'Sec-WebSocket-Protocol: %s' % (
                        rh.get_one ('sec-websocket-protocol')
                    )
                )
            conn.send ('\r\n'.join (r) + '\r\n\r\n')
            protocol = 'rfc6455'
        else:
            # for Safari, this implements the obsolete hixie-76 protocol
            # http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
            key1 = self.h76_frob (rh.get_one ('sec-websocket-key1'))
            key2 = self.h76_frob (rh.get_one ('sec-websocket-key2'))
            tail = request.client.stream.read_exact (8)
            key = struct.pack ('>L', key1) + struct.pack ('>L', key2) + tail
            d = hashlib.new ('md5')
            d.update (key)
            reply = d.digest()
            host = rh.get_one ('host')
            r = [
                'HTTP/1.1 101 WebSocket Protocol Handshake',
                'Upgrade: WebSocket',
                'Connection: Upgrade',
                'Sec-WebSocket-Origin: http://%s' % (host,),
                'Sec-WebSocket-Location: ws://%s%s' % (host, request.uri),
            ]
            all = '\r\n'.join (r) + '\r\n\r\n' + reply
            conn.send (all)
            protocol = 'hixie_76'
        # pass this websocket off to its new life...
        self.factory (protocol, request, self)
        raise HTTP_Upgrade
class websocket:
    """One upgraded WebSocket connection (RFC 6455 or legacy hixie-76).

    A reader coroutine is spawned at construction.  Subclasses override
    handle_packet() / handle_close() to implement application logic and use
    send_text() / send_binary() to write frames.
    """
    def __init__ (self, proto, http_request, handler):
        self.request = http_request
        self.handler = handler
        self.stream = http_request.client.stream
        self.conn = http_request.client.conn
        # serializes writers so interleaved frames cannot corrupt the stream
        self.send_mutex = coro.mutex()
        # tlslite has a deeply buried "except: shutdown()" clause
        # that breaks coro timeouts.
        self.tlslite = hasattr (self.conn, 'ignoreAbruptClose')
        self.proto = proto
        if proto == 'rfc6455':
            coro.spawn (self.read_thread)
        else:
            coro.spawn (self.read_thread_hixie_76)
    # ------------ RFC 6455 ------------
    def read_thread (self):
        """Read frames until close/EOF; on read timeout, send a keepalive pong."""
        close_it = False
        try:
            while 1:
                try:
                    if not self.tlslite:
                        close_it = coro.with_timeout (10, self.read_packet)
                    else:
                        close_it = self.read_packet()
                except coro.TimeoutError:
                    self.send_pong ('bleep')
                except coro.ClosedError:
                    break
                if close_it:
                    break
        finally:
            self.handle_close()
            self.conn.close()
    def read_packet (self):
        """Read and dispatch one frame; return True when the connection should close."""
        head = self.stream.read_exact (2)
        if not head:
            return True
        head, = struct.unpack ('>H', head)
        p = ws_packet()
        p.fin = (head & 0x8000) >> 15
        p.opcode = (head & 0x0f00) >> 8
        p.mask = (head & 0x0080) >> 7
        plen = (head & 0x007f) >> 0
        if plen < 126:
            pass
        elif plen == 126:
            # 16-bit extended payload length
            plen, = struct.unpack ('>H', self.stream.read_exact (2))
        else: # plen == 127:
            # 64-bit extended payload length
            plen, = struct.unpack ('>Q', self.stream.read_exact (8))
        p.plen = plen
        if plen > 1 << 20:
            raise TooMuchData (plen)
        if p.mask:
            p.masking = struct.unpack ('>BBBB', self.stream.read_exact (4))
        else:
            p.masking = None
        p.payload = self.stream.read_exact (plen)
        if p.opcode in (0, 1, 2):
            # continuation / text / binary
            return self.handle_packet (p)
        elif p.opcode == 8:
            # close
            return True
        elif p.opcode == 9:
            # ping
            assert (p.fin) # probably up to no good...
            # BUGFIX: was `self.send_pong (self, p.payload)`, which passed
            # `self` twice and raised TypeError on every ping frame.
            self.send_pong (p.payload)
            return False
        else:
            raise UnknownOpcode (p)
    # ----------- hixie-76 -------------
    def read_thread_hixie_76 (self):
        """Reader loop for the legacy hixie-76 framing."""
        self.stream = self.request.client.stream
        close_it = False
        try:
            while 1:
                try:
                    close_it = self.read_packet_hixie_76()
                except coro.ClosedError:
                    break
                if close_it:
                    break
        finally:
            self.conn.close()
    def read_packet_hixie_76 (self):
        """Read one hixie-76 frame; return True when the stream should close."""
        ftype = self.stream.read_exact (1)
        if not ftype:
            return True
        ftype = ord (ftype)
        if ftype & 0x80:
            # length-prefixed frame: 7-bits-per-byte big-endian length
            length = 0
            while 1:
                b = ord (self.stream.read_exact (1))
                length = (length << 7) | (b & 0x7f)
                if not b & 0x80:
                    break
            if length > 1 << 20:
                raise TooMuchData (length)
            if length:
                payload = self.stream.read_exact (length)
            if ftype == 0xff:
                # close frame
                return True
        else:
            # 0x00 ... 0xff delimited UTF-8 text frame
            data = self.stream.read_until (b'\xff')
            if ftype == 0x00:
                p = ws_packet()
                p.fin = 1
                p.opcode = 0x01
                p.mask = None
                p.payload = data[:-1]
                self.handle_packet (p)
    # ---
    def handle_packet (self, p):
        # abstract method, override to implement your own logic
        return False
    def handle_close (self):
        # abstract method
        pass
    def send_text (self, data, fin=True):
        return self.send_packet (0x01, data, fin)
    def send_binary (self, data, fin=True):
        return self.send_packet (0x02, data, fin)
    def send_pong (self, data):
        return self.send_packet (0x0a, data, True)
    def send_packet (self, opcode, data, fin=True):
        """Frame *data* with the given opcode and write it to the connection."""
        with self.send_mutex:
            if self.proto == 'rfc6455':
                head = 0
                if fin:
                    head |= 0x8000
                assert opcode in (0, 1, 2, 8, 9, 10)
                head |= opcode << 8
                ld = len (data)
                if ld < 126:
                    head |= ld
                    p = [struct.pack ('>H', head), data]
                elif ld < 1 << 16:
                    head |= 126
                    p = [struct.pack ('>HH', head, ld), data]
                elif ld < 1 << 32:
                    head |= 127
                    p = [struct.pack ('>HQ', head, ld), data]
                else:
                    raise TooMuchData (ld)
                # RFC6455: A server MUST NOT mask any frames that it sends to the client.
                self.writev (p)
            else:
                self.writev (['\x00', data, '\xff'])
    # for socket wrapping layers like tlslite
    def writev (self, data):
        try:
            return self.conn.writev (data)
        except AttributeError:
            return self.conn.write (''.join (data))
| StarcoderdataPython |
1626262 | from datetime import datetime, timezone
import os
from io import StringIO
from pathlib import Path
import sys
import traceback
from typing import Union, Dict, Any
from uuid import uuid4
import ecs_logging
import structlog
from structlog.contextvars import bind_contextvars, merge_contextvars, unbind_contextvars
from .akamai import AkamaiClient
from .settings import Settings, AppSettings
def _format_error(event_dict):
if exc_info := event_dict.pop("exc_info", None):
# Shamelessly lifted from stdlib's logging module
sio = StringIO()
traceback.print_exception(exc_info.__class__, exc_info, exc_info.__traceback__, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
event_dict["error"] = {
"stack_trace": s,
"message": str(exc_info),
"type": exc_info.__class__.__qualname__,
}
return event_dict
class ECSFormatter(ecs_logging.StructlogFormatter):
    """ECS structlog formatter that also folds ``exc_info`` into an ``error`` dict."""
    def format_to_ecs(self, event_dict):  # type: (Dict[str, Any]) -> Dict[str, Any]
        # Run the standard ECS mapping first, then our exception translation.
        event_dict = super(ECSFormatter, self).format_to_ecs(event_dict)
        event_dict = _format_error(event_dict)
        return event_dict
structlog.configure(
cache_logger_on_first_use=True,
processors=[
merge_contextvars,
structlog.threadlocal.merge_threadlocal_context,
# structlog.processors.add_log_level,
# structlog.processors.StackInfoRenderer(),
# structlog.processors.format_exc_info,
ECSFormatter(),
],
context_class=dict,
logger_factory=structlog.PrintLoggerFactory(),
)
EXECUTION_ID = str(uuid4())
bind_contextvars(execution_id=EXECUTION_ID)
class AppException(Exception):
    """Top-level application failure (e.g. settings could not be loaded)."""
def _get_root_cause(exc: Exception) -> str:
cause = exc
result = ""
while cause := cause.__cause__:
result = str(cause)
return result
class App:
    """Syncs Akamai SiteShield proposed CIDRs into the mapped prefix lists."""
    def __init__(self, aws_settings: AppSettings, settings: Settings):
        self._aws_settings = aws_settings
        self._settings = settings
    @classmethod
    def configure_from_env(cls, env_file: Union[None, Path, str]):
        """Load settings from *env_file*/environment and return a ready App.

        Raises AppException (chained to the original error) on any failure.
        """
        logger = structlog.get_logger(**{"event.action": "config-load", "event.category": "configuration"})
        try:
            app_settings = AppSettings(_env_file=env_file)
            settings = app_settings.fetch_settings()
            if app_settings.aws_profile:
                # Make boto pick up the configured profile for all later calls.
                os.environ["AWS_PROFILE"] = app_settings.aws_profile
        except Exception as e:
            logger.exception(
                "Failed to load settings",
                exc_info=e,
                **{"event.outcome": "failure", "event.reason": _get_root_cause(e)},
            )
            raise AppException("Failed to load settings") from e
        else:
            # NOTE(review): bind_contextvars() with no arguments binds nothing --
            # confirm whether something was meant to be bound here.
            bind_contextvars()
            logger.info("Loaded settings", **{"event.outcome": "success"})
            return cls(app_settings, settings)
    def work(self):
        """Fetch SiteShield maps, push proposed CIDRs, and acknowledge each map."""
        c = AkamaiClient(self._settings.akamai)
        logger = structlog.get_logger()
        maps_to_consider = c.list_maps()
        if not maps_to_consider:
            logger.warning("No SiteShield maps found")
            return
        else:
            logger.info("Retrieved SiteShield maps", **{"ss2pl.map.id": [m.id for m in maps_to_consider]})
        # Keep only unacknowledged maps that have a configured prefix-list mapping.
        maps_to_consider = [
            m for m in maps_to_consider if not m.acknowledged and m.id in self._settings.ss_to_pl.keys()
        ]
        if not maps_to_consider:
            logger.info("No unacknowledged maps")
            return
        for ss_map in maps_to_consider:
            pl_ref = self._settings.ss_to_pl[ss_map.id]
            # Per-map log context; unbound again in the finally below.
            context_dict = {
                "ss2pl.map.id": ss_map.id,
                "ss2pl.map.alias": ss_map.map_alias,
                "ss2pl.map.proposed_ips": [str(x) for x in ss_map.proposed_cidrs],
                "ss2pl.prefix_list.id": pl_ref.prefix_list_id,
                "ss2pl.prefix_list.name": pl_ref.name,
            }
            bind_contextvars(**context_dict)
            try:
                if not ss_map.proposed_cidrs:
                    logger.warning("Empty proposed CIDR list!")
                else:
                    # Only acknowledge after the prefix list was updated.
                    pl_ref.set_cidrs(ss_map.proposed_cidrs)
                    c.acknowledge_map(ss_map.id)
            except Exception as e:
                # One failing map must not abort the remaining maps.
                logger.exception(str(e), exc_info=e)
            finally:
                unbind_contextvars(*context_dict.keys())
if __name__ == "__main__":
start_time = datetime.now(timezone.utc)
try:
app = App.configure_from_env(".env")
app.work()
except Exception as exc:
# logger.exception(str(exc), exc_info=exc)
exit_code = 1
else:
exit_code = 0
end_time = datetime.now(timezone.utc)
duration = (end_time - start_time).total_seconds()
structlog.get_logger().info(
"Shutting down",
**{
"process.exit_code": exit_code,
"process.uptime": duration,
"process.start": start_time.isoformat(),
"process.end": end_time,
},
)
sys.exit(exit_code)
| StarcoderdataPython |
1648242 | <filename>reserway/bookings/migrations/0001_initial.py
# Generated by Django 3.1.2 on 2020-11-24 19:46
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the bookings app: stations, trains, schedules,
    passengers, tickets, coach layouts and per-seat booking-status tables.

    Auto-generated by Django's makemigrations; edit with care.
    """
    initial = True
    dependencies = [
        ('accounts', '0002_bookingagent_credit_card'),
    ]
    operations = [
        migrations.CreateModel(
            name='CoachStructureAC',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('seatNumber', models.IntegerField()),
                ('seatType', models.CharField(max_length=2)),
            ],
        ),
        migrations.CreateModel(
            name='CoachStructureSleeper',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('seatNumber', models.IntegerField()),
                ('seatType', models.CharField(max_length=2)),
            ],
        ),
        migrations.CreateModel(
            name='Passenger',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=30)),
                ('age', models.IntegerField(validators=[django.core.validators.MaxValueValidator(200), django.core.validators.MinValueValidator(1)])),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('O', 'Other')], max_length=1)),
            ],
        ),
        migrations.CreateModel(
            name='Station',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Train',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('train_name', models.CharField(max_length=30)),
                ('dest_station', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dest_station', to='bookings.station')),
                ('source_station', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='source_station', to='bookings.station')),
            ],
        ),
        migrations.CreateModel(
            name='TrainSchedule',
            fields=[
                ('journey_id', models.AutoField(primary_key=True, serialize=False)),
                ('journey_date', models.DateField()),
                ('num_ac_coaches', models.IntegerField(default=10, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)])),
                ('num_sleeper_coaches', models.IntegerField(default=10, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)])),
                ('train', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookings.train')),
            ],
        ),
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('ticketId', models.AutoField(primary_key=True, serialize=False)),
                ('seat_type', models.CharField(default='AC', max_length=10)),
                ('pnrNumber', models.CharField(max_length=12)),
                ('booking_agent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='accounts.bookingagent')),
                ('journey', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='bookings.trainschedule')),
                ('passenger1', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='ticket1', to='bookings.passenger')),
                ('passenger2', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ticket2', to='bookings.passenger')),
                ('passenger3', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ticket3', to='bookings.passenger')),
                ('passenger4', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ticket4', to='bookings.passenger')),
                ('passenger5', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ticket5', to='bookings.passenger')),
                ('passenger6', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ticket6', to='bookings.passenger')),
            ],
        ),
        migrations.CreateModel(
            name='SleeperBookingStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('coachNumber', models.IntegerField()),
                ('seatNumber', models.IntegerField()),
                ('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookings.trainschedule')),
                ('passenger', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookings.passenger')),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookings.ticket')),
            ],
        ),
        migrations.CreateModel(
            name='BookingStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('noOfACSeatsRemaining', models.IntegerField()),
                ('noOfSleeperSeatsRemaining', models.IntegerField()),
                ('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookings.trainschedule')),
            ],
        ),
        migrations.CreateModel(
            name='ACBookingStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('coachNumber', models.IntegerField()),
                ('seatNumber', models.IntegerField()),
                ('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookings.trainschedule')),
                ('passenger', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookings.passenger')),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookings.ticket')),
            ],
        ),
    ]
| StarcoderdataPython |
3251424 | <gh_stars>0
"""Write helper script to submit job to slurm
"""
import os, sys
import subprocess
import platform
import datetime
from typing import Callable, List
from colorama import init, Fore
from omegaconf.dictconfig import DictConfig
init(autoreset=True)
SLURM_CMD = """#!/bin/bash
# set a job name
#SBATCH --job-name={job_name}
#################
# a file for job output, you can check job progress
#SBATCH --output={output}
#################
# a file for errors
#SBATCH --error={error}
#################
# time needed for job
#SBATCH --time={time}
#################
# gpus per node
#SBATCH --gres=gpu:{num_gpus}
#################
# number of requested nodes
#SBATCH --nodes={num_nodes}
#################
# slurm will send a signal this far out before it kills the job
{auto_submit}
#################
# Have SLURM send you an email when the job ends or fails
#SBATCH --mail-type=FAIL
#SBATCH --mail-user={email}
# #task per node
#SBATCH --ntasks-per-node={ntasks_per_node}
#################
# #cpu per task/gpu
#SBATCH --cpus-per-task={cpus_per_task}
#################
# memory per cpu
#SBATCH --mem-per-cpu={mem_per_cpu}
#################
# extra stuff
{extra}
export PYTHONFAULTHANDLER=1
master_addr=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_ADDR=$master_addr
{module}
srun {main_cmd}
"""
def get_max_trial_version(path: str):
    """Return the next trial index based on existing ``trial_<n>_*`` files in *path*.

    Returns 0 when no trial files are present.
    """
    trial_numbers = [
        int(name.split('_')[1])
        for name in os.listdir(path)
        if 'trial_' in name
    ]
    if not trial_numbers:
        return 0
    return max(trial_numbers) + 1
def layout_path(params: DictConfig):
    """Create (if needed) the slurm logging folders under ``log_root/job``.

    Returns ``(out_path, err_path, slurm_scripts_path)``.
    """
    job_root = os.path.join(params.log_root, params.job)
    err_path = os.path.join(job_root, 'slurm_err_logs')
    out_path = os.path.join(job_root, 'slurm_out_logs')
    slurm_files_log_path = os.path.join(job_root, 'slurm_scripts')
    for folder in (err_path, out_path, slurm_files_log_path):
        if not os.path.exists(folder):
            os.makedirs(folder)
    return out_path, err_path, slurm_files_log_path
def get_argv() -> str:
    """Re-join ``sys.argv`` into one command string, quoting list/comma args.

    Tokens containing ``[`` or ``,`` (e.g. hydra list overrides) are wrapped
    in single quotes so they survive the shell in the generated sbatch script.
    """
    tokens = []
    for token in sys.argv:
        if ("[" in token) or ("," in token):
            token = "'" + token + "'"
        tokens.append(token)
    return " ".join(tokens)
def run_cluster(cfg: DictConfig, fn_main: Callable):
    """Run *fn_main* directly when already inside a slurm job, otherwise
    render an sbatch script from *cfg* and submit it via ``sbatch``."""
    slurm_params = cfg.launcher
    if slurm_params.job is None:
        slurm_params.job = cfg.model_name
    if slurm_params.from_slurm:
        # Already inside the slurm allocation: just run the job.
        fn_main(cfg)
    else:
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d__%H-%M")
        extra = ""
        out_path, err_path, slurm_files_log_path = layout_path(slurm_params)
        trial_version = get_max_trial_version(out_path)
        # Accept a bare hour count ("12") as well as a full "HH:MM:SS" walltime.
        slurm_params.time = str(slurm_params.time)
        if not ':' in slurm_params.time:
            slurm_params.time = f"{int(slurm_params.time):02d}:00:00"
        if slurm_params.auto is False:
            auto_walltime = ""
        else:
            # Ask slurm to signal the job 150 s before the walltime kill.
            auto_walltime = f"#SBATCH --signal=USR1@150"
        loaded_module = ''
        extra = ""
        node = platform.processor()
        if slurm_params.partition is not None:
            extra = f"#SBATCH --partition {slurm_params.partition} \n"
            # raise ValueError("Do not specify partition in AIMOS")
        # if node == "x86_64":
        #     PYTHON = "/gpfs/u/home/LLLD/LLLDashr/scratch/miniconda3x86_64/envs/fs_cdfsl/bin/python"
        # elif node == "ppc64le":
        #     PYTHON = "/gpfs/u/home/LLLD/LLLDashr/scratch/miniconda3ppc64le/envs/fs_cdfsl/bin/python"
        #     extra = extra + "#SBATCH --partition dcs,rpi\n"
        # if node == "ppc64le":
        #     extra = extra + "#SBATCH --partition dcs,rpi,el8,el8-rpi\n"
        # extra = extra + "conda activate fs_cdfsl \n" # FIXME check
        # Re-launch this very command line inside the job, flagging from_slurm.
        python_cmd = get_argv()
        full_command = f"python {python_cmd} launcher.from_slurm=true "
        outpath = os.path.join(out_path,
                               f'trial_{trial_version}_{timestamp}_%j.out')
        error = os.path.join(err_path, f'trial_{trial_version}_{timestamp}_%j.err')
        cmd_to_sbatch = SLURM_CMD.format(
            job_name=slurm_params.job,
            output=outpath,
            error=error,
            time=slurm_params.time,
            num_gpus=slurm_params.gpus,
            num_nodes=slurm_params.nodes,
            auto_submit=auto_walltime,
            email=slurm_params.email,
            ntasks_per_node=slurm_params.gpus,
            cpus_per_task=slurm_params.cpus_per_task,
            mem_per_cpu=slurm_params.mem_per_cpu,
            extra=extra,
            module=loaded_module,
            main_cmd=full_command,
        )
        # print(Fore.LIGHTWHITE_EX + cmd_to_sbatch)
        script = "{}/{}.sh".format(slurm_files_log_path, slurm_params.job)
        with open(script, 'w') as f:
            print(cmd_to_sbatch, file=f, flush=True)
        p = subprocess.Popen(['sbatch', script],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, _ = p.communicate()
        stdout = stdout.decode("utf-8")
        # sbatch prints "Submitted batch job <id>"; grab the trailing id.
        job_id = stdout.split(" ")[-1].strip()
        print(f"Job {job_id} is submitted.")
        print("sbatch script: ", str(script))
        print(Fore.LIGHTGREEN_EX + f"stderr : ", error.replace("%j", job_id))
        print(Fore.LIGHTYELLOW_EX + f"stdout : ", outpath.replace("%j", job_id))
        print("-" * 60, "\n\n\n")
| StarcoderdataPython |
# JSON Schema for the custom-exchange configuration file (pair, market rules
# and CSV/db input description).  The regex below is a raw string: the old
# non-raw "\." was an invalid escape sequence (SyntaxWarning on Python 3.12+),
# though the resulting value is unchanged.
schema = {
    "$schema": "http://json-schema.org/draft-06/schema#",
    "$comment": "Definition of the custom exchange with price data included in the specified CSV file.",
    "type": "object",
    "properties": {
        "baseAsset": {
            "$comment": "Base asset of the pair in the input CSV file, e.g. BTC, ETH, XLM, ADA, ...",
            "type": "string"
        },
        "quoteAsset": {
            "$comment": "Quote asset of the pair in the input CSV file, e.g. BTC, ETH, USDT, ...",
            "type": "string"
        },
        "rules": {
            "$comment": "Definition of market rules for data included in the input CSV file.",
            "type": "object",
            "properties": {
                "baseAsset": {
                    "$comment": "Base asset of the corresponding market rules, e.g. BTC, ETH, XLM, ADA, ...",
                    "type": "string"
                },
                "quoteAsset": {
                    "$comment": "Quote asset of the corresponding market rules, e.g. BTC, ETH, USDT, ...",
                    "type": "string"
                },
                "baseAssetPrecision": {
                    "$comment": "Number of decimals in the quanitity of the base asset, e.g. 2, 6, 8, ...",
                    "$ref" : "#/definitions/stringNumber",
                },
                "quoteAssetPrecision": {
                    "$comment": "Number of decimals in the price of the quote asset, e.g. 2, 6, 8, ...",
                    "$ref" : "#/definitions/stringNumber",
                },
                "minPrice": {
                    "$comment": "Minimum price.",
                    "$ref" : "#/definitions/stringNumber",
                },
                "maxPrice": {
                    "$comment": "Maximum price.",
                    "$ref" : "#/definitions/stringNumber",
                },
                "minPriceDenom": {
                    "$comment": "Step that the price can be increased/decreased by.",
                    "$ref" : "#/definitions/stringNumber",
                },
                "minQty": {
                    "$comment": "Minimum quantity",
                    "$ref" : "#/definitions/stringNumber",
                },
                "maxQty": {
                    "$comment": "Maximum quantity",
                    "$ref" : "#/definitions/stringNumber",
                },
                "minQtyDenom": {
                    "$comment": "Step that the quantity can be increased/decreased by.",
                    "$ref" : "#/definitions/stringNumber",
                },
                "minNotional": {
                    "$comment": "Minimum notial value, calculated as price * quantity.",
                    "$ref" : "#/definitions/stringNumber",
                }
            },
        },
        "data": {
            "$comment": "Section describing the input price data.",
            "type": "object",
            "properties": {
                "db": {
                    "$comment": "Db description with the input data",
                    "type": "object",
                    "properties": {
                        "tableName": {
                            "$comment": "Name of the table containing input data",
                            "type": "string"
                        }
                    },
                    "required": ["tableName"]
                },
                "fieldMap": {
                    "$comment": "Specifies structure of the input CSV file. For each of the below attributes it maps corresponding column in the CSV file (starting from 0). Each attribute should be assigned a unique number (though it is not enforced by the application).",
                    "type": "object",
                    "properties": {
                        "openTmstmp": {
                            "type": "string"
                        },
                        "open": {
                            "type": "string"
                        },
                        "high": {
                            "type": "string"
                        },
                        "low": {
                            "type": "string"
                        },
                        "close": {
                            "type": "string"
                        },
                        "volume": {
                            "type": "string"
                        },
                    },
                    "required": ["openTmstmp", "open", "high", "low", "close", "volume"]
                },
                "timeFormat": {
                    "$comment": "Defines format of the timestamps in the input CSV file. Format needs to be in Python notation (e.g. %Y/%m/%d for daily data).",
                    "type": "string",
                },
                "interval": {
                    "$comment": "Timeframe of the price data included in the input CSV file.",
                    "type": "string",
                    "enum": [
                        "1MINUTE",
                        "3MINUTE",
                        "5MINUTE",
                        "15MINUTE",
                        "30MINUTE",
                        "1HOUR",
                        "2HOUR",
                        "4HOUR",
                        "6HOUR",
                        "8HOUR",
                        "12HOUR",
                        "1DAY",
                        "3DAY",
                        "1WEEK",
                        "1MONTH"
                    ]
                },
            },
            "required": ["db", "fieldMap", "timeFormat", "interval"]
        }
    },
    "required": ["baseAsset", "quoteAsset", "rules", "data"],
    "definitions": {
        "stringNumber": {
            "type": "string",
            "pattern": r"^[0-9]+\.?[0-9]*$"
        }
    },
}
3213599 | from src.githubinfo.api.github_file import GitHubFile
from src.githubinfo.api.github_folder import GitHubFolder
from src.githubinfo.api.github_repo import GitHubRepo
class TestGitHubRepo:
    """Exercises GitHubRepo.dir() on a repo with a nested folder of files."""

    def setup_method(self):
        # Build a fixture repo: six text files at the root plus a subfolder
        # holding another six identically named files.
        self.repo = GitHubRepo("Cutewarriorlover", "test-repo")
        root = self.repo.root_folder
        for i in range(1, 7):
            root.children.append(GitHubFile(f"file_{i}.txt", root, f"Hello {i}!", "text"))
        sub = GitHubFolder(root, "folder")
        root.children.append(sub)
        for i in range(1, 7):
            sub.children.append(GitHubFile(f"file_{i}.txt", sub, f"Hello {i}!", "text"))

    def test_repo_dir(self):
        # Smoke test: listing the tree must not raise.
        self.repo.root_folder.dir()
| StarcoderdataPython |
116727 | <filename>main.py
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 05:11:54 2018
@author: zefa
"""
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from UI.main_win import Ui_MainWindow
from UI.LabelQWidget import LabelQWidget
from UI.HistPlotQWidget import HistPlotQWidget
from app.GuiControl import GuiControl
from app.helper import toQImage
class ISegViewerApp(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main window: browses a video/image sequence and shows per-label
    segmentation statistics produced by GuiControl."""
    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; prefer super(ISegViewerApp, self).
        super(self.__class__, self).__init__()
        self.setupUi(self)
        self.setupActions()
        self.setupPlotWidget()
        self.ctrl = GuiControl()
    def setupActions(self):
        """Wire menu actions, the frame slider and the label list."""
        self.actionOpen.triggered.connect(self.open)
        self.actionExit.triggered.connect(self.close)
        self.actionAbout.triggered.connect(self.about)
        self.frameSlider.valueChanged.connect(lambda frm: self.setFrame(frm))
        self.listWidget_Labels.itemSelectionChanged.connect(self.labelSelected)
    def setupPlotWidget(self):
        """Create the (initially hidden) histogram plot under the timeline."""
        self.histPlotWidget = HistPlotQWidget(self)
        self.verticalLayout_Timeline.addWidget(self.histPlotWidget)
        self.histPlotWidget.hide()
    def updatePlotWidget(self, label=None):
        """Show and redraw the histogram for *label* (or overall stats)."""
        data = self.ctrl.getStats(label)
        if data is not None:
            self.histPlotWidget.show()
            self.histPlotWidget.plot(data)
    def labelSelected(self):
        """Replot statistics for the first selected label in the list."""
        # idx = self.listWidget_Labels.selectedIndexes()
        # item = self.listWidget_Labels.itemAt(idx[0])
        # NOTE(review): scans every item instead of using selectedItems() --
        # works, but consider selectedItems() for clarity.
        for i in range(self.listWidget_Labels.count()):
            item = self.listWidget_Labels.item(i)
            if item.isSelected():
                labelWidget = self.listWidget_Labels.itemWidget(item)
                self.updatePlotWidget(labelWidget.classLabel)
                break
    def updateListControl(self):
        """
        Rebuild the label list from the controller's current labels.
        """
        self.listWidget_Labels.clear()
        for lbl in self.ctrl.getLabels():
            self.createLabelEntry(lbl)
    def createLabelEntry(self, label):
        """
        Add new entry to workspace list.
        """
        # Create workspace Widget
        myQCustomQWidget = LabelQWidget(self, label)
        # Create QListWidgetItem
        myQListWidgetItem = QtWidgets.QListWidgetItem(self.listWidget_Labels)
        # Set size hint
        myQListWidgetItem.setSizeHint(myQCustomQWidget.sizeHint())
        # Add QListWidgetItem into QListWidget
        self.listWidget_Labels.addItem(myQListWidgetItem)
        self.listWidget_Labels.setItemWidget(myQListWidgetItem, myQCustomQWidget)
    def about(self):
        """Show the About dialog."""
        QtWidgets.QMessageBox.about(self, "About ImSegU",
                "<p>The <b>Image Segmentation & Understanding</b> app uses"
                "instance segmentation to extract semantical information "
                "from the images of a video or image sequence.</p>")
    def open(self):
        """Prompt for a video/image file, load it and reset the UI state."""
        file_types = "Videos|Imgs (*.avi *.mp4 *.jpg *.png)"
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open File",
                QtCore.QDir.currentPath(), file_types)
        if fileName:
            self.ctrl.open(fileName)
            self.setFrame(0)
            self.updateListControl()
            self.updatePlotWidget()
            if self.ctrl.numberOfImages() <= 0:
                QtWidgets.QMessageBox.information(self, "Image Viewer",
                        "Cannot load %s." % fileName)
                return
    def setFrame(self, frameNumber):
        """Jump to *frameNumber*, display it and sync the slider label."""
        self.ctrl.gotoImage(frameNumber)
        image = toQImage(self.ctrl.getImage())
        self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
        self.updateSlider()
    # def updateImage(self):
    #     image = toQImage(self.ctrl.currentImage())
    #     self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
    def updateSlider(self):
        """Refresh the '<current> / <total>' label and slider range."""
        f = self.ctrl.currentFrameNumber()
        length = self.ctrl.numberOfImages()
        self.sliderLabel.setText('%i / %i' % (f+1, length))
        if length > 0:
            self.frameSlider.setMinimum(0)
            self.frameSlider.setMaximum(length-1)
    def processImage(self):
        """Run segmentation on the current image (F5)."""
        self.ctrl.processImage()
    def processImageSequence(self):
        """Run segmentation on the whole sequence (F8)."""
        self.ctrl.processImageSequence()
    def keyPressEvent(self, e):
        # F5 -> single image, F8 -> whole sequence.
        if e.key() == QtCore.Qt.Key_F5:
            self.processImage()
        elif e.key() == QtCore.Qt.Key_F8:
            self.processImageSequence()
if __name__ == '__main__':
    # Start the Qt event loop with a single main window.
    app = QtWidgets.QApplication(sys.argv)
    form = ISegViewerApp()
    form.show()
    app.exec_()
| StarcoderdataPython |
4818779 | # -*- coding: utf-8 -*-
"""
Author: <NAME>
Created: 28.02.2019
Updated: 12.03.2019
Email: <EMAIL>
# Description
The main function of this script is to update the annotation of reactions to different subsystems and pathways.
The curated annotation of subsystems and pathways is found in "ComplementaryData/curation/pathway_and_subsystem/subsystem_curated.csv".
The curation is performed by using KEGG annotations, BioCyc annotations and subsystem annotations from iMK1208. Every reaction is now annotated to
*only one* subsystem and *only one* pathway. For the reactions with none or multiple pathway annotations in KEGG or BioCyc, we have used adjacent
reactions to determine the annotation. All reactions are given a subsystem, but some reactions are missing a pathway-annotation.
In addition to the main function *update_subsystem_annotations*, there are several functions which perform the following tasks:
- get_pathways_from_biocyc: Extract pathway annotations from the BioCyc database online
- get_pathways_from_KEGG: Extract pathway annotations from the KEGG database online
- export_reaction_subsystem_and_pathways: Create a csv file with the current reaction annotations (the file used as a basis for curation)
Information about the metacyc pathway hierarchy
https://ask.pathwaytools.com/question/17981/accessing-the-pathway-hierarchy/
"""
import cobra
import pandas as pd
from pathlib import Path
import json
import logging
from collections import defaultdict
REPO_DIR = Path(__file__).parent.parent.parent
KEGG_PATHWAYS_FN = str(REPO_DIR/"ComplementaryData"/"curation"/"pathway_and_subsystem"/"kegg_pathways.json")
def update_subsystem_annotations(model, csv_fn, remove_old_subsystem_annotations = True):
    """Apply curated pathway/subsystem annotations to every reaction.

    This is the main function of this script, and the one which should be
    called from *reconstruct_scoGEM.py*.

    Parameters
    ----------
    model : cobra.Model
        Model whose reactions are (re-)annotated in place.
    csv_fn : str
        Path to the curated spreadsheet (subsystem_curated.csv); must
        contain the columns "Reaction ID", "curated pathway" and
        "curated subsystem".
    remove_old_subsystem_annotations : bool
        When True (default), drop the stale "kegg.pathway" and
        "kegg.subsystem" annotations before writing the curated ones.
    """
    df = pd.read_csv(csv_fn, sep = ",", usecols = ["Reaction ID", "curated pathway", "curated subsystem"], index_col = 0)
    for r in model.reactions:
        if remove_old_subsystem_annotations:
            # dict.pop with a default replaces the previous
            # try/pop/except-KeyError boilerplate.
            r.annotation.pop("kegg.pathway", None)
            r.annotation.pop("kegg.subsystem", None)
        new_pathway_annotation = df.loc[r.id, "curated pathway"]
        new_subsystem_annotation = df.loc[r.id, "curated subsystem"]
        # Pathways are ";"-separated in the sheet, subsystems ","-separated.
        _add_annotation(r, "pathway", new_pathway_annotation)
        _add_annotation(r, "subsystem", new_subsystem_annotation, ",")
def _add_annotation(r, key, value, delimiter = ";"):
if isinstance(value, (float, int)):
return False
if isinstance(value, str):
if len(value):
value = value.split(delimiter)
else:
return False
if isinstance(value, list):
value = [x.strip() for x in value]
if len(value) == 1:
if len(value[0]):
r.annotation[key] = value[0]
elif len(value) > 1:
print("Multiple annotations for {0}: {1}".format(r.id, value))
r.annotation[key] = value
else:
return False
else:
return False
logging.info("Added annotatin to {0}: {1}".format(r.id, r.annotation[key]))
return True
def get_pathways_from_biocyc(model, csv_fn, biocyc_subsystem_fn, db = "meta", add_annotations_to_model = True):
    """
    ** Warning: Must be run using python 2.7 and PathwayTools running in the background **
    This function use pythoncyc to extract pathway information from the BioCyc database based
    on the BioCyc annotation of each reaction. The result is a table the rows are reactions (IDs)
    and the columns are the biocyc pathway annotations. Because BioCyC use very small pathways we
    use the parent pathways as annotations.
    Some key steps are required to run this function:
    - PathwayTools must be running in the background ()
    - You need the pythoncyc package (https://github.com/latendre/PythonCyc),
      more info at https://bioinformatics.ai.sri.com/ptools/pythoncyc.html
    - Pythoncyc only works with python 2.7
    # Parameters
    - model: SBML-model (imported with cobrapy)
    - csv_fn: Where to store the created csv-file
    - biocyc_subsystem_fn: This is in general the All_pathways_of_MetaCyC.txt file, but can be
      replaced by similar csv-files.
    - db: Which db in BioCyc to use.
    - add_annotations_to_model: A flag used to turn on/off writing the biocyc annotations to the model reactions
    """
    import sys
    # NOTE(review): this asserts Python 2, yet `print(..., end=...)` below
    # requires the print function (py3 / `from __future__ import
    # print_function`, which is not imported here) -- confirm how this was run.
    assert sys.version_info[0] < 3, ("Can't use PythonCyc with python 3")
    import pythoncyc  # Add this import here, so it is only imported if used
    df_subsystem = pd.read_csv(biocyc_subsystem_fn, sep = "\t", index_col = 0)
    biocyc_db = pythoncyc.select_organism(db)
    pathway_list = []
    # NOTE(review): `[::2]` visits only every SECOND reaction -- this looks
    # like a debugging leftover; verify before relying on the output.
    for r in model.reactions[::2]:
        print(r.id, end = "\t")
        try:
            biocyc = r.annotation["biocyc"]
        except KeyError:
            # Reaction has no BioCyc cross-reference; skip it.
            print()
            continue
        # Fix erroneous annotations
        if biocyc[:5] == "META:":
            biocyc = biocyc[5:]
        r_db = biocyc_db[biocyc]
        try:
            pathways = r_db["in_pathway"]
        except TypeError:
            # Lookup returned None (id unknown in this organism db).
            print(biocyc, " is not in sco-db")
            continue
        if isinstance(pathways, list):
            sub1_list = []
            sub2_list = []
            for pathway in pathways:
                print(pathway, end = ", ")
                # BioCyc frame ids are wrapped in '|' characters.
                pwy = pathway.replace("|", "")
                try:
                    sub1 = df_subsystem.loc[pwy, "Subsystem 1"].split("//")[0].strip()
                    sub2 = df_subsystem.loc[pwy, "Subsystem 2"].split("//")[0].strip()
                except KeyError:
                    pass
                else:
                    sub1_list.append(sub1)
                    sub2_list.append(sub2)
            # NOTE(review): `";".join(pathway)` joins the CHARACTERS of the
            # last pathway string from the loop above -- almost certainly
            # `pathways` (the list) was intended.
            pathway_list.append([r.id, ";".join(pathway), ";".join(list(set(sub1_list))), ";".join(list(set(sub2_list)))])
            if len(sub1_list) and add_annotations_to_model:
                r.annotation["biocyc.subsystem1"] = list(set(sub1_list))
                r.annotation["biocyc.subsystem2"] = list(set(sub2_list))
            print(sub1_list, sub2_list)
        else:
            print("No pathways given for ", biocyc)
    df = pd.DataFrame(pathway_list)
    df.columns = ["Reaction ID", "Pathway", "Subsystem 1", "Subsystem 2"]
    df.to_csv(csv_fn)
    return model
def get_pathways_from_KEGG(model, update_existing = False):
    """
    This function extracts pathway and subsystem information from KEGG by using the KEGG annotation of each reaction.
    The pathways we use are the ones given here: https://www.genome.jp/kegg/pathway.html,
    under heading 1.: Metabolism. However we don't use the *1.0 Global and overview maps* or
    *1.12 Chemical structure and transformation maps*, because they don't
    represent metabolic subsystems. What we here refer to as *subsustems* are the subheadings under Metabolism, i.e.:
    - Carbohydrate metabolism
    - Energy metabolism
    - Lipid metabolism
    - Nucleotide metabolism
    - Amino acid metabolism
    - Metabolism of other amino acids
    - Glycan biosynthesis and metabolism
    - Metabolism of cofactors and vitamins
    - Metabolism of terpenoids and polyketides
    - Biosynthesis of other secondary metabolites
    - Xenobiotics biodegradation and metabolism

    NOTE: performs one KEGG web request per reaction (slow, needs network).
    """
    from bioservices.kegg import KEGG
    kegg = KEGG()
    kegg_dict, kegg_overview_maps = _get_KEGG_pathways()
    inverse_pathway_dict = _get_inverse_pathway_dict(kegg_dict)
    for reaction in model.reactions:
        # Skip reactions which already have an kegg.pathway annoatation
        # if update_existing = False
        if not update_existing:
            try:
                reaction.annotation["kegg.pathway"]
            except KeyError:
                pass
            else:
                # Skip this one
                continue
        try:
            kegg_id = reaction.annotation["kegg.reaction"]
        except KeyError:
            # No KEGG cross-reference for this reaction.
            continue
        kegg_info = kegg.get(kegg_id, parse = True)
        try:
            full_kegg_pathways = kegg_info["PATHWAY"].values()
        except:
            # NOTE(review): bare except -- kegg.get may return an int error
            # code or a dict without "PATHWAY"; narrowing to
            # (TypeError, KeyError) would be safer.
            continue
        # Overview maps are not real metabolic subsystems; drop them.
        kegg_pathways = [x for x in full_kegg_pathways if not x in kegg_overview_maps]
        try:
            subsystem = list(set([inverse_pathway_dict[x] for x in kegg_pathways]))
        except:
            # NOTE(review): bare except -- this hides any pathway missing
            # from the curated hierarchy (KeyError).
            print("Error!: ", reaction.id, kegg_pathways)
            continue
        print("KEGG Subsystem ", reaction.id, subsystem)
        reaction.annotation["kegg.pathway"] = kegg_pathways
        reaction.annotation["kegg.subsystem"] = subsystem
    return model
def _get_KEGG_pathways():
    """Load the curated KEGG pathway hierarchy from disk.

    Returns a ``(subsystem -> pathway list)`` mapping plus the list of
    "overview maps", which are excluded from subsystem assignment.
    """
    with open(KEGG_PATHWAYS_FN, "r") as handle:
        pathway_map = json.load(handle)
    overview_maps = pathway_map.pop("KEGG overview maps")
    return pathway_map, overview_maps
def _get_inverse_pathway_dict(kegg_dict):
new_dict = {}
for k, v in kegg_dict.items():
for v_i in v:
new_dict[v_i] = k
return new_dict
def export_reaction_subsystem_and_pathways(model, csv_fn):
    """
    Write a ";"-separated csv used for manual subsystem curation.

    Columns: Reaction ID, Reaction name, one column per annotation key
    below, plus two empty "curated pathway"/"curated subsystem" columns
    to be filled in by the curator.
    """
    annotation_keys = ["kegg.reaction", "biocyc", "kegg.pathway", "kegg.subsystem",
                       "biocyc.subsystem1", "biocyc.subsystem2", "subsystem"]
    rows = []
    for reaction in model.reactions:
        row = [reaction.id, reaction.name]
        for key in annotation_keys:
            annotation = reaction.annotation.get(key)
            if annotation is None:
                row.append(None)
            elif isinstance(annotation, str):
                row.append(annotation)
            else:
                # List-valued annotations are flattened for the sheet.
                row.append(", ".join(annotation))
        rows.append(row)
    df = pd.DataFrame(rows, columns = ["Reaction ID", "Reaction name"] + annotation_keys)
    # Empty columns for the curator to fill in.
    df["curated pathway"] = None
    df["curated subsystem"] = None
    print(df.head())
    df.to_csv(csv_fn, sep = ";")
def print_subsystem_summary(model, key = "subsystem"):
    """Print reaction counts per subsystem, broken down by model of origin.

    Origins "Sco4" and "iAA1259" get their own columns, reactions with no
    "origin" annotation are counted as "Other", everything else counts
    towards iKS1317.
    """
    totals = defaultdict(int)
    per_origin = {
        "Sco4": defaultdict(int),
        "iAA1259": defaultdict(int),
        "missing": defaultdict(int),
    }
    fallback = defaultdict(int)  # every other origin -> iKS1317 column
    for reaction in model.reactions:
        subsystem = reaction.annotation[key]
        try:
            origin = reaction.annotation["origin"]
        except KeyError:
            print(reaction)
            origin = "missing"
        per_origin.get(origin, fallback)[subsystem] += 1
        totals[subsystem] += 1
    summary = pd.DataFrame([totals, fallback, per_origin["Sco4"],
                            per_origin["iAA1259"], per_origin["missing"]]).T
    summary.columns = ["Total", "iKS1317", "Sco4", "iAA1259", "Other"]
    print(summary)
def export_gene_pathway_list(model):
    """Write a gene -> pathway table (one row per pair) to the repo root.

    Genes whose reactions carry no pathway annotation get a single row
    with Pathway = None.  Output: <repo>/model_gene_pathway_table.tsv.
    """
    gene_pathway_list = []
    maxlen = 0  # NOTE(review): never used; candidate for removal.
    for gene in model.genes:
        if gene.id == "s0001":
            # These are spontaneous reactions
            continue
        pathway_list = []
        for r in gene.reactions:
            try:
                pathway = r.annotation["pathway"]
            except:
                # NOTE(review): bare except; only KeyError is expected here.
                continue
            else:
                pathway_list.append(pathway)
        # De-duplicate pathways across this gene's reactions.
        pathway_list = list(set(pathway_list))
        if len(pathway_list):
            for pathway in pathway_list:
                gene_pathway_list.append([gene.id, pathway])
        else:
            gene_pathway_list.append([gene.id, None])
    df = pd.DataFrame(gene_pathway_list, columns = ["Gene", "Pathway"])
    df.to_csv(str(REPO_DIR / "model_gene_pathway_table.tsv"), sep = "\t")
if __name__ == '__main__':
    # Manual driver: the `if 0:` / `if 1:` flags below are edited by hand
    # to select which step of the curation workflow runs.
    model_fn = REPO_DIR / "ModelFiles" / "xml" / "Sco-GEM.xml"
    model = cobra.io.read_sbml_model(str(model_fn))
    if 0:
        # Create file used for subsystem curation
        biocyc_pwy_fn = str(REPO_DIR / "ComplementaryData" / "curation" / "pathway_and_subsystem" / "reaction_biocyc_pathway.csv")
        biocyc_subsystem_fn = str(REPO_DIR / "ComplementaryData" / "curation" / "pathway_and_subsystem" / "All_pathways_of_MetaCyc.txt")
        csv_fn = str(REPO_DIR / "ComplementaryData" / "curation" / "pathway_and_subsystem" / "subsystem.csv")
        model = get_pathways_from_biocyc(model, biocyc_pwy_fn, biocyc_subsystem_fn)
        model = get_pathways_from_KEGG(model)
        export_reaction_subsystem_and_pathways(model, csv_fn)
    if 1:
        import sys
        # NOTE(review): hard-coded, machine-specific path.
        sys.path.append("C:/Users/snorres/git/scoGEM/ComplementaryScripts")
        # import export
        # update the subsystem annotations based on the curated csv-file"
        subsystem_curated_csv = str(REPO_DIR / "ComplementaryData" / "curation" / "pathway_and_subsystem" / "subsystem_curation.csv")
        update_subsystem_annotations(model, subsystem_curated_csv)
        # export.export(model, formats = ["xml", "yml"])
    if 0:
        # Print subsystem numbers
        print_subsystem_summary(model)
    if 0:
        export_gene_pathway_list(model)
| StarcoderdataPython |
3354312 | <filename>Sketch/sketchMe.py
# coding: utf-8
# python sketchMe.py (1)path/to/input (2)path/to/target
# In[1]:
from __future__ import print_function
import numpy as np
import pandas as pd
import cv2 as cv
import os
import h5py
import sys
#import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
plt.interactive('True')
import scipy.misc
import scipy.ndimage
from tqdm import tqdm
from copy import deepcopy
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D, Convolution2D, Deconvolution2D, merge
from keras.layers.core import Activation, Dropout, Flatten, Lambda
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, Adam, Nadam
from keras.utils import np_utils
from keras.callbacks import TensorBoard
from keras import objectives, layers
from keras.applications import vgg16
from keras.applications.vgg16 import preprocess_input
from keras import backend as K
import cv2
from PIL import Image
from scipy.misc import imresize
# In[2]:
np.random.seed(1337)  # for reproducibility
# In[3]:
# Frozen VGG16 truncated at block2_conv2, used as a perceptual feature
# extractor (downloads ImageNet weights on first run).
base_model = vgg16.VGG16(weights='imagenet', include_top=False)
vgg = Model(
    input=base_model.input, output=base_model.get_layer('block2_conv2').output)
# Directory containing this script; weights/results paths are built from it.
script_dir = os.path.dirname(os.path.realpath(__file__))
# In[4]:
# def load_file_names(path):
#     return os.listdir(path)
# In[5]:
def imshow(x, gray=False):
    """Display image *x* with pyplot, optionally using the gray colormap."""
    cmap = 'gray' if gray else None
    plt.imshow(x, cmap=cmap)
    plt.show()
# In[6]:
def get_features(Y):
    """Return block2_conv2 VGG16 features for a batch of RGB images in [0, 1].

    The batch is deep-copied first because preprocess_vgg would otherwise
    not be guaranteed to leave the caller's array untouched.
    """
    batch = preprocess_vgg(deepcopy(Y))
    return vgg.predict(batch, batch_size=5, verbose=0)
# In[7]:
def preprocess_vgg(x, data_format=None):
    """Convert RGB images in [0, 1] to VGG16 input: scale to [0, 255],
    flip RGB -> BGR, and subtract the ImageNet per-channel means.

    Works on numpy arrays; the caller's array is not modified (a new
    array is created by the scaling step).
    """
    if data_format is None:
        data_format = K.image_data_format()
    assert data_format in {'channels_last', 'channels_first'}
    x = 255. * x
    # ImageNet BGR channel means used by the original VGG models.
    vgg_means = (103.939, 116.779, 123.68)
    if data_format == 'channels_first':
        x = x[:, ::-1, :, :]  # 'RGB'->'BGR'
        for channel, mean in enumerate(vgg_means):
            x[:, channel, :, :] -= mean
    else:
        x = x[:, :, :, ::-1]  # 'RGB'->'BGR'
        for channel, mean in enumerate(vgg_means):
            x[:, :, :, channel] -= mean
    return x
# In[8]:
def feature_loss(y_true, y_pred):
    """RMSE between VGG feature maps (perceptual loss term)."""
    return K.sqrt(K.mean(K.square(y_true - y_pred)))
def pixel_loss(y_true, y_pred):
    """RMSE in pixel space plus a small total-variation smoothness term."""
    return K.sqrt(K.mean(
        K.square(y_true - y_pred))) + 0.00001 * total_variation_loss(y_pred)
def adv_loss(y_true, y_pred):
    """Binary cross-entropy (adversarial loss); unused in this script."""
    return K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)
def total_variation_loss(y_pred):
    """Anisotropic total variation of the predicted image.

    NOTE: relies on the module-level image size globals ``m`` and ``n``.
    """
    if K.image_data_format() == 'channels_first':
        a = K.square(y_pred[:, :, :m - 1, :n - 1] - y_pred[:, :, 1:, :n - 1])
        b = K.square(y_pred[:, :, :m - 1, :n - 1] - y_pred[:, :, :m - 1, 1:])
    else:
        a = K.square(y_pred[:, :m - 1, :n - 1, :] - y_pred[:, 1:, :n - 1, :])
        b = K.square(y_pred[:, :m - 1, :n - 1, :] - y_pred[:, :m - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
# In[9]:
def preprocess_VGG(x, dim_ordering='default'):
    """Symbolic (Keras-tensor) twin of preprocess_vgg: scale [0,1] images
    to [0,255], subtract the ImageNet means and flip RGB -> BGR.

    Used inside the model graph (see feat_model), where numpy slicing
    assignment is not available, hence the K.variable/K.reshape route.
    """
    if dim_ordering == 'default':
        dim_ordering = K.image_dim_ordering()
    assert dim_ordering in {'tf', 'th'}
    # x has pixels intensities between 0 and 1
    x = 255. * x
    norm_vec = K.variable([103.939, 116.779, 123.68])
    if dim_ordering == 'th':
        norm_vec = K.reshape(norm_vec, (1, 3, 1, 1))
        x = x - norm_vec
        # 'RGB'->'BGR'
        x = x[:, ::-1, :, :]
    else:
        norm_vec = K.reshape(norm_vec, (1, 1, 1, 3))
        x = x - norm_vec
        # 'RGB'->'BGR'
        x = x[:, :, :, ::-1]
    return x
# In[10]:
def generator_model(input_img):
    """Build the sketch-to-photo generator: a convolutional encoder/decoder
    with additive residual blocks, mapping a 1-channel sketch to a
    3-channel image in [0, 1] (sigmoid output).
    """
    # Encoder: three 2x downsampling stages (32 -> 64 -> 128 filters),
    # then residual blocks at 256 filters.
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
    x = Conv2D(32, (2, 2), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(
        64, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(64, (2, 2), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(
        128, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(
        256, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    # Residual connections: each `res` branch is added back onto `x`.
    res = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
    x = layers.add([x, res])
    res = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
    encoded = layers.add([x, res])
    # Decoder: mirrored structure, three 2x upsampling stages back to
    # full resolution (128 -> 64 -> 32 filters).
    res = Conv2D(
        256, (3, 3), activation='relu', padding='same',
        name='block5_conv1')(encoded)
    x = layers.add([encoded, res])
    res = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
    x = layers.add([x, res])
    res = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
    x = layers.add([x, res])
    x = Conv2D(
        128, (2, 2), activation='relu', padding='same', name='block6_conv1')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(
        128, (3, 3), activation='relu', padding='same', name='block7_conv1')(x)
    res = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = layers.add([x, res])
    res = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = layers.add([x, res])
    x = Conv2D(
        64, (2, 2), activation='relu', padding='same', name='block8_conv1')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(
        64, (3, 3), activation='relu', padding='same', name='block9_conv1')(x)
    res = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = layers.add([x, res])
    res = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = layers.add([x, res])
    x = Conv2D(
        32, (2, 2), activation='relu', padding='same', name='block10_conv1')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(
        32, (3, 3), activation='relu', padding='same', name='block11_conv1')(x)
    res = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = layers.add([x, res])
    # Sigmoid keeps the generated image in [0, 1].
    decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)
    return decoded
# In[11]:
def feat_model(img_input):
    """Attach a frozen VGG16 (up to block2_conv2) to *img_input* so the
    perceptual feature loss can be computed inside the training graph."""
    # extract vgg feature
    vgg_16 = vgg16.VGG16(
        include_top=False, weights='imagenet', input_tensor=None)
    # freeze VGG_16 when training
    for layer in vgg_16.layers:
        layer.trainable = False
    vgg_first2 = Model(
        input=vgg_16.input, output=vgg_16.get_layer('block2_conv2').output)
    # Normalise generator output to VGG's expected input inside the graph.
    Norm_layer = Lambda(preprocess_VGG)
    x_VGG = Norm_layer(img_input)
    feat = vgg_first2(x_VGG)
    return feat
# In[12]:
def full_model():
    """Assemble the trainable model: sketch in, (generated image, frozen
    VGG features of that image) out -- the two heads feed pixel_loss and
    feature_loss respectively.  Relies on module-level m, n."""
    input_img = Input(shape=(m, n, 1))
    generator = generator_model(input_img)
    feat = feat_model(generator)
    model = Model(input=input_img, output=[generator, feat], name='architect')
    return model
# In[13]:
def compute_vgg():
    """Precompute VGG block2_conv2 features for all training batches and
    cache them under features/feat_<batch> as HDF5.

    NOTE(review): `num_images`, `batch_size` and `get_batch` are not
    defined anywhere in this file -- calling this function as-is raises
    NameError; it appears to be leftover from the training script.
    """
    base_model = vgg16.VGG16(weights='imagenet', include_top=False)
    model = Model(
        input=base_model.input,
        output=base_model.get_layer('block2_conv2').output)
    num_batches = num_images // batch_size
    for batch in range(num_batches):
        _, Y = get_batch(batch, X=False)
        Y = preprocess_vgg(Y)
        features = model.predict(Y, verbose=1)
        f = h5py.File('features/feat_%d' % batch, "w")
        dset = f.create_dataset("features", data=features)
# In[14]:
# Image geometry used throughout: 200x200 sketches (1 channel) and
# 200x200 RGB outputs.
m = 200
n = 200
sketch_dim = (m, n)
img_dim = (m, n, 3)
# Build the generator+VGG graph and load the pretrained weights.
model = full_model()
optim = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
model.compile(
    loss=[pixel_loss, feature_loss], loss_weights=[1, 1], optimizer=optim)
# model.load_weights('newWeights/weights_77')
model.load_weights(script_dir + '/newWeights/weights_77')
# In[15]:
def predictAndPlot(input_path, label_path):
    """Run the generator on one sketch file and (when the plotting code
    below is re-enabled) show sketch / prediction / ground-truth side by
    side.  As committed, all plotting is commented out, so this only
    computes the prediction and discards it."""
    m = 200
    n = 200
    sketch_dim = (m, n)
    img_dim = (m, n, 3)
    # Grayscale sketch, resized and scaled to [0, 1], shaped as a batch of 1.
    sketch = cv.imread(input_path, 0)
    sketch = imresize(sketch, sketch_dim)
    sketch = sketch / 255.
    sketch = sketch.reshape(1, m, n, 1)
    # Ground-truth photo (BGR, as loaded by OpenCV); only used by the
    # commented-out plotting below.
    actual = cv.imread(label_path)
    actual = imresize(actual, img_dim)
    result, _ = model.predict(sketch)
    #### Plotting ####
    # fig = plt.figure()
    # a = fig.add_subplot(1, 3, 1)
    # imgplot = plt.imshow(sketch[0].reshape(m, n), cmap='gray')
    # a.set_title('Sketch')
    # plt.axis("off")
    # a = fig.add_subplot(1, 3, 2)
    # imgplot = plt.imshow(result[0])
    # a.set_title('Prediction')
    # plt.axis("off")
    # a = fig.add_subplot(1, 3, 3)
    # plt.imshow(cv2.cvtColor(actual, cv2.COLOR_BGR2RGB))
    # a.set_title('label')
    # plt.axis("off")
    # plt.show()
# In[16]:
# predictAndPlot('rsketch/f1-001-01-sz1.jpg','rphoto/f1-001-01.jpg')
# In[23]:
def predictAndPlot2(input_path='sdata',
                    label_path='pdata',
                    num_images=1,
                    trunc=4):
    """Predict photos for up to *num_images* sketches in *input_path* and
    save each prediction to <script_dir>/results/<sketch filename>.

    *trunc* strips that many trailing characters from the sketch filename
    before appending '.jpg' to locate the matching label image (default 4
    assumes a 4-character extension like '.jpg').
    NOTE(review): assumes the results/ directory exists -- verify.
    """
    count = 0
    m = 200
    n = 200
    sketch_dim = (m, n)
    img_dim = (m, n, 3)
    for file in os.listdir(input_path):
        print(file)
        # Load + normalise the sketch exactly as during training.
        sketch = cv.imread(str(input_path + '/' + file), 0)
        print(sketch.shape)
        sketch = imresize(sketch, sketch_dim)
        sketch = sketch / 255.
        sketch = sketch.reshape(1, m, n, 1)
        # Matching ground-truth photo (only loaded; plotting is disabled).
        actual = cv.imread(str(label_path + '/' + file[:-trunc] + '.jpg'))
        print(str(label_path + '/' + file[:-trunc]))
        actual = imresize(actual, img_dim)
        result, _ = model.predict(sketch)
        # fig = plt.figure()
        # a = fig.add_subplot(1, 3, 1)
        # imgplot = plt.imshow(sketch[0].reshape(m,n), cmap='gray')
        # a.set_title('Sketch')
        # plt.axis("off")
        # a = fig.add_subplot(1, 3, 2)
        # imgplot = plt.imshow(result[0])
        # write_path1 = str('../images/prediction/' + file )
        write_path = script_dir + str('/results/' + file)
        plt.imsave(write_path, result[0])
        # a.set_title('Prediction')
        # plt.axis("off")
        # a = fig.add_subplot(1, 3, 3)
        act2 = cv2.cvtColor(actual, cv2.COLOR_BGR2RGB)
        # plt.imsave(write_path, act2)
        # plt.imshow(cv2.cvtColor(actual, cv2.COLOR_BGR2RGB))
        # a.set_title('label')
        # plt.axis("off")
        # plt.show()
        count += 1
        if (count == num_images):
            break
# In[25]:
#predictAndPlot2('../sdata', '../qdata',12)
# In[30]:
#predictAndPlot2('../sdata3', '../pdata3',4)
# In[ ]:
# predictAndPlot2('sdata', 'pdata', 10)
# CLI entry: argv[1] = sketch directory, argv[2] = label directory.
predictAndPlot2(sys.argv[1], sys.argv[2], 1)
| StarcoderdataPython |
76716 | <gh_stars>0
import config
config.init()
import argparse
import torch.backends.cudnn as cudnn
from data import *
from metrics import *
from utils import *
cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Config')
# Register one optional CLI flag per entry of config.PARAM, defaulting to
# the configured value.  (Done via exec so the key appears literally in
# the generated add_argument call.)
for k in config.PARAM:
    exec('parser.add_argument(\'--{0}\',default=config.PARAM[\'{0}\'], help=\'\')'.format(k))
args = vars(parser.parse_args())
# Copy back any value the user overrode on the command line.
# NOTE(review): the override interpolates the raw value into source code,
# which breaks for string values (they are pasted unquoted) -- a plain
# `config.PARAM[k] = args[k]` assignment would be both safer and simpler.
for k in config.PARAM:
    if config.PARAM[k] != args[k]:
        exec('config.PARAM[\'{0}\'] = {1}'.format(k, args[k]))
def collate(input):
    """Stack each per-key list of tensors into one batched tensor.

    Mutates *input* in place (each value list becomes a tensor with a new
    leading batch dimension) and returns the same dict.
    """
    for key, tensors in list(input.items()):
        input[key] = torch.stack(tensors, 0)
    return input
if __name__ == '__main__':
    # fetch_dataset comes from the star-import of the local `data` module.
    dataset = fetch_dataset('WikiText103')
    print(len(dataset['train'].data),len(dataset['test'].data))
| StarcoderdataPython |
3211350 | <reponame>patientzero/timage-icann2019<gh_stars>1-10
from .resnet import ResNet152, ResNet50
# Registry mapping model names (as used in configs/CLI) to their classes.
network_models_classes = {
    'resnet152': ResNet152,
    'resnet50': ResNet50
}
| StarcoderdataPython |
3218455 | import unittest
from acme import Product
from acme_report import generate_products, adj, noun
class AcmeProductTests(unittest.TestCase):
    """Making sure Acme products are the tops!

    Exercises the default attribute values and the explode() behaviour of
    acme.Product; the expected strings/thresholds are defined in the
    external `acme` module.
    """
    def test_default_product_price(self):
        """Test default product price being 10."""
        prod = Product('Test Product')
        self.assertEqual(prod.price, 10)
    def test_default_product_weight(self):
        '''Test default weight being 20'''
        prod = Product('Test Product')
        self.assertEqual(prod.weight, 20)
    def test_default_product_flammability(self):
        '''Test default flammability being 0.5'''
        prod = Product('Test Product')
        self.assertEqual(prod.flammability, 0.5)
    def test_explode(self):
        '''Test explosiveness of product'''
        # Three regimes of flammability * weight -> escalating messages.
        prod1 = Product('Test Product', flammability=0.01, weight=5)
        self.assertEqual(prod1.explode(), "...fizzle.")
        prod2 = Product('Test Product')
        self.assertEqual(prod2.explode(), "...boom!")
        prod3 = Product('Test Product', weight=100, flammability=.75)
        self.assertEqual(prod3.explode(), "...BABOOM!")
class AcmeReportTests(unittest.TestCase):
    '''Making sure Acme Reports are accuracte

    Validates acme_report.generate_products(): default batch size and
    that every generated name is an "<adjective> <noun>" pair drawn from
    the module's adj/noun word lists.
    '''
    def test_default_num_products(self):
        '''Ensure default number is 30'''
        prod = generate_products()
        self.assertEqual(len(prod), 30)
    def test_legal_names(self):
        '''Ensure product names are valid'''
        prod = generate_products()
        for product in prod:
            # Names are "<adj> <noun>"; both halves must come from the lists.
            name = product.name.split()
            self.assertIn(name[0], adj)
            self.assertIn(name[1], noun)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| StarcoderdataPython |
1745055 | # PROGRAMMING EXERCISE
import csv
import re
def read_source_file():
    """Prompt for a CSV path and return its first column as a list of strings.

    Prints the values it read; returns an empty list when the file cannot
    be opened or parsed (after printing an error message).
    """
    temperatures = []
    try:
        # Test file available in data/ folder
        # filename = "./data/temperatures.csv"
        filename = input("Please put filepath plus name here, in format 'C:\\MyFiles\\temperatures.csv' ")
        with open(filename, 'r', encoding="utf8") as f:
            # First cell of each non-empty row; empty rows are skipped so
            # they can no longer abort the read half-way through.
            temperatures = [row[0] for row in csv.reader(f) if row]
    except (OSError, UnicodeDecodeError, csv.Error):
        # Narrowed from a bare `except`, which also hid programming errors.
        print("Please provide a valid path and filename!")
    print("Input temperature values: ")
    print(temperatures)
    return temperatures
def convert_celsius_values(temp_list):
    """Return a copy of *temp_list* with Celsius entries converted.

    Values containing a Celsius temperature such as '21.5C' (up to two
    decimals, optional leading minus) become Fahrenheit strings with two
    decimals ('70.70F'); all other values pass through unchanged.
    """
    celsius_pattern = re.compile(r"(-?([0-9]+(\.[0-9][0-9]?)?))C")
    converted = []
    for entry in temp_list:
        match = celsius_pattern.search(entry)
        if not match:
            converted.append(entry)
            continue
        celsius = float(match.group(1))
        fahrenheit = (celsius * (9.0/5.0)) + 32
        converted.append("%.2fF" % fahrenheit)
    return converted
def save_final_file(final_list: list) -> None:
    """Write each converted value on its own row of ./data/outputFahrenheit.csv.

    NOTE(review): assumes the ./data directory already exists -- open()
    raises FileNotFoundError otherwise; confirm against how the script
    is deployed.
    """
    with open('./data/outputFahrenheit.csv', mode='w', encoding='utf-8', newline='') as output:
        csv_writer = csv.writer(output, delimiter=';')
        for value in final_list:
            # One single-cell row per value.  The previous
            # `str(value).split()` workaround split values containing
            # whitespace into several cells; wrapping the value in a list
            # is the intended way to write one cell.
            csv_writer.writerow([value])
# Pipeline: read raw values, convert Celsius entries, write the result.
tempList = read_source_file()
convertedListItems = convert_celsius_values(tempList)
save_final_file(convertedListItems)
| StarcoderdataPython |
1798044 | """PyVogen命令行接口"""
import json
import vogen
from typing import List
import argparse
Parser=argparse.ArgumentParser
def main():
    """Build the PyVogen argparse CLI (version / pm / config / synth
    sub-commands) and dispatch to the handler bound via set_defaults."""
    # Default handler: show the overall help text.
    def pyvogen_default(args):
        print("PyVogen命令行工具\n\npm 包管理器\nversion 显示版本信息\n\n可在此找到更多帮助:https://gitee.com/oxygendioxide/vogen")
    parser = Parser(prog='pyvogen')
    #print(parser)
    parser.set_defaults(func=pyvogen_default)
    subparsers = parser.add_subparsers(help='sub-command help')
    # Show version information.
    def showversion(args):
        import sys
        import onnxruntime
        print("pyvogen version: {}".format(vogen.__version__))
        print("onnxruntime version: {}".format(onnxruntime.__version__))
        print("python version: {}".format(sys.version))
    parser_version=subparsers.add_parser("version",help="显示版本信息")
    parser_version.set_defaults(func=showversion)
    # Package manager.
    parser_pm=subparsers.add_parser("pm",help="包管理器")
    subparsers_pm=parser_pm.add_subparsers(help='')
    # Install: choose the install backend from the --local/--online flags.
    def pm_install(args):
        from vogen import pm
        install_func=pm.install
        if(args.local):
            install_func=pm.install_local
        elif(args.online):
            install_func=pm.install_online
        for i in args.name:
            install_func(i,force=args.force)
    parser_pm_install=subparsers_pm.add_parser("install",help="安装")
    parser_pm_install.add_argument('name',type=str,nargs='+')
    parser_pm_install.add_argument('-l',"--local",action='store_true',help='从本地包安装')
    parser_pm_install.add_argument('-o',"--online",action='store_true',help="下载在线包并安装")
    parser_pm_install.add_argument('-F',"--force",action='store_true',help="强制覆盖现有文件")
    parser_pm_install.set_defaults(func=pm_install)
    # List installed voicebanks (optionally as JSON).
    def pm_list(args):
        from vogen import pm
        pkglist=pm.list()
        if(args.json):
            print(json.dumps([{"name":i} for i in pkglist]))
        else:
            print("\n".join(pkglist))
    parser_pm_list=subparsers_pm.add_parser("list",help="列出已安装音源")
    parser_pm_list.set_defaults(func=pm_list)
    parser_pm_list.add_argument("-j","--json",action='store_true',help="以json格式输出")
    # Uninstall a voicebank by id.
    def pm_uninstall(args):
        from vogen import pm
        pm.uninstall(args.id)
    parser_pm_uninstall=subparsers_pm.add_parser("uninstall",help="卸载")
    parser_pm_uninstall.add_argument("id")
    parser_pm_uninstall.set_defaults(func=pm_uninstall)
    # Configuration.
    def config(args):  # print the current configuration
        from vogen import config
        from tabulate import tabulate
        if(args.json):
            print(json.dumps(config.config))
        else:
            print(tabulate(config.config.items(),headers=["Key","Value"]))
    parser_config=subparsers.add_parser("config",help="设置")
    parser_config.set_defaults(func=config)
    parser_config.add_argument("-j","--json",action='store_true',help="以json格式输出")
    subparsers_config=parser_config.add_subparsers(help='')
    # Modify a configuration value.
    def config_set(args):
        from vogen import config
        config.set(args.key,args.value)
    parser_config_set=subparsers_config.add_parser("set",help="修改设置")
    parser_config_set.set_defaults(func=config_set)
    parser_config_set.add_argument('key',type=str)
    parser_config_set.add_argument('value',type=str)
    # Synthesize a project file to wav.
    def synth(args):
        import os
        import wavio
        # Note: rebinds the local name `synth` to the vogen.synth module;
        # the handler itself is already registered with argparse below.
        from vogen import synth
        from vogen.synth import utils
        infile=args.infile
        if(args.outfile==""):
            # Default output: replace the (assumed 4-char) extension with .wav.
            outfile=infile[:-4]+".wav"
        else:
            outfile=args.outfile
        # If the output file already exists, ask before overwriting.
        if(os.path.isfile(outfile)):
            print(outfile+" 已存在,是否覆盖?\ny:覆盖并合成 n:保留并放弃合成")
            instr=input()
            while(len(instr)==0 or not(instr[0] in ("y","n","Y","N"))):
                print("y:覆盖并合成 n:保留并放弃合成")
                instr=input()
            if(instr[0] in ("n","N")):
                return
        wavio.write(outfile,synth.synth(vogen.loadfile(infile,False)),utils.Params.fs)
    parser_synth=subparsers.add_parser("synth",help="合成")
    parser_synth.set_defaults(func=synth)
    parser_synth.add_argument("infile",type=str,help="输入文件")
    parser_synth.add_argument("outfile",type=str,nargs='?',default="",help="输出文件")
    # NOTE(review): "-F,--force" is a single option string (comma included),
    # not two aliases, and `synth` never reads args.force -- likely meant
    # add_argument("-F", "--force", ...) plus a check before prompting.
    parser_synth.add_argument("-F,--force",action="store_true",help="强制覆盖现有文件")
    args = parser.parse_args()
    #print(args)
    args.func(args)
if(__name__=="__main__"):
    # CLI entry point.
    main()
146305 | import unittest
from cred import Credential
class TestUser(unittest.TestCase):
    '''
    Test class that defines tes cases for the Credential class behaviours.
    Args:
        unittest.TestCase: TestCase class that helps in creating test cases
    '''
    def setUp(self):
        '''
        Set up method to run before each test cases.
        '''
        self.new_cred = Credential("Rehema","0708212463","shalomneema",)
    def tearDown(self):
        '''
        tearDown method that does clean up after each case has run.
        '''
        # Reset the class-level store so tests stay independent.
        Credential.credential_list = []
    def test_init(self):
        '''
        test_init test case to test if the object is initialized properly
        '''
        self.assertEqual(self.new_cred.username,"Rehema")
        self.assertEqual(self.new_cred.phone_number,"0708212463")
        # NOTE(review): setUp stores the password "shalomneema", so this
        # assertion against the literal "<PASSWORD>" appears to always
        # fail (looks like a dataset redaction artifact) -- confirm.
        self.assertEqual(self.new_cred.password,"<PASSWORD>")
    def test_save_credential(self):
        '''
        test_save_credentials test case to test if the credential
        object is saved into the credential list
        '''
        self.new_cred.save_credential()
        self.assertEqual(len(Credential.credential_list),1)
    def test_save_multiple_credential(self):
        '''
        test_save_multiple_credential to check if we we can save
        multiple contact
        objects to our credential_list
        '''
        self.new_cred.save_credential()
        test_credential= Credential("Rehema","0708212463","shalomneema")
        test_credential.save_credential()
        self.assertEqual(len(Credential.credential_list),2)
    def delete_credential(self):
        '''
        delete_credential method deletes a saved contact from the contact_list
        '''
        # NOTE(review): not a test (no `test_` prefix) and `self` here is
        # the TestCase, not a Credential -- this helper looks misplaced;
        # it probably belongs on the Credential class itself.
        Credential.credential_list.remove(self)
    def test_find_credential_by_username(self):
        '''
        test to check if we can find a contact by username and
        display information
        '''
        self.new_cred.save_credential()
        test_credential =Credential("Rehema","0708212463","shalomneema")
        test_credential.save_credential()
        found_credential = Credential.find_by_username("Rehema")
        self.assertEqual(found_credential.password,test_credential.password)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3207673 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 ASLP@NPU <NAME>
from __future__ import absolute_import
from __future__ import division
from __future__ import absolute_import
import os
import sys
import numpy as np
sys.path.append(os.path.dirname(sys.path[0]) + '/utils')
from sigproc.dsp import stft
EPSILON = np.finfo(np.float32).eps
MAX_FLOAT = np.finfo(np.float32).max
def spectrum(signal,
             sample_rate,
             frame_length=32,
             frame_shift=8,
             window_type="hanning",
             preemphasis=0.0,
             use_log=False,
             use_power=False,
             square_root_window=False):
    """Compute the spectrum magnitude of a speech signal.

    Args:
        signal: input speech signal
        sample_rate: waveform data sample frequency (Hz)
        frame_length: frame length in milliseconds
        frame_shift: frame shift in milliseconds
        window_type: type of analysis window
        preemphasis: pre-emphasis coefficient forwarded to stft
        use_log: if True, return the (clipped) log of the magnitudes
        use_power: if True, square the magnitudes (power spectrum)
        square_root_window: use the square root of the analysis window
    """
    complex_spec = stft(signal, sample_rate, frame_length, frame_shift,
                        window_type, preemphasis, square_root_window)
    magnitudes = np.absolute(complex_spec)
    if use_power:
        magnitudes = np.square(magnitudes)
    if use_log:
        # Clip away zeros/overflow so the logarithm stays finite.
        magnitudes = np.log(np.clip(magnitudes, a_min=EPSILON, a_max=MAX_FLOAT))
    return magnitudes
| StarcoderdataPython |
29760 | from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from board.feeds import EventFeed
from board.views import IndexView, ServiceView
admin.autodiscover()
# URL routing for the status-board site (old-style Django patterns()).
urlpatterns = patterns('',
# Dashboard front page.
url(r'^$', IndexView.as_view(), name='index'),
# Detail page for one monitored service, addressed by slug.
url(r'^services/(?P<slug>[-\w]+)$', ServiceView.as_view(), name='service'),
# Syndication feed of status events.
url(r'^feed$', EventFeed(), name='feed'),
# Django admin.
url(r'^admin/', include(admin.site.urls)),
)
| StarcoderdataPython |
3249157 | <reponame>fabriciopashaj/downloader-4anime
import os
from downloader_4anime import Stream, Status
from downloader_4anime.cacher import AnimeDescriptor
# Manual smoke-test script for the downloader package.
# NOTE(review): 'local' and the imported 'Status' are never used below.
local = os.path.expandvars("$HOME/python/downloader-4anime")
# Build a stream for episode 104; 1024 << 4 (= 16 KiB) is presumably the
# download chunk size -- confirm against Stream's signature.
stream = Stream(AnimeDescriptor('Naruto-Shippuden', 'v5.4animu.me', 750),
104, 1024 << 4)
print(stream)
# Register two listeners on the same event to exercise multi-listener dispatch.
stream.on('connect', lambda s: print('Listener 1'))
stream.on('connect', lambda s: print('Listener 2'))
print(stream.proxy)
print(stream.url)
# NOTE(review): the file handle passed to download() is never closed here.
print(stream.download(open('/sdcard/Naruto-Shippuden-104.mp4', 'ab+')))
| StarcoderdataPython |
1678478 | from rdr_service.api import check_ppi_data_api
from rdr_service.code_constants import FIRST_NAME_QUESTION_CODE
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.model.participant import Participant
from rdr_service.model.utils import to_client_participant_id
from tests.test_data import email_code, first_name_code
from tests.helpers.unittest_base import BaseTestCase
# Tests for the CheckPpiData API endpoint and its validation helpers.
class CheckPpiDataApiTest(BaseTestCase):
def setUp(self):
super().setUp()
# Seed the code table with the email / first-name question codes the
# validation logic looks up.
CodeDao().insert(email_code())
CodeDao().insert(first_name_code())
def test_empty_request(self):
# An empty ppi_data payload yields an empty results map, not an error.
response = self.send_post("CheckPpiData", {"ppi_data": {}})
self.assertEqual({"ppi_results": {}}, response)
def test_result_to_json(self):
# _ValidationResult serialises its counters and messages verbatim.
result = check_ppi_data_api._ValidationResult()
result.add_error("ez")
result.add_error("ea")
result.tests_count += 11
self.assertEqual({"tests_count": 11, "errors_count": 2, "error_messages": ["ez", "ea"]}, result.to_json())
def test_validation_no_answer(self):
# An unanswered first-name question counts as one test and one error.
self.participant = Participant(participantId=123, biobankId=555)
ParticipantDao().insert(self.participant)
self.participant_id = to_client_participant_id(self.participant.participantId)
summary = ParticipantSummaryDao().insert(self.participant_summary(self.participant))
result = check_ppi_data_api._get_validation_result(summary.email, {FIRST_NAME_QUESTION_CODE: "NotAnswered"})
self.assertEqual(1, result.tests_count)
self.assertEqual(1, result.errors_count)
self.assertEqual(1, len(result.messages))
self.assertIn(FIRST_NAME_QUESTION_CODE, result.messages[0])
# test using phone number as lookup value in API.
summary.loginPhoneNumber = "5555555555"
ParticipantSummaryDao().update(summary)
result = check_ppi_data_api._get_validation_result(
summary.loginPhoneNumber, {FIRST_NAME_QUESTION_CODE: "NotAnswered"}
)
self.assertEqual(1, result.tests_count)
self.assertEqual(1, result.errors_count)
self.assertEqual(1, len(result.messages))
self.assertIn(FIRST_NAME_QUESTION_CODE, result.messages[0])
| StarcoderdataPython |
4840123 | <gh_stars>0
import tensorflow as tf
from tensorflow.keras import layers
from custom_layers import Focus, Conv, BottleneckCSP, SPP, Bottleneck
from config import Configuration
cfg = Configuration()
# CSPDarknet-style backbone (YOLOv5 release 5): Focus stem, then alternating
# stride-2 convolutions and BottleneckCSP stages, with SPP before the last CSP.
# 'depth' scales how many bottlenecks each CSP repeats; 'width' scales channels.
class YOLOv5r5Backbone(tf.keras.Model):
def __init__(self, depth, width, **kwargs):
super(YOLOv5r5Backbone, self).__init__(name="Backbone", **kwargs)
self.focus = Focus(int(round(width * 64)),3)
self.conv1 = Conv(int(round(width * 128)), 3, 2)
self.conv2 = Conv(int(round(width * 256)), 3, 2)
self.conv3 = Conv(int(round(width * 512)), 3, 2)
self.conv4 = Conv(int(round(width * 1024)), 3, 2)
self.csp1 = BottleneckCSP(int(round(width * 128)), int(round(depth * 3)))
self.csp2 = BottleneckCSP(int(round(width * 256)), int(round(depth * 9)))
self.csp3 = BottleneckCSP(int(round(width * 512)), int(round(depth * 9)))
self.csp4 = BottleneckCSP(int(round(width * 1024)), int(round(depth * 3)), False)
# NOTE(review): unlike the other layers, SPP receives a float channel
# count (no int(round(...))) -- confirm SPP tolerates that.
self.spp = SPP(width * 1024)
def call(self, inputs, training=False):
x = self.focus(inputs)
x = self.conv1(x)
x = self.csp1(x)
x = self.conv2(x)
# The three taps below feed the head at three spatial resolutions.
out_1 = x = self.csp2(x)
x = self.conv3(x)
out_2 = x = self.csp3(x)
x = self.conv4(x)
x = self.spp(x)
out_3 = x = self.csp4(x)
return [out_1,out_2,out_3] # resolution high to low
# PANet-style neck/head: top-down upsampling path followed by a bottom-up
# path, concatenating backbone features at matching resolutions.
class YOLOv5r5Head(tf.keras.Model):
def __init__(self, depth, width, **kwargs):
super(YOLOv5r5Head, self).__init__(name="Head", **kwargs)
self.conv1 = Conv(int(round(width * 512)), 1, 1)
self.conv2 = Conv(int(round(width * 256)), 1, 1)
self.conv3 = Conv(int(round(width * 256)), 3, 2)
self.conv4 = Conv(int(round(width * 512)), 3, 2)
self.upsample1 = layers.UpSampling2D()
self.upsample2 = layers.UpSampling2D()
self.concat1 = layers.Concatenate(axis=-1)
self.concat2 = layers.Concatenate(axis=-1)
self.concat3 = layers.Concatenate(axis=-1)
self.concat4 = layers.Concatenate(axis=-1)
self.csp1 = BottleneckCSP(int(round(width * 512)), int(round(depth * 3)), False)
self.csp2 = BottleneckCSP(int(round(width * 256)), int(round(depth * 3)), False)
self.csp3 = BottleneckCSP(int(round(width * 512)), int(round(depth * 3)), False)
self.csp4 = BottleneckCSP(int(round(width * 1024)), int(round(depth * 3)), False)
def call(self, inputs, training=False):
bbf_1, bbf_2, bbf_3 = inputs # backbone features
# Top-down path: upsample the deepest features, merge with shallower ones.
x1 = x = self.conv1(bbf_3)
x = self.upsample1(x)
x = self.concat1([x,bbf_2])
x = self.csp1(x)
x2 = x = self.conv2(x)
x = self.upsample2(x)
x = self.concat2([x,bbf_1])
out_1 = x = self.csp2(x)
# Bottom-up path: downsample again and fuse the saved lateral features.
x = self.conv3(x)
x = self.concat3([x,x2])
out_2 = x = self.csp3(x)
x = self.conv4(x)
x = self.concat4([x,x1])
out_3 = x = self.csp4(x)
return [out_1, out_2, out_3] # resolution high to low
class YOLOv5(tf.keras.Model):
    """Full YOLOv5 detector: backbone + head + one prediction conv per stride.

    Each output is reshaped to
    (batch, h, w, anchors_per_stride, num_classes + 5).
    """

    def __init__(self, version='s', training=False, **kwargs):
        super(YOLOv5, self).__init__(name="YOLOv5", **kwargs)
        self.training = training
        # Depth/width multipliers are looked up per model size ('s', 'm', ...).
        depth = cfg.depth[cfg.version.index(version)]
        width = cfg.width[cfg.version.index(version)]
        self.backbone = YOLOv5r5Backbone(depth, width)
        self.head = YOLOv5r5Head(depth, width)
        # BUG FIX: the original used name=f'out_{1}' so every stride's conv got
        # the identical layer name 'out_1'; use the loop index instead.
        self.convs = [layers.Conv2D(cfg.anchors_per_stride * (cfg.num_classes + 5), 1, name=f'out_{i}',
                                    kernel_initializer=tf.random_normal_initializer(stddev=0.01),
                                    kernel_regularizer=tf.keras.regularizers.L2(5e-4)) for i in range(cfg.num_strides)]
        # Split the flat channel dimension into (anchor, class+box) axes.
        self.reshape_layers = [layers.Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],
                                             cfg.anchors_per_stride, cfg.num_classes + 5))) for i in range(cfg.num_strides)]

    def call(self, image, training=False):
        backbone_features = self.backbone(image, training=training)
        out_features = self.head(backbone_features)
        for i in range(cfg.num_strides):
            out_features[i] = self.convs[i](out_features[i])
            out_features[i] = self.reshape_layers[i](out_features[i])
        return out_features
# model = YOLOv5('s')
# out = model(tf.random.normal((2,640,640,3),dtype=tf.dtypes.float32), training=True)
# print(model.summary())
| StarcoderdataPython |
3301530 | <reponame>closerbibi/faster-rcnn_hha<gh_stars>0
import parseLoss as pl
import pdb
import matplotlib.pyplot as plt
import matplotlib as mt
#mt.use('Agg')
#plt.ioff()
# Quick plot of training loss for a Faster R-CNN log file.
# NOTE(review): 'pdb' and 'mt' are imported but unused (mt.use is commented out).
logname = 'train_rankpooling' # remember to change this
path = '/home/closerbibi/bin/faster-rcnn/logfile/%s.log' % logname
# 'loss' is presumably an iteration -> loss mapping returned by parseLoss;
# .values()/.keys() below rely on matching iteration order -- confirm.
loss = pl.loadfile(path)
fig = plt.figure()
plt.bar(range(len(loss)), loss.values(), align='center')
plt.xticks(range(len(loss)), loss.keys())
plt.ylabel('loss')
plt.xlabel('iteration')
fig.savefig('loss_%s.png'% logname)
#fig.savefig('~/Dropbox/schoolprint/lab/meeting/meeting19092016/loss_lrdot05.png')
| StarcoderdataPython |
52189 | <filename>mean_hr_bpm.py
def mean_beats(threshold=0.7, voltage_array=None, time_array=None):
    """Return the average heart rate in beats per minute.

    Peaks in the voltage trace are located with ``peakutils`` using the given
    relative *threshold*; the rate is the peak count divided by the strip
    duration (the final time value), scaled to one minute.

    :param threshold: value for peak voltage threshold
    :param voltage_array: array of voltage values
    :param time_array: array of time values (seconds); last entry = duration
    :type threshold: float
    :type voltage_array: ndarray, none
    :type time_array: ndarray, none
    :return: avg_beats
    :rtype: float
    """
    import peakutils
    import logging
    from logging import config  # needed so logging.config below resolves
    # NOTE(review): reconfiguring logging on every call is a side effect this
    # function should probably not own; consider moving to application setup.
    logging.config.fileConfig('logger_config.ini', disable_existing_loggers=False)
    indexes = peakutils.indexes(voltage_array, thres=threshold)
    number_beats = len(indexes)
    # BUG FIX (idiom): negative indexing replaces time_array[len(time_array) - 1].
    duration_beats = time_array[-1]
    avg_beats = (number_beats / duration_beats) * 60
    logging.info(avg_beats)
    return avg_beats
| StarcoderdataPython |
1690912 | <gh_stars>0
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.utils import timezone
from mptt.models import MPTTModel, TreeForeignKey
from waherb.utils import AuditMixin, ActiveMixin, smart_truncate
class Reference(AuditMixin, ActiveMixin):
"""A reference from which taxonomic-relevant information is extracted and used to classify
specimens into taxonomic groups.
May or may not be associated with a Reference object in the NSL database.
"""
# Full publication title; truncated for display by __str__.
title = models.CharField(
max_length=1024, help_text='The full title of the publication in which this reference was published.')
# Optional link to the corresponding NSL record.
nsl_url = models.URLField(max_length=256, blank=True, null=True)
# Free-form extra attributes stored as JSON.
metadata = JSONField(default=dict, blank=True)
def __str__(self):
return smart_truncate(self.title)
# Django choices for Name.rank: (stored value, human label) pairs covering the
# taxonomic hierarchy, alphabetically ordered.
TAXONOMIC_RANK_CHOICES = (
('Class', 'Class'),
('Division', 'Division'),
('Family', 'Family'),
('Form', 'Form'),
('Genus', 'Genus'),
('Kingdom', 'Kingdom'),
('Order', 'Order'),
('Phylum', 'Phylum'),
('Species', 'Species'),
('Subclass', 'Subclass'),
('Subfamily', 'Subfamily'),
('Subspecies', 'Subspecies'),
('Subvariety', 'Subvariety'),
('Unknown', 'Unknown'),
('Variety', 'Variety'),
)
class Name(MPTTModel, AuditMixin):
"""This model represents a name for a taxonomic grouping that has been published in the
scientific literature.
May or may not be associated with a Name object in the NSL database.
NOTE: we can't use ActiveMixin with this model class, as the custom Manager messes with the
TreeManager class that MPTTModel provides.
We just add the effective_to field on the model manually, plus any other methods needed.
"""
name = models.CharField(
max_length=512, unique=True, help_text='A name that has been validly published in a reference.')
rank = models.CharField(
max_length=64, db_index=True, choices=TAXONOMIC_RANK_CHOICES,
help_text='The relative position of a taxon in the taxonomic hierarchy.')
# MPTT tree parent: the containing taxon (e.g. a species' genus).
parent = TreeForeignKey('self', on_delete=models.PROTECT, blank=True, null=True, related_name='children')
# The original name this one is based on, if any.
basionym = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True, related_name='basionym_of')
references = models.ManyToManyField(
Reference, blank=True, help_text='Published references containing an instance of this name.')
nsl_url = models.URLField(max_length=256, blank=True, null=True)
metadata = JSONField(default=dict, blank=True)
# Soft-delete timestamp (replaces ActiveMixin; see class docstring).
effective_to = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.name
# Soft delete by default; pass delete_permanent=True to really delete.
def delete(self, *args, **kwargs):
if 'delete_permanent' in kwargs and kwargs['delete_permanent']:
kwargs.pop('delete_permanent')
super().delete(*args, **kwargs)
else:
# NOTE(review): this forwards delete()'s *args/**kwargs (e.g. 'using',
# 'keep_parents') to save(), which takes different parameters -- a call
# like obj.delete(keep_parents=True) would raise here. Confirm and fix.
self.effective_to = timezone.now()
super().save(*args, **kwargs)
| StarcoderdataPython |
1628462 | from enum import Enum
from types import ModuleType, new_class
from inspect import Signature, Parameter
from .error import UnknownType
from .util import TypeWrapper
# Empty marker base class for every class produced by
# create_record_implementation -- presumably so callers can test
# isinstance(obj, RecordType); confirm against call sites.
class RecordType:
pass
def create_enum_implementation(enum, name_converter):
    """Build a Python stdlib Enum class for a schema enum definition.

    Member names are produced by ``name_converter.enum_field_name`` and member
    values are the positions of the original values in ``enum.values``.
    """
    class_name = name_converter.enum_name(enum.name)
    members = {}
    for position, value in enumerate(enum.values):
        members[name_converter.enum_field_name(value)] = position
    return Enum(class_name, members)  # Note: python stdlib Enum!
def create_record_implementation(record, name_converter):
    """Create a RecordType subclass for a schema record definition.

    The generated class has an ``__init__`` whose signature mirrors the
    record's fields (names run through *name_converter*, annotated with
    TypeWrapper); each bound argument is stored as an instance attribute of
    the same name.
    """
    name = name_converter.record_name(record.name)
    params = [Parameter(name_converter.parameter_name(field_name),
                        Parameter.POSITIONAL_OR_KEYWORD,
                        annotation=TypeWrapper(tp, name_converter))
              for tp, field_name in record.fields]
    params.insert(0, Parameter('self', Parameter.POSITIONAL_ONLY,
                               annotation=TypeWrapper(record, name_converter)))
    __signature__ = Signature(parameters=params)

    def __init__(self, *args, **kwargs):
        bound_values = __signature__.bind(self, *args, **kwargs)
        for key, val in bound_values.arguments.items():
            # BUG FIX: the original also executed setattr(self, 'self', self),
            # storing a self-reference under the attribute name 'self'.
            if key != 'self':
                setattr(self, key, val)

    __init__.__signature__ = __signature__
    return new_class(name, (RecordType,), {},
                     lambda ns: ns.update(dict(__init__=__init__)))
# Builds concrete Python classes for every enum and record in a schema and
# serves them up both as a lookup (via __call__) and as a synthetic module.
class EnumRecordImplementation:
def __init__(self, schema, name_converter):
self.name_converter = name_converter
# Map each schema type object to its generated Python class.
self.types = {}
for enum in schema.enums:
self.types[enum] = create_enum_implementation(enum, name_converter)
for record in schema.records:
self.types[record] = create_record_implementation(record,
name_converter)
# Expose every generated class as an attribute of a synthetic module.
self.impl = ModuleType('enum_record_implementation')
for typ, impl in self.types.items():
setattr(self.impl, impl.__name__, impl)
# Look up the generated class for a schema type; raises for unknown types.
def __call__(self, typ):
if typ not in self.types:
raise UnknownType(typ)
return self.types[typ]
| StarcoderdataPython |
3283378 | <gh_stars>1-10
"""XPath lexing rules.
To understand how this module works, it is valuable to have a strong
understanding of the `ply <http://www.dabeaz.com/ply/>` module.
"""
from __future__ import unicode_literals
# Keyword operators: these NCName-shaped words are re-tagged by the lexer or
# parser with dedicated token types instead of plain NCNAME.
operator_names = {
'or': 'OR_OP',
'and': 'AND_OP',
'div': 'DIV_OP',
'mod': 'MOD_OP',
'intersect': 'INTERSECT_OP',
'stream': 'PIPELINE_OP'
}
# Full token inventory for ply; keyword-operator tokens are appended from the
# mapping above.
tokens = [
'PATH_SEP',
'ABBREV_PATH_SEP',
'ABBREV_STEP_SELF',
'ABBREV_STEP_PARENT',
'AXIS_SEP',
'ABBREV_AXIS_AT',
'OPEN_PAREN',
'CLOSE_PAREN',
'OPEN_BRACKET',
'CLOSE_BRACKET',
'UNION_OP',
'EQUAL_OP',
'REL_OP',
'INTERSECT_OP',
'PLUS_OP',
'MINUS_OP',
'MULT_OP',
'STAR_OP',
'COMMA',
'LITERAL',
'FLOAT',
'INTEGER',
'NCNAME',
'NODETYPE',
'FUNCNAME',
'AXISNAME',
'COLON',
'DOLLAR',
] + list(operator_names.values())
# Simple (regex-string) token rules; ply matches longer patterns first, so
# '//' wins over '/' and '::' over ':'.
t_PATH_SEP = r'/'
t_ABBREV_PATH_SEP = r'//'
t_ABBREV_STEP_SELF = r'\.'
t_ABBREV_STEP_PARENT = r'\.\.'
t_AXIS_SEP = r'::'
t_ABBREV_AXIS_AT = r'@'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_OPEN_BRACKET = r'\['
t_CLOSE_BRACKET = r'\]'
t_UNION_OP = r'\|'
t_EQUAL_OP = r'!?='
t_REL_OP = r'[<>]=?'
t_PLUS_OP = r'\+'
t_MINUS_OP = r'-'
t_COMMA = r','
t_COLON = r':'
t_DOLLAR = r'\$'
t_STAR_OP = r'\*'
# Whitespace is insignificant between XPath tokens.
t_ignore = ' \t\r\n'
# NOTE: some versions of python cannot compile regular expressions that
# contain unicode characters above U+FFFF, which are allowable in NCNames.
# These characters can be used in Python 2.6.4, but can NOT be used in 2.6.2
# (status in 2.6.3 is unknown). The code below accounts for that and excludes
# the higher character range if Python can't handle it.
# Monster regex derived from:
# http://www.w3.org/TR/REC-xml/#NT-NameStartChar
# http://www.w3.org/TR/REC-xml/#NT-NameChar
# EXCEPT:
# Technically those productions allow ':'. NCName, on the other hand:
# http://www.w3.org/TR/REC-xml-names/#NT-NCName
# explicitly excludes those names that have ':'. We implement this by
# simply removing ':' from our regexes.
# NameStartChar regex without characters above U+FFFF.
# BUG FIX: the original read r'...|\xc0-\xd6]|...' -- the opening '[' of the
# \xc0-\xd6 character class was missing, so that alternative matched the
# literal three-character text '\xc0-\xd6]' instead of the U+00C0..U+00D6
# range required by the XML NameStartChar production cited above.
NameStartChar = r'[A-Z]|_|[a-z]|[\xc0-\xd6]|[\xd8-\xf6]|[\xf8-\u02ff]|' + \
    r'[\u0370-\u037d]|[\u037f-\u1fff]|[\u200c-\u200d]|[\u2070-\u218f]|' + \
    r'[\u2c00-\u2fef]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]'
# complete NameStartChar regex
Full_NameStartChar = r'(' + NameStartChar + r'|[\U00010000-\U000EFFFF]' + r')'
# additional characters allowed in NCNames after the first character
NameChar_extras = r'[-.0-9\xb7\u0300-\u036f\u203f-\u2040]'

try:
    import re
    # test whether or not re can compile unicode characters above U+FFFF
    re.compile(r'[\U00010000-\U00010001]')
    # if that worked, then use the full ncname regex
    NameStartChar = Full_NameStartChar
except Exception:
    # if compilation failed, leave NameStartChar regex as is, which does not
    # include the unicode character ranges above U+FFFF
    pass

# An NCName: one start character followed by any number of name characters.
NCNAME_REGEX = r'(' + NameStartChar + r')(' + \
    NameStartChar + r'|' + NameChar_extras + r')*'
# XPath node-type test names (used to classify NCNames at parse time).
NODE_TYPES = set(['comment', 'text', 'processing-instruction', 'node'])
t_NCNAME = NCNAME_REGEX
def t_LITERAL(t):
    r""""[^"]*"|'[^']*'"""
    # ply token rule: the docstring above is the regex and must not change.
    # Drop the surrounding quote characters from the matched lexeme.
    unquoted = t.value[1:-1]
    t.value = unquoted
    return t
def t_FLOAT(t):
    r'\d+\.\d*|\.\d+'
    # ply token rule: the docstring is the regex. Replace the matched text
    # with its numeric value.
    lexeme = t.value
    t.value = float(lexeme)
    return t
def t_INTEGER(t):
    r'\d+'
    # ply token rule: the docstring is the regex. Replace the matched text
    # with its integer value.
    lexeme = t.value
    t.value = int(lexeme)
    return t
def t_error(t):
    # ply error hook: reject any input the token rules cannot lex.
    message = "Unknown text '%s'" % (t.value,)
    raise TypeError(message)
| StarcoderdataPython |
64663 | import math
import torch
import torch.nn as nn
from onmt.utils.misc import aeq
from onmt.utils.loss import LossComputeBase
def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs,
batch_dim=1, batch_offset=None):
"""
Given scores from an expanded dictionary
corresponeding to a batch, sums together copies,
with a dictionary word when it is ambiguous.
"""
# Copy-slot indices start right after the fixed target vocabulary.
offset = len(tgt_vocab)
for b in range(scores.size(batch_dim)):
# 'blank' collects copy slots whose source word also exists in the
# target vocab; 'fill' collects the matching target-vocab indices.
blank = []
fill = []
# batch_offset maps positions back to original batch ids (used during
# beam search where the batch may be reordered/pruned).
batch_id = batch_offset[b] if batch_offset is not None else b
index = batch.indices.data[batch_id]
src_vocab = src_vocabs[index]
# Skip index 0 -- presumably the <unk>/pad slot of the source vocab.
for i in range(1, len(src_vocab)):
sw = src_vocab.itos[i]
ti = tgt_vocab.stoi[sw]
if ti != 0:
blank.append(offset + i)
fill.append(ti)
if blank:
blank = torch.Tensor(blank).type_as(batch.indices.data)
fill = torch.Tensor(fill).type_as(batch.indices.data)
score = scores[:, b] if batch_dim == 1 else scores[b]
# Fold each ambiguous copy score into its vocab entry, then
# effectively zero out the copy slot.
score.index_add_(1, fill, score.index_select(1, blank))
score.index_fill_(1, blank, 1e-10)
return scores
class CopyGenerator(nn.Module):
"""An implementation of pointer-generator networks
:cite:`DBLP:journals/corr/SeeLM17`.
These networks consider copying words
directly from the source sequence.
The copy generator is an extended version of the standard
generator that computes three values.
* :math:`p_{softmax}` the standard softmax over `tgt_dict`
* :math:`p(z)` the probability of copying a word from
the source
* :math:`p_{copy}` the probility of copying a particular word.
taken from the attention distribution directly.
The model returns a distribution over the extend dictionary,
computed as
:math:`p(w) = p(z=1) p_{copy}(w) + p(z=0) p_{softmax}(w)`
.. mermaid::
graph BT
A[input]
S[src_map]
B[softmax]
BB[switch]
C[attn]
D[copy]
O[output]
A --> B
A --> BB
S --> D
C --> D
D --> O
B --> O
BB --> O
Args:
input_size (int): size of input representation
output_size (int): size of output vocabulary
pad_idx (int)
conv_first (bool): if True, attention was computed over a 3x
temporally-downsampled source and is upsampled back with a
transposed convolution before mixing with src_map.
"""
def __init__(self, input_size, output_size, pad_idx, conv_first):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(input_size, output_size)
self.linear_copy = nn.Linear(input_size, 1)
self.pad_idx = pad_idx
self.conv_first = conv_first
if conv_first:
# Three variants so stride-3 upsampling can reproduce any original
# length modulo 3 (output_padding 0/1/2).
self.conv_transpose = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=3)
self.conv_transpose_pad1 = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=3, output_padding=1)
self.conv_transpose_pad2 = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=3, output_padding=2)
def forward(self, hidden, attn, src_map):
"""
Compute a distribution over the target dictionary
extended by the dynamic dictionary implied by copying
source words.
Args:
hidden (FloatTensor): hidden outputs ``(batch x tlen, input_size)``
attn (FloatTensor): attn for each ``(batch x tlen, input_size)``
src_map (FloatTensor):
A sparse indicator matrix mapping each source word to
its index in the "extended" vocab containing.
``(src_len, batch, extra_words)``
"""
if self.conv_first:
# Upsample the attention back to the original source length,
# picking the output_padding that matches src_len mod 3.
attn = torch.unsqueeze(attn, 1)
original_seq_len = src_map.shape[0]
if original_seq_len % 3 == 0:
attn = self.conv_transpose(attn)
elif original_seq_len % 3 == 1:
attn = self.conv_transpose_pad1(attn)
else:
attn = self.conv_transpose_pad2(attn)
attn = torch.squeeze(attn, 1)
# CHECKS
batch_by_tlen, _ = hidden.size()
batch_by_tlen_, slen = attn.size()
slen_, batch, cvocab = src_map.size()
aeq(batch_by_tlen, batch_by_tlen_)
aeq(slen, slen_)
# Original probabilities.
logits = self.linear(hidden)
# Never generate padding.
logits[:, self.pad_idx] = -float('inf')
prob = torch.softmax(logits, 1)
# Probability of copying p(z=1) batch.
p_copy = torch.sigmoid(self.linear_copy(hidden))
# Probability of not copying: p_{word}(w) * (1 - p(z))
out_prob = torch.mul(prob, 1 - p_copy)
mul_attn = torch.mul(attn, p_copy)
# Project copy-weighted attention through src_map to get per-extended-
# vocab-slot copy probabilities.
copy_prob = torch.bmm(
mul_attn.view(-1, batch, slen).transpose(0, 1),
src_map.transpose(0, 1)
).transpose(0, 1)
copy_prob = copy_prob.contiguous().view(-1, cvocab)
return torch.cat([out_prob, copy_prob], 1)
class CopyGeneratorLoss(nn.Module):
"""Copy generator criterion."""
def __init__(self, vocab_size, force_copy, unk_index=0,
ignore_index=-100, eps=1e-20):
super(CopyGeneratorLoss, self).__init__()
# If force_copy, copyable tokens must be produced via the copy
# distribution even when they exist in the target vocabulary.
self.force_copy = force_copy
self.eps = eps
self.vocab_size = vocab_size
self.ignore_index = ignore_index
self.unk_index = unk_index
def forward(self, scores, align, target):
"""
Args:
scores (FloatTensor): ``(batch_size*tgt_len)`` x dynamic vocab size
whose sum along dim 1 is less than or equal to 1, i.e. cols
softmaxed.
align (LongTensor): ``(batch_size x tgt_len)``
target (LongTensor): ``(batch_size x tgt_len)``
"""
# probabilities assigned by the model to the gold targets
vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)
# probability of tokens copied from source
copy_ix = align.unsqueeze(1) + self.vocab_size
copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)
# Set scores for unk to 0 and add eps
copy_tok_probs[align == self.unk_index] = 0
copy_tok_probs += self.eps # to avoid -inf logs
# find the indices in which you do not use the copy mechanism
non_copy = align == self.unk_index
if not self.force_copy:
non_copy = non_copy | (target != self.unk_index)
# Where copying is optional, credit both paths; otherwise only copy.
probs = torch.where(
non_copy, copy_tok_probs + vocab_probs, copy_tok_probs
)
# NOTE(review): ad-hoc NaN rescue -- shifting probs by the minimum
# changes the loss values; confirm this is intentional rather than
# masking an upstream numerical bug.
if math.isnan(probs.log().sum()):
probs = probs - torch.min(probs) + self.eps
loss = -probs.log() # just NLLLoss; can the module be incorporated?
# Drop padding.
loss[target == self.ignore_index] = 0
return loss
class CopyGeneratorLossCompute(LossComputeBase):
"""Copy Generator Loss Computation."""
def __init__(self, criterion, generator, tgt_vocab, normalize_by_length):
super(CopyGeneratorLossCompute, self).__init__(criterion, generator)
self.tgt_vocab = tgt_vocab
# If set, each sequence's loss is divided by its (non-pad) length.
self.normalize_by_length = normalize_by_length
def _make_shard_state(self, batch, output, range_, attns):
"""See base class for args description."""
if getattr(batch, "alignment", None) is None:
raise AssertionError("using -copy_attn you need to pass in "
"-dynamic_dict during preprocess stage.")
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1], :, 0],
"copy_attn": attns.get("copy"),
"align": batch.alignment[range_[0] + 1: range_[1]]
}
def _compute_loss(self, batch, output, target, copy_attn, align):
"""Compute the loss.
The args must match :func:`self._make_shard_state()`.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
copy_attn: the copy attention value.
align: the align info.
"""
target = target.view(-1)
align = align.view(-1)
# Extended-vocab distribution from the pointer-generator head.
scores = self.generator(
self._bottle(output), self._bottle(copy_attn), batch.src_map
)
loss = self.criterion(scores, align, target)
# print ("loss: {}".format(loss))
# this block does not depend on the loss value computed above
# and is used only for stats
scores_data = collapse_copy_scores(
self._unbottle(scores.clone(), batch.batch_size),
batch, self.tgt_vocab, batch.dataset.src_vocabs)
scores_data = self._bottle(scores_data)
# this block does not depend on the loss value computed above
# and is used only for stats
# Correct target copy token instead of <unk>
# tgt[i] = align[i] + len(tgt_vocab)
# for i such that tgt[i] == 0 and align[i] != 0
target_data = target.clone()
unk = self.criterion.unk_index
correct_mask = (target_data == unk) & (align != unk)
offset_align = align[correct_mask] + len(self.tgt_vocab)
target_data[correct_mask] += offset_align
# Compute sum of perplexities for stats
stats = self._stats(loss.sum().clone(), scores_data, target_data)
# this part looks like it belongs in CopyGeneratorLoss
if self.normalize_by_length:
# Compute Loss as NLL divided by seq length
tgt_lens = batch.tgt[:, :, 0].ne(self.padding_idx).sum(0).float()
# Compute Total Loss per sequence in batch
loss = loss.view(-1, batch.batch_size).sum(0)
# Divide by length of each sequence and sum
loss = torch.div(loss, tgt_lens).sum()
else:
loss = loss.sum()
return loss, stats
| StarcoderdataPython |
3263501 | <reponame>dawidePl/Linux-SysInfo
import psutil
import platform
from datetime import datetime
class SysInfo(object):
    """Collects human-readable system information (OS, uptime, CPU, RAM,
    disk) via psutil and platform."""

    def __init__(self):
        # Binary (1024-based) unit prefixes used by get_size().
        self.units = ["", "K", "M", "G", "T", "P"]
        self.factor = 1024
        # Dispatch table used by get_data(); keys double as the CLI arguments.
        self.func_dict = {
            'system': self.get_system,
            'uptime': self.get_uptime,
            'cpu': self.get_cpu_data,
            'ram': self.get_ram_data,
            'disk': self.get_disk_data
        }
        self.sys_args = ['system', 'uptime', 'cpu', 'ram', 'disk']

    def get_size(self, bytes: int, suffix="B") -> str:
        """Format a byte count with a binary prefix, e.g. 1536 -> '1.50KB'."""
        for unit in self.units:
            if bytes < self.factor:
                return f"{bytes:.2f}{unit}{suffix}"
            bytes /= self.factor

    def percentage(self, part, whole, precision=1) -> str:
        """Return part/whole as a percentage string, e.g. '42.0%'."""
        return f"{float(part)/float(whole):.{int(precision)}%}"

    def get_system(self) -> str:
        """Return the OS name plus distribution description."""
        # BUG FIX: platform.linux_distribution() was removed in Python 3.8;
        # fall back to the kernel release string when it is unavailable.
        try:
            distro = " ".join(platform.linux_distribution())
        except AttributeError:
            distro = platform.release()
        return f"{platform.system()} {distro}"

    def get_uptime(self) -> str:
        """Return how long the system has been running."""
        # BUG FIX: the original formatted the *boot date* fields (calendar
        # day/hour/minute of the boot timestamp) as if they were the elapsed
        # uptime. Compute the time since boot instead.
        delta = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
        hours, remainder = divmod(int(delta.total_seconds()) % 86400, 3600)
        minutes, seconds = divmod(remainder, 60)
        return f"{delta.days} days {hours}h {minutes}m {seconds}s"

    def get_cpu_data(self) -> str:
        """Return current CPU usage percentage and frequency."""
        usage = f"{psutil.cpu_percent()}%"
        frequency = f"{psutil.cpu_freq().current:.2f}Mhz"
        return f"CPU usage: {usage}\nCPU Frequency: {frequency}"

    def get_ram_data(self) -> str:
        """Return used/total RAM with the used percentage."""
        ram = psutil.virtual_memory()
        total = self.get_size(ram.total)
        used = self.get_size(ram.used)
        used_percent = self.percentage(ram.used, ram.total)
        return f"{used} of {total} ( {used_percent} ) of RAM is used."

    def get_disk_data(self, show_partitions: bool = False) -> str:
        """Summarise disk usage, per partition or aggregated over all."""
        partitions = psutil.disk_partitions()
        if show_partitions:
            partition_info = []
            for partition in partitions:
                try:
                    partition_usage = psutil.disk_usage(partition.mountpoint)
                except PermissionError:
                    continue
                total = self.get_size(partition_usage.total)
                used = self.get_size(partition_usage.used)
                # BUG FIX: the percent value was run through get_size(),
                # rendering e.g. 43.2% as '43.20B'; format it as a percentage.
                used_percentage = self.percentage(partition_usage.used,
                                                  partition_usage.total)
                partition_info.append(f"{used} of {total} ( {used_percentage} ) of disk space is used.")
            return "\n".join(partition_info)
        else:
            sum_total = 0
            sum_used = 0
            for partition in partitions:
                try:
                    partition_usage = psutil.disk_usage(partition.mountpoint)
                except PermissionError:
                    continue
                sum_total += partition_usage.total
                sum_used += partition_usage.used
            sum_used_percent = self.percentage(sum_used, sum_total)
            sum_total = self.get_size(sum_total)
            sum_used = self.get_size(sum_used)
            return f"{sum_used} of {sum_total} ( {sum_used_percent} ) of disk space is used."

    # ----------------------------------------------------
    def data(self) -> str:
        """Return the full multi-section report."""
        system = self.get_system()
        uptime = self.get_uptime()
        cpu_data = self.get_cpu_data()
        ram_data = self.get_ram_data()
        disk_data = self.get_disk_data()
        return f"{system}\n\nUptime: {uptime}\n\n{cpu_data}\n\nRAM data:\n{ram_data}\n\nDisk data:\n{disk_data}"

    def get_data(self, sys_arg: str) -> str:
        """Dispatch one named section; 'help' lists the available arguments,
        anything unknown falls back to the full report."""
        if sys_arg == "help":
            available_args = []
            for key in self.func_dict:
                available_args.append(key)
            return "Available arguments:\n{}".format('\n'.join(available_args))
        elif sys_arg in self.sys_args:
            return self.func_dict[sys_arg]()
        else:
            return self.data()
3208478 | <gh_stars>1000+
from pythonforandroid.recipes.kivy import KivyRecipe
# Sanity checks: fail fast if upstream KivyRecipe changes its dependency
# lists, since the pinned subclass below rewrites them.
assert KivyRecipe.depends == ['sdl2', 'pyjnius', 'setuptools', 'python3']
assert KivyRecipe.python_depends == ['certifi']
# Pin kivy to a specific git commit with a verified archive checksum.
class KivyRecipePinned(KivyRecipe):
# kivy master 2020-12-10 (2.0.0 plus a few bugfixes)
version = "2debbc3b1484b14824112986cb03b1072a60fbfc"
sha512sum = "6cabb77860e63059ab4b0663b87f6396fa9133839b42db754628fc9a55f10b8d759466110e0763fd8dac40a49a03af276cb93b05076471d12db796e679f33d1d"
# mv "python_depends" into "depends" to ensure we can control what versions get installed
depends = [*KivyRecipe.depends, *KivyRecipe.python_depends]
python_depends = []
recipe = KivyRecipePinned()
3237516 | <reponame>mgthometz/advent-of-code-2021
import sys, collections
from grid import gridsource as grid
from util import findints
# Target-area bounds parsed from the puzzle input (Advent of Code 2021 day 17).
Target = collections.namedtuple('Target', 'xmin xmax ymin ymax')
def main():
# Read the puzzle input from argv[1], defaulting to a file named 'in'.
# NOTE(review): the file handle is never closed; a 'with' block would be safer.
f = open(sys.argv[1] if len(sys.argv) > 1 else 'in')
target = Target(*findints(f.read()))
result = 0
# Count every initial velocity whose trajectory ever lands in the target;
# -ymin - 1 is presumably the standard upper bound for upward shots -- confirm.
for xvel in inclusive_range(0, target.xmax):
for yvel in inclusive_range(target.ymin, -target.ymin - 1):
if is_hit((xvel, yvel), target):
result += 1
print(result)
def inclusive_range(lo, hi):
    """Like range(), but including *hi* itself."""
    stop = hi + 1
    return range(lo, stop)
def is_hit(velocity, target):
    """Return True if a probe launched with *velocity* ever lands inside *target*."""
    return any(
        target.xmin <= pos[0] <= target.xmax
        and target.ymin <= pos[1] <= target.ymax
        for pos in trajectory(velocity, target)
    )
def trajectory(velocity, target):
    """Yield successive probe positions until it passes the target box."""
    position = (0, 0)
    # Keep stepping while the probe is not yet right of the target and not
    # yet below its bottom edge.
    while position[0] <= target.xmax and position[1] >= target.ymin:
        yield position
        position = grid.addvec(position, velocity)
        # Drag pulls the x-velocity toward 0; gravity decrements y-velocity.
        dx, dy = velocity
        velocity = (max(0, dx - 1), dy - 1)
3271948 | # raw trade data as returned by ccxt for kraken
# Test fixture: two trades exactly as ccxt returns them for Kraken
# (one BTC/EUR buy, one ETH/USD sell), including the raw 'info' payload.
kraken_trades = [{
'amount': 0.02,
'datetime': '2017-02-02T18:00:20.000Z',
'fee': {
'cost': 0.05,
'currency': 'EUR'
},
'id': 'ABCDEF-GHIJK-LMNOPQ',
'info': {
'cost': '20.8',
'fee': '0.05',
'id': 'ABCDEF-GHIJK-LMNOPQ',
'margin': '0.00000',
'misc': '',
'ordertxid': 'XXXXXX-XXXXX-XXXXXX',
'ordertype': 'stop market',
'pair': 'XXBTZEUR',
'price': '1000.0',
'time': 1486058420.0,
'type': 'buy',
'vol': '0.02'
},
'order': 'XXXXXX-XXXXX-XXXXXX',
'price': 1000.0,
'side': 'buy',
'symbol': 'BTC/EUR',
'timestamp': 1486058420,
'type': 'stop market'
}, {
'amount': 15.3,
'datetime': '2017-02-02T18:00:21.000Z',
'fee': {
'cost': 0.02,
'currency': 'USD'
},
'id': 'AAAAAA-AAAAA-AAAAAA',
'info': {
'cost': '250.00000',
'fee': '0.02',
'id': 'AAAAAA-AAAAA-AAAAAA',
'margin': '0.00000',
'misc': '',
'ordertxid': 'XXXXXX-XXXXX-XXXXXX',
'ordertype': 'stop market',
'pair': 'XETHZUSD',
'price': '1900.2',
'time': 1486058421.0,
'type': 'sell',
'vol': '15.3'
},
'order': 'XXXXXX-XXXXX-XXXXXX',
# NOTE(review): 'price' (150.1) disagrees with info['price'] ('1900.2');
# presumably intentional test data -- confirm before relying on either.
'price': 150.1,
'side': 'sell',
'symbol': 'ETH/USD',
'timestamp': 1486058421,
'type': 'stop market'
}]
1646832 | <filename>logger_es_cli/__main__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# __main__.py
# @Author : <NAME> (<EMAIL>)
# @Link :
from logger_es_cli.cli_driver import app
import sys
def main():
    """Console entry point: run the CLI app under its published program name."""
    app(prog_name="logger-es-cli")


if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
33239 | #!/usr/bin/env python
# coding: utf-8
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import subprocess
import sys
def FixUserImplementation(implementation):
    """Rewrites a MIG-generated user implementation (.c) file.

    Rewrites the file at |implementation| by adding “__attribute__((unused))” to
    the definition of any structure typedefed as “__Reply” by searching for the
    pattern unique to those structure definitions. These structures are in fact
    unused in the user implementation file, and this will trigger a
    -Wunused-local-typedefs warning in gcc unless removed or marked with the
    “unused” attribute.
    """
    # r+ so the file can be read, rewound, truncated and rewritten in place;
    # the context manager guarantees the handle is closed even on error.
    with open(implementation, 'r+') as file:
        contents = file.read()
        # '\t} __Reply;' is the closing line of each typedef'd reply struct.
        pattern = re.compile('^(\t} __Reply);$', re.MULTILINE)
        contents = pattern.sub(r'\1 __attribute__((unused));', contents)
        file.seek(0)
        file.truncate()
        file.write(contents)
def FixServerImplementation(implementation):
    """Rewrites a MIG-generated server implementation (.c) file.

    Rewrites the file at |implementation| by replacing “mig_internal” with
    “mig_external” on functions that begin with “__MIG_check__”. This makes
    these functions available to other callers outside this file from a linkage
    perspective. It then returns, as a list of lines, declarations that can be
    added to a header file, so that other files that include that header file
    will have access to these declarations from a compilation perspective.
    """
    with open(implementation, 'r+') as file:
        contents = file.read()

        # Find interesting declarations.
        declaration_pattern = re.compile(
            '^mig_internal (kern_return_t __MIG_check__.*)$', re.MULTILINE)
        declarations = declaration_pattern.findall(contents)

        # Remove “__attribute__((__unused__))” from the declarations, and call
        # them “mig_external” or “extern” depending on whether “mig_external”
        # is defined.
        attribute_pattern = re.compile(r'__attribute__\(\(__unused__\)\) ')
        declarations = [
            '#ifdef mig_external\nmig_external\n#else\nextern\n#endif\n' +
            attribute_pattern.sub('', x) + ';\n'
            for x in declarations]

        # Rewrite the declarations in this file as “mig_external”.
        contents = declaration_pattern.sub(r'mig_external \1', contents)

        # Crashpad never implements the mach_msg_server() MIG callouts. To
        # avoid needing to provide stub implementations, set KERN_FAILURE as
        # the RetCode and abort().
        routine_callout_pattern = re.compile(
            r'OutP->RetCode = (([a-zA-Z0-9_]+)\(.+\));')
        routine_callouts = routine_callout_pattern.findall(contents)
        for routine in routine_callouts:
            # routine[0] is the full call expression, e.g. "do_foo(In0P->x)".
            contents = contents.replace(routine[0], 'KERN_FAILURE; abort()')

        # Include the header for abort().
        contents = '#include <stdlib.h>\n' + contents

        file.seek(0)
        file.truncate()
        file.write(contents)
    return declarations
def FixHeader(header, declarations=()):
    """Rewrites a MIG-generated header (.h) file.

    Rewrites the file at |header| by placing it inside an “extern "C"” block,
    so that it declares things properly when included by a C++ compilation
    unit. |declarations| can be a list of additional declarations to place
    inside the “extern "C"” block after the original contents of |header|.
    """
    # Default is an immutable tuple rather than a mutable [] (same behavior
    # for ''.join, avoids the shared-mutable-default pitfall).
    with open(header, 'r+') as file:
        contents = file.read()
        declarations_text = ''.join(declarations)
        contents = '''\
#ifdef __cplusplus
extern "C" {
#endif

%s
%s
#ifdef __cplusplus
}
#endif
''' % (contents, declarations_text)
        file.seek(0)
        file.truncate()
        file.write(contents)
def main(args):
    """Parse options, invoke the `mig` tool, then post-process its outputs.

    Generates the user/server .c/.h files from the |defs| definition file and
    applies the Fix* rewrites above so the results link and compile cleanly.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--developer-dir', help='Path to Xcode')
    parser.add_argument('--sdk', help='Path to SDK')
    parser.add_argument('--include',
                        default=[],
                        action='append',
                        help='Additional include directory')
    parser.add_argument('defs')
    parser.add_argument('user_c')
    parser.add_argument('server_c')
    parser.add_argument('user_h')
    parser.add_argument('server_h')
    parsed = parser.parse_args(args)

    command = ['mig',
               '-user', parsed.user_c,
               '-server', parsed.server_c,
               '-header', parsed.user_h,
               '-sheader', parsed.server_h,
              ]
    # mig locates the toolchain via DEVELOPER_DIR rather than a flag.
    if parsed.developer_dir is not None:
        os.environ['DEVELOPER_DIR'] = parsed.developer_dir
    if parsed.sdk is not None:
        command.extend(['-isysroot', parsed.sdk])
    for include in parsed.include:
        command.extend(['-I' + include])
    command.append(parsed.defs)
    # Raises CalledProcessError (non-zero exit) if mig fails.
    subprocess.check_call(command)
    FixUserImplementation(parsed.user_c)
    # The server rewrite also yields declarations to expose in the server header.
    server_declarations = FixServerImplementation(parsed.server_c)
    FixHeader(parsed.user_h)
    FixHeader(parsed.server_h, server_declarations)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| StarcoderdataPython |
1726297 | #!/usr/bin/env python
"""
First run convert_strips.py to convert 16bit pansharpened multispectral to 8bit pansharpened RGB. Then run this script to reproject.
"""
import os
import subprocess
# SpaceNet AOI strip names, index-aligned with the UTM-zone EPSG codes below.
names = [
    '01_rio',
    '02_vegas',
    '03_paris',
    '04_shanghai',
    '05_khartoum',
    '06_atlanta',
    '07_moscow',
    '08_mumbai',
    '09_san',
    '10_dar',
    '11_rotterdam',
]
epsgs = [
    32723,
    32611,
    32631,
    32651,
    32636,
    32616,
    32637,
    32643,
    32620,
    32737,
    32631,
]

# Reproject every 8-bit RGB strip into its UTM zone at 30 cm resolution,
# resampling with Lanczos; AOI numbering starts at 1 to match the file names.
for aoi, (name, epsg) in enumerate(zip(names, epsgs), start=1):
    print('AOI:', aoi)
    cmd_string = (
        'gdalwarp'
        + ' -t_srs "EPSG:' + str(epsg) + '"'
        + ' -tr .3 .3'
        + ' -r lanczos'
        + ' -srcnodata None -dstnodata None'
        + ' /local_data/geoloc/sat/psrgb/' + name + '.tif'
        + ' /local_data/geoloc/sat/utm/' + name + '.tif'
    )
    print(cmd_string)
    os.system(cmd_string)
| StarcoderdataPython |
4835358 | <gh_stars>1-10
import sys
# Fail fast at import time: this package is Python-3 only.
if sys.version_info < (3,):
    raise RuntimeError("libhxl requires Python 3 or higher")

# Package version string (single source of truth for setup metadata).
__version__="0.1"
| StarcoderdataPython |
1667302 | <reponame>gdyp/bert-awesome
# /user/bin/python3.6
# -*-coding: utf-8-*-
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pathlib import Path
import torch
from tqdm import tqdm_notebook as tqdm
import os
from tqdm import tqdm
import sys
import random
import numpy as np
# import apex
from tensorboardX import SummaryWriter
from utils.args import Args
from classification.models import BertForMultiLabelSequenceClassification, CyclicLR
from inputters import MultiLabelTextProcessor, convert_examples_to_features
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.optimization import BertAdam
import logging
# Configure root logging once at import time.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    # Fixed: was '%H:%M:S' — the missing '%' before S printed a
                    # literal 'S' instead of the seconds field.
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Module-level training setup: paths, args, device, model, optimizer.
# Order matters: args -> device/seeds -> processor -> model -> optimizer.
# ---------------------------------------------------------------------------
# CUDA_VISIBLE_DEVICES = 1
DATA_PATH = Path('/data/gump/bert_chinese/data/')
DATA_PATH.mkdir(exist_ok=True)
PATH = Path('/data/gump/bert_chinese/')
PATH.mkdir(exist_ok=True)
CLAS_DATA_PATH = PATH / 'class'
CLAS_DATA_PATH.mkdir(exist_ok=True)
OUTPUT_DIR = Path('/data/gump/bert_chinese/model/')
# Set to a loaded state_dict to resume from a fine-tuned checkpoint.
model_state_dict = None
BERT_PRETRAINED_PATH = Path('/data/gump/bert_chinese/chinese_L-12_H-768_A-12/')
PYTORCH_PRETRAINED_BERT_CACHE = BERT_PRETRAINED_PATH / 'cache/'
PYTORCH_PRETRAINED_BERT_CACHE.mkdir(exist_ok=True)
args = Args(full_data_dir=DATA_PATH, data_dir=PATH, bert_model=BERT_PRETRAINED_PATH,
            output_dir=OUTPUT_DIR).args
processors = {
    "intent_multilabel": MultiLabelTextProcessor
}
# Device selection: single process (local_rank == -1) vs. torch.distributed.
if args["local_rank"] == -1 or args["no_cuda"]:
    device = torch.device("cuda:0" if torch.cuda.is_available() and not args["no_cuda"] else "cpu")
    # n_gpu = torch.cuda.device_count()
    n_gpu = 1
else:
    torch.cuda.set_device(args['local_rank'])
    device = torch.device("cuda", args['local_rank'])
    n_gpu = 1
    torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
    device, n_gpu, bool(args['local_rank'] != -1), args['fp16']))
# Effective per-step batch size shrinks with gradient accumulation.
args['train_batch_size'] = int(args['train_batch_size'] / args['gradient_accumulation_steps'])
random.seed(args['seed'])
np.random.seed(args['seed'])
torch.manual_seed(args['seed'])
if n_gpu > 0:
    torch.cuda.manual_seed_all(args['seed'])
task_name = args['task_name'].lower()
if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name](args['data_dir'])
# label_list = processor.get_labels()
# Hard-coded intent label set; index position is the class id.
label_list = ['alarm', 'bag', 'chat', 'command', 'face', 'greet', 'intelligent_home', 'machine', 'food',
              'music', 'news', 'query', 'radio', 'sleep', 'story', 'time', 'volume', 'weather', 'study']
num_labels = len(label_list)
tokenizer = BertTokenizer.from_pretrained(args['bert_model'])
train_examples = None
num_train_steps = None
if args['do_train']:
    train_examples = processor.get_train_examples(args['full_data_dir'], size=args['train_size'])
    # train_examples = processor.get_train_examples(args['data_dir'], size=args['train_size'])
    num_train_steps = int(
        len(train_examples) / args['train_batch_size'] / args['gradient_accumulation_steps'] * args['num_train_epochs'])
# pdb.set_trace()
if model_state_dict:
    model = BertForMultiLabelSequenceClassification.from_pretrained(
        '/data/gump/bert_chinese/chinese_L-12_H-768_A-12', num_labels=num_labels, state_dict=model_state_dict)
else:
    model = BertForMultiLabelSequenceClassification.from_pretrained(
        '/data/gump/bert_chinese/chinese_L-12_H-768_A-12', num_labels=num_labels)
if args['fp16']:
    model.half()
model.to(device)
if args['local_rank'] != -1:
    try:
        from apex.parallel import DistributedDataParallel as DDP
    except ImportError:
        raise ImportError(
            "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
    model = DDP(model)
elif n_gpu > 1:
    model = torch.nn.DataParallel(model)
# Prepare optimizer
model.unfreeze_bert_encoder()
param_optimizer = list(model.named_parameters())
# Bias and LayerNorm parameters are excluded from weight decay (BERT convention).
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = num_train_steps
if args['local_rank'] != -1:
    t_total = t_total // torch.distributed.get_world_size()
if args['fp16']:
    try:
        from apex.optimizers import FP16_Optimizer
        from apex.optimizers import FusedAdam
    except ImportError:
        raise ImportError(
            "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
    optimizer = FusedAdam(optimizer_grouped_parameters,
                          lr=args['learning_rate'],
                          bias_correction=False,
                          max_grad_norm=1.0)
    if args['loss_scale'] == 0:
        optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
    else:
        optimizer = FP16_Optimizer(optimizer, static_loss_scale=args['loss_scale'])
else:
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args['learning_rate'],
                         warmup=args['warmup_proportion'],
                         t_total=t_total)
# Move any optimizer state tensors onto the training device.
for state in optimizer.state.values():
    for k, v in state.items():
        if torch.is_tensor(v): state[k] = v.cuda(device)
# NOTE(review): scheduler is constructed but never stepped in fit() — confirm.
scheduler = CyclicLR(optimizer, base_lr=2e-5, max_lr=5e-5, step_size=2500, last_batch_iteration=0)
def accuracy(out, labels):
    """Count the rows of *out* whose argmax equals the matching entry of *labels*.

    Returns a raw hit count (not a ratio); callers divide by the example count.
    """
    predictions = np.argmax(out, axis=1)
    return np.sum(predictions == labels)
# Eval Fn
# Dev-set examples used by eval() below; loaded once at module import.
eval_examples = processor.get_dev_examples(args['full_data_dir'], size=args['val_size'])
# writer = SummaryWriter()
# eval_examples = processor.get_test_examples('/home/data/peter/intent_classification_code/intent_classification/KFold_data/',
#                                             'test.csv', size=args['val_size'])
def eval(epoch):
    """Evaluate the global model on eval_examples; return loss/accuracy dict.

    NOTE(review): the name shadows the builtin eval(); kept because callers
    (commented in fit()) use it. Relies on module-level globals: args, model,
    device, tokenizer, label_list, eval_examples, logger.
    """
    args['output_dir'].mkdir(exist_ok=True)
    eval_features = convert_examples_to_features(
        eval_examples, label_list, args['max_seq_length'], tokenizer)
    logger.info("***** Running evaluation *****")
    logger.info(" Num examples = %d", len(eval_examples))
    logger.info(" Batch size = %d", args['eval_batch_size'])
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in eval_features], dtype=torch.float)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    # Sequential (not shuffled) so metrics are reproducible across runs.
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args['eval_batch_size'])
    all_logits = None
    all_labels = None
    model.eval()
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        label_ids = label_ids.to(device)
        with torch.no_grad():
            # With labels the model returns the loss; without, the logits.
            tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
            logits = model(input_ids, segment_ids, input_mask)
        logits = logits.detach().cpu().numpy()
        label_ids = label_ids.view(-1).to('cpu').numpy()
        tmp_eval_accuracy = accuracy(logits, label_ids)
        # tmp_eval_accuracy = accuracy_thresh(logits, label_ids)
        # if all_logits is None:
        #     all_logits = logits.detach().cpu().numpy()
        # else:
        #     all_logits = np.concatenate((all_logits, logits.detach().cpu().numpy()), axis=0)
        # if all_labels is None:
        #     all_labels = label_ids.detach().cpu().numpy()
        # else:
        #     all_labels = np.concatenate((all_labels, label_ids.detach().cpu().numpy()), axis=0)
        eval_loss += tmp_eval_loss.mean().item()
        eval_accuracy += tmp_eval_accuracy
        nb_eval_examples += input_ids.size(0)
        nb_eval_steps += 1
    # Loss is averaged per batch, accuracy per example.
    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    # ROC-AUC calcualation
    # Compute ROC curve and ROC area for each class
    # fpr = dict()
    # tpr = dict()
    # roc_auc = dict()
    # for i in range(num_labels):
    #     fpr[i], tpr[i], _ = roc_curve(all_labels[:, i], all_logits[:, i])
    #     roc_auc[i] = auc(fpr[i], tpr[i])
    # # Compute micro-average ROC curve and ROC area
    # fpr["micro"], tpr["micro"], _ = roc_curve(all_labels.ravel(), all_logits.ravel())
    # roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    result = {'eval_loss': eval_loss,
              'eval_accuracy': eval_accuracy}
    # 'loss': tr_loss/nb_tr_steps}
    # 'roc_auc': roc_auc }
    output_eval_file = os.path.join(args['output_dir'], "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
            # writer.write("%s = %s\n" % (key, str(result[key])))
    # Checkpoint the underlying model (unwrapping DataParallel) after epoch 5.
    if epoch>5:
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        output_model_file = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, "finetuned_pytorch_model" + str(epoch) + ".bin")
        torch.save(model_to_save.state_dict(), output_model_file)
    return result
def warmup_linear(x, warmup=0.002):
    """Learning-rate multiplier: linear ramp while x < warmup, then 1 - x decay.

    *x* is the fraction of total training steps completed (0..1).
    """
    if x >= warmup:
        return 1.0 - x
    return x / warmup
# Tokenize the training examples and build the training DataLoader.
train_features = convert_examples_to_features(
    train_examples, label_list, args['max_seq_length'], tokenizer)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args['train_batch_size'])
logger.info(" Num steps = %d", num_train_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
# Labels are float multi-hot vectors (multi-label classification).
all_label_ids = torch.tensor([f.label_ids for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Random sampling locally; DistributedSampler shards the data across ranks.
if args['local_rank'] == -1:
    train_sampler = RandomSampler(train_data)
else:
    train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args['train_batch_size'])
def fit(num_epocs=args['num_train_epochs']):
    """Run the training loop over train_dataloader for *num_epocs* epochs.

    Uses module-level globals: model, optimizer, args, device, n_gpu, t_total,
    train_dataloader, logger, writer (created by the __main__ guard).
    NOTE(review): default is bound at definition time from args — intended here.
    """
    global_step = 0
    model.train()
    for i_ in tqdm(range(int(num_epocs)), desc="Epoch"):
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        for step, batch in enumerate(tqdm(train_dataloader, desc='train')):
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids = batch
            loss = model(input_ids, segment_ids, input_mask, label_ids)
            if n_gpu > 1:
                loss = loss.mean()
            # Scale the loss so accumulated gradients average correctly.
            if args['gradient_accumulation_steps'] > 1:
                loss = loss / args['gradient_accumulation_steps']
            if args['fp16']:
                optimizer.backward(loss)
            else:
                loss.backward()
            tr_loss += loss.item()
            nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1
            # Only step the optimizer every gradient_accumulation_steps batches.
            if (step + 1) % args['gradient_accumulation_steps'] == 0:
                lr_this_step = args['learning_rate'] * warmup_linear(global_step / t_total, args['warmup_proportion'])
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_this_step
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
        logger.info('Loss after epoc {}'.format(tr_loss / nb_tr_steps))
        logger.info('Eval after epoc {}'.format(i_ + 1))
        # Checkpoint the unwrapped model after epoch 5.
        if i_ > 5:
            model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
            output_model_file = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE,
                                             "finetuned_pytorch_model" + str(i_) + ".bin")
            torch.save(model_to_save.state_dict(), output_model_file)
        # result = eval(epoch=i_)
        writer.add_scalar('scalar/loss', tr_loss / nb_tr_steps, i_)
        # writer.add_scalar('scalar/loss', result['eval_loss'], i_)
        # writer.add_scalar('scalar/acc', result['eval_accuracy'], i_)
    # model.unfreeze_bert_encoder()
if __name__ == '__main__':
    # TensorBoard writer is a global consumed inside fit().
    writer = SummaryWriter()
    fit()
    writer.close()
    # model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
    # output_model_file = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, "finetuned_pytorch_model.bin")
    # torch.save(model_to_save.state_dict(), output_model_file)
| StarcoderdataPython |
171723 | <filename>IL_method/prototype.py
from torch.nn import functional as F
import torch
import torch.nn as nn
import os
import pickle
from torch.utils.data.dataloader import DataLoader
from torchvision import transforms
# my package
from retinanet.losses import calc_iou
from retinanet.dataloader import IL_dataset, Resizer, Augmenter, Normalizer, collater, AspectRatioBasedSampler
from preprocessing.params import create_dir
class ProtoTyper(object):
    """Builds per-class/per-anchor prototype features from a RetinaNet
    classification head and ranks herding exemplars for incremental learning.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    nesting of the num_files recheck in cal_examplar should be confirmed
    against the original repository.
    """

    def __init__(self, il_trainer, thresold = 0.5):
        # thresold: IoU threshold above which an anchor counts as positive
        # (spelling kept — callers may pass it by keyword).
        self.il_trainer = il_trainer
        self.thresold = thresold
        self.num_anchors = self.il_trainer.model.classificationModel.num_anchors
        # Filled lazily by init_prototype();
        # shape (num_classes, num_anchors, 256 * num_anchors).
        self.prototype_features = None

    def _get_positive(self, anchors, annotations):
        """For each image, mark anchors with IoU >= thresold and record the
        class id of their best-matching annotation."""
        batch_size = annotations.shape[0]
        positive_indices = []
        targets = []
        for j in range(batch_size):
            bbox_annotation = annotations[j, :, :]
            # Drop padding rows (class id -1).
            bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]
            IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4]) # shape=(num_anchors, num_annotations)
            IoU_max, IoU_argmax = torch.max(IoU, dim=1) # shape=(num_anchors x 1)
            pos_indices = torch.ge(IoU_max, self.thresold).view(-1, self.num_anchors)
            target = bbox_annotation[IoU_argmax, 4].view(-1, self.num_anchors)
            #store the postive anchor and its class
            positive_indices.append(pos_indices.unsqueeze(dim=0))
            targets.append(target.long().unsqueeze(dim=0))
        positive_indices = torch.cat(positive_indices)
        targets = torch.cat(targets)
        return positive_indices, targets

    def _cal_features(self, feature_temp_path:str, state:int):
        """calculate the features for each image and store in feature_temp_path
        Args:
            feature_temp_path(str): the path for store the features for each image
        """
        dataset = IL_dataset(self.il_trainer.params,
                             transform=transforms.Compose([Normalizer(), Resizer()]),
                             start_state=state)
        # create the dataloader for cal the features
        sampler = AspectRatioBasedSampler(dataset, batch_size = self.il_trainer.params['batch_size'], drop_last=False, shuffle=False)
        dataloader = DataLoader(dataset, num_workers=2, collate_fn=collater, batch_sampler=sampler)
        model = self.il_trainer.model
        num_classes = model.classificationModel.num_classes
        batch_size = self.il_trainer.params['batch_size']
        for idx, data in enumerate(dataloader):
            with torch.no_grad():
                img_batch = data['img'].float().cuda()
                annot = data['annot'].cuda()
                # get features from the classification head
                features, anchors = model.get_classification_feature(img_batch)
                positive_indices, targets = self._get_positive(anchors, annot)
                for batch_id in range(batch_size):
                    # init data for each img
                    count = torch.zeros(num_classes, self.num_anchors, 1).cuda()
                    prototype_features = torch.zeros(num_classes, self.num_anchors, 256 * self.num_anchors).cuda()
                    iter_num = idx * batch_size + batch_id
                    # get each img's data in minibatch
                    feature = features[batch_id,...]
                    pos = positive_indices[batch_id,...]
                    target = targets[batch_id,...]
                    # get the positive anchor
                    mask = pos.any(dim=1)
                    feature = feature[mask,:]
                    pos = pos[mask,:]
                    target = target[mask,:]
                    # accumulate the features and num anchors
                    for i in range(feature.shape[0]):
                        count[target[i][pos[i]], pos[i],:] += 1
                        prototype_features[target[i][pos[i]], pos[i],:] += feature[i]
                    # store the features and positive anchors's number
                    with open(os.path.join(feature_temp_path, 'f_{}.pickle'.format(iter_num)), 'wb') as f:
                        pickle.dump((prototype_features.cpu(), count.cpu()), f)
        del dataloader
        del dataset

    def init_prototype(self, state:int):
        """Load the cached prototype tensor for *state*, or compute and cache it."""
        num_classes = self.il_trainer.model.classificationModel.num_classes
        feature_temp_path = os.path.join(self.il_trainer.params['ckp_path'], 'state{}'.format(state), 'features')
        create_dir(feature_temp_path)
        path = os.path.join(self.il_trainer.params['ckp_path'], 'state{}'.format(state))
        file_name = "prototype_features.pickle"
        if os.path.isfile(os.path.join(path, file_name)):
            # load the prototype
            with open(os.path.join(path, file_name), "rb") as f:
                self.prototype_features = pickle.load(f)
        else:
            # calculate the features for all training data
            self._cal_features(feature_temp_path, state)
            # use the temp file for features to calculate the prototype
            count = torch.zeros(num_classes, self.num_anchors, 1)
            self.prototype_features = torch.zeros(num_classes, self.num_anchors, 256 * self.num_anchors) #TODO 256 auto get
            num_files = len(os.listdir(feature_temp_path))
            # First pass: total positive-anchor counts across all images.
            for i in range(num_files):
                with open(os.path.join(feature_temp_path,'f_{}.pickle'.format(i)), 'rb') as f:
                    _ , num = pickle.load(f)
                    count += num
            # Second pass: average the per-image feature sums into the prototype.
            for i in range(num_files):
                with open(os.path.join(feature_temp_path,'f_{}.pickle'.format(i)), 'rb') as f:
                    feat, _ = pickle.load(f)
                    self.prototype_features += (feat / torch.clamp(count, min=1))
            # store the prototype
            with open(os.path.join(path, file_name), "wb") as f:
                pickle.dump(self.prototype_features, f)

    def cal_examplar(self, state:int):
        """Rank images for each new class/anchor by distance to the prototype
        and persist the ranking for herding-based exemplar selection."""
        def distance_fun(a, b):
            # Euclidean distance over the feature dimension.
            return torch.norm(a - b, dim=3)

        file_path = os.path.join(self.il_trainer.params['ckp_path'], 'state{}'.format(state))
        file_name = "classification_herd_samples.pickle"
        # if temp file exists, then return
        if os.path.isfile(os.path.join(file_path, file_name)):
            return
        feature_temp_path = os.path.join(self.il_trainer.params['ckp_path'], 'state{}'.format(state), 'features')
        create_dir(feature_temp_path)
        num_classes = len(self.il_trainer.params.states[state]['knowing_class']['id'])
        num_new_classes = len(self.il_trainer.params.states[state]['new_class']['id'])
        if num_classes != self.il_trainer.model.classificationModel.num_classes:
            raise ValueError("Current model has {} classes, but state file has {} classes.".format(self.il_trainer.model.classificationModel.num_classes, num_classes))
        # get the number of the feature files
        num_files = len(os.listdir(feature_temp_path))
        # if the feature temp file not exits, then calculate features
        if num_files == 0 or self.prototype_features == None:
            self.init_prototype(state)
            if num_files == 0:
                num_files = len(os.listdir(feature_temp_path))
                if num_files == 0:
                    raise ValueError("Unknowing Error in cal_examplar")
        feats = []
        count = torch.zeros(num_classes, self.num_anchors, 1)
        for i in range(num_files):
            with open(os.path.join(feature_temp_path,'f_{}.pickle'.format(i)), 'rb') as f:
                feat, num = pickle.load(f)
                feats.append((feat / torch.clamp(num, min=1)).unsqueeze(dim=0))
                count += num
        feats = torch.cat(feats)
        # Zero out distances for (class, anchor) cells with no positive anchors.
        has_target_mask = ~(torch.sum(feats, dim=3) == 0)
        distance_target = torch.zeros_like(has_target_mask).long()
        distance_target[has_target_mask] = 1
        distance = distance_fun(feats, self.prototype_features.unsqueeze(dim=0))
        distance *= distance_target
        dataset = IL_dataset(self.il_trainer.params,
                             transform=transforms.Compose([Normalizer(), Resizer()]),
                             start_state=state)
        sampler = AspectRatioBasedSampler(dataset, batch_size = self.il_trainer.params['batch_size'], drop_last=False, shuffle=False)
        # mapping index to real image id
        img_ids = []
        for group in sampler.groups:
            img_ids.extend(group)
        for i in range(len(img_ids)):
            img_ids[i] = dataset.image_ids[img_ids[i]]
        img_ids = torch.tensor(img_ids)
        sample_file = dict()
        # Only the classes new in this state get a ranking.
        for class_id in range(num_classes - num_new_classes, num_classes):
            coco_id = dataset.label_to_coco_label(class_id)
            sample_file[coco_id] = dict()
            for anchor_id in range(self.num_anchors):
                cur_distance = distance[:,class_id, anchor_id]
                nonzero_ids = cur_distance.nonzero().squeeze()
                # Image indices sorted by ascending distance to the prototype.
                sorted_ids = nonzero_ids[cur_distance[nonzero_ids].argsort()] # nonzero_ids.gather(0, cur_distance[nonzero_ids].argsort())
                sorted_ids = img_ids[sorted_ids]
                sample_file[coco_id][anchor_id] = sorted_ids.tolist()
        with open(os.path.join(file_path, file_name),'wb') as f:
            pickle.dump((sample_file, count), f)

    def __del__(self):
        # Free the (potentially large) prototype tensor on teardown.
        if self.prototype_features != None:
            del self.prototype_features
96886 | <reponame>zh012/flask-dropin<gh_stars>10-100
#!/usr/bin/env python
from setuptools import setup
options = dict(
name='Flask-DropIn',
version='0.0.1',
description='Flask-DropIn let you easily organize large flask project.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/zh012/flask-dropin.git',
packages=['flask_dropin'],
license='MIT',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'click',
'six',
],
tests_require=[
'pytest>=2.7.1',
'pytest-cov>=2.2.0',
'tox',
],
entry_points={
'console_scripts': []
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
)
setup(**options)
| StarcoderdataPython |
187854 | <reponame>Monia234/NCI-GwasQc
from pathlib import Path
from typing import Optional
from pydantic import BaseModel, Field
class ReferenceFiles(BaseModel):
    """A list of reference files used by the pipeline.

    .. code-block:: yaml

        reference_files:
            illumina_manifest_file: /path/to/bpm/file/GSAMD-24v1-0_20011747_A1.bpm
            thousand_genome_vcf: /path/to/thousand/genome/ALL.wgs.phase3_shapeit2_mvncall_integrated_v5.20130502.sites.vcf.gz
            thousand_genome_tbi: /path/to/thousand/genome/ALL.wgs.phase3_shapeit2_mvncall_integrated_v5.20130502.sites.vcf.gz.tbi
    """

    # Required Illumina array manifest (BPM).
    illumina_manifest_file: Path = Field(..., description="Path to the Illumina provided BPM file.")
    # Optional cluster file; defaults to None when no EGT is available.
    illumina_cluster_file: Optional[Path] = Field(
        None, description="Path to the array cluster EGT file."
    )
    thousand_genome_vcf: Path = Field(..., description="Path to the 1000 Genomes VCF file.")
    thousand_genome_tbi: Path = Field(
        ..., description="Path to the corresponding index for the 1000 Genomes VCF file."
    )

    @staticmethod
    def schema_rst():
        """Tweak schema for rendering in Sphinx."""
        import copy
        import json

        # Deep-copy so retitling below does not mutate pydantic's cached schema.
        content = copy.deepcopy(ReferenceFiles.schema())
        content["title"] = "Reference Files"

        return json.dumps(content, indent=2)
| StarcoderdataPython |
1767588 | import sys
import json
from sklearn import preprocessing
from sklearn import feature_extraction
from iologreg import IOLogisticRegression
# Python 2 script (print statement, xrange): train an input-output logistic
# regression from three TSV files: argv[1] labels+features, argv[2] instances
# with candidate-label neighborhoods, argv[3] gold labels.
features = []
labels = {}
invlabels = {}
# read labels and associated features
for line in open(sys.argv[1]):
    (label, f) = line.strip().split('\t')
    # Label ids are assigned in first-seen order; invlabels is the inverse map.
    invlabels[len(labels)] = label
    labels[label] = len(labels)
    features.append(json.loads(f))
label_dict = feature_extraction.DictVectorizer()
label_features = label_dict.fit_transform(features).toarray()
sys.stderr.write('        LABELS: %s\n' % ' '.join(labels.keys()))
sys.stderr.write('LABEL-FEATURES: %s\n' % ' '.join(label_dict.get_feature_names()))
out_dim = len(label_dict.get_feature_names())
ids = {}
X = []
N = []
# read training instances and neighborhoods
for line in open(sys.argv[2]):
    (id, xfeats, n) = line.strip().split('\t')
    ids[id] = len(ids)
    X.append(json.loads(xfeats))
    neighborhood = json.loads(n)['N']
    if len(neighborhood) == 0:
        sys.stderr.write('[ERROR] empty neighborhood in line:\n%s' % line)
        sys.exit(1)
    if len(neighborhood) == 1:
        sys.stderr.write('[WARNING] neighborhood for id="%s" is singleton: %s\n' % (id, str(neighborhood)))
    # Map neighborhood label strings to integer label ids.
    n = [labels[x] for x in neighborhood]
    N.append(n)
X_dict = feature_extraction.DictVectorizer()
X = X_dict.fit_transform(X).toarray()
sys.stderr.write('       rows(X): %d\n' % len(X))
sys.stderr.write('INPUT-FEATURES: %s\n' % ' '.join(X_dict.get_feature_names()))
in_dim = len(X_dict.get_feature_names())
# read gold labels
Y = [0 for x in xrange(len(X))]
for line in open(sys.argv[3]):
    (id, y) = line.strip().split('\t')
    Y[ids[id]] = labels[y]
assert len(X) == len(N)
assert len(Y) == len(X)
model = IOLogisticRegression()
model.fit(in_dim, out_dim, X, N, Y, label_features, len(labels), iterations = 1000, minibatch_size=10)
# Print each instance's predicted distribution over labels (nonzero entries only).
D = model.predict_proba(X, N)
for row in D:
    dist = {}
    for i in range(len(row)):
        if row[i] > 0.0: dist[invlabels[i]] = row[i]
    print dist
| StarcoderdataPython |
3272036 | <filename>paginas/urls.py
from django.urls import path
from . import views
# Route /pagina/<slug> to the generic page view; reversed by name "pagina".
urlpatterns = [
    path('pagina/<str:slug>', views.pagina, name="pagina"),
]
| StarcoderdataPython |
3306179 | <reponame>paulorssalves/coletivo-rexiste
from django.apps import AppConfig
class RexisteConfig(AppConfig):
    """Django application configuration for the 'rexiste' app."""
    name = 'rexiste'
| StarcoderdataPython |
3223152 | #!/usr/bin/env python
"""
CREATED AT: 2022/1/4
Des:
https://leetcode.com/problems/complement-of-base-10-integer/
GITHUB: https://github.com/Jiezhi/myleetcode
Difficulty: Easy
Tag: Bit
See:
Time Spent: min
"""
class Solution:
def bitwiseComplement(self, n: int) -> int:
"""
Runtime: 43 ms, faster than 9.05%
Memory Usage: 14.3 MB, less than 38.43%
0 <= n < 10^9
:param n:
:return:
"""
ret = []
n_str = format(n, 'b')
for c in n_str:
if c == '1':
ret.append('0')
else:
ret.append('1')
return int(''.join(ret), 2)
def test():
    """Smoke-test bitwiseComplement against the LeetCode examples."""
    assert Solution().bitwiseComplement(n=5) == 2
    assert Solution().bitwiseComplement(n=7) == 0
    assert Solution().bitwiseComplement(n=10) == 5


if __name__ == '__main__':
    test()
| StarcoderdataPython |
1637264 | <filename>sources/genericStatusPage.py
import requests
from utils import getName, getGlobalStatus
def getMetrics(url: str) -> dict:
    """Fetch a statuspage.io-style JSON page and map component names to codes.

    Keys are '<page>_<component>' (normalized via getName); values come from
    getStatus. A '<page>_global' entry aggregates them via getGlobalStatus.
    NOTE(review): no HTTP error handling — a non-JSON response raises.
    """
    data = requests.get(url).json()
    metrics = {}
    base = getName(data["page"]["name"]) + "_"
    for c in data["components"]:
        metrics[base + getName(c["name"])] = getStatus(c["status"])
    metrics[base + "global"] = getGlobalStatus(metrics)
    return metrics
def getStatus(txt: str) -> int:
    """Map a status-page component state to a severity code.

    0 = operational, 1 = partial_outage, 2 = anything else (e.g. major outage).
    """
    codes = {"operational": 0, "partial_outage": 1}
    return codes.get(txt, 2)
| StarcoderdataPython |
4839660 | #!/usr/bin/env python
"""
Refactoring tests work a little bit similar to integration tests. But the idea
is here to compare two versions of code. If you want to add a new test case,
just look at the existing ones in the ``test/refactor`` folder and copy them.
"""
import os
import platform
import re
from parso import split_lines
from functools import reduce
import jedi
from .helpers import test_dir
class RefactoringCase(object):
    """A single refactoring scenario parsed from a ``test/refactor`` file.

    Bundles the input code, the cursor position at which the refactoring is
    triggered, the refactoring arguments, and the expected result text.
    """

    def __init__(self, name, code, line_nr, index, path, kwargs, type_, desired_result):
        self.name = name  # section header from the test file
        self._code = code  # source the refactoring runs against
        self._line_nr = line_nr  # 1-based trigger line inside ``code``
        self._index = index  # trigger column
        self._path = path  # path of the originating test file
        self._kwargs = kwargs  # extra keyword args for the refactor call
        self.type = type_  # expectation type: 'diff', 'error' or 'text'
        self._desired_result = desired_result  # expected output text

    def get_desired_result(self):
        """Return the expected output, adapted to the host platform."""
        if platform.system().lower() == 'windows' and self.type == 'diff':
            # Windows uses backslashes to separate paths.
            lines = split_lines(self._desired_result, keepends=True)
            for i, line in enumerate(lines):
                if re.search(' import_tree/', line):
                    lines[i] = line.replace('/', '\\')
            return ''.join(lines)
        return self._desired_result

    @property
    def refactor_type(self):
        """Name of the jedi Script method to invoke, taken from the file name."""
        f_name = os.path.basename(self._path)
        return f_name.replace('.py', '')

    def refactor(self, environment):
        """Run the refactoring through jedi and return its result object."""
        project = jedi.Project(os.path.join(test_dir, 'refactor'))
        script = jedi.Script(self._code, path=self._path, project=project, environment=environment)
        refactor_func = getattr(script, self.refactor_type)
        return refactor_func(self._line_nr, self._index, **self._kwargs)

    def __repr__(self):
        return '<%s: %s:%s>' % (self.__class__.__name__,
                                self.name, self._line_nr - 1)
def _collect_file_tests(code, path, lines_to_execute):
    """Yield a RefactoringCase for every ``# ----- name`` section in ``code``.

    Each section is the input code (up to a ``# +++++`` divider) followed by
    the expected result.  A ``#? <column> [type] [kwargs]`` line inside the
    input marks where the refactoring is triggered.

    Raises Exception if a section lacks a ``#?`` marker, if no section
    matches at all, or if trailing content follows the last section.
    """
    r = r'^# -{5,} ?([^\n]*)\n((?:(?!\n# \+{5,}).)*\n)' \
        r'# \+{5,}\n((?:(?!\n# -{5,}).)*\n)'
    match = None
    for match in re.finditer(r, code, re.DOTALL | re.MULTILINE):
        name = match.group(1).strip()
        first = match.group(2)
        second = match.group(3)
        # Locate the ``#?`` marker that carries the trigger position.
        p = re.match(r'((?:(?!#\?).)*)#\? (\d*)( error| text|) ?([^\n]*)', first, re.DOTALL)
        if p is None:
            # Bug fix: an unreachable ``continue`` used to follow this raise.
            raise Exception("Please add a test start.")
        until = p.group(1)
        index = int(p.group(2))
        type_ = p.group(3).strip() or 'diff'
        if p.group(4):
            # Optional literal kwargs for the refactoring call.
            kwargs = eval(p.group(4))
        else:
            kwargs = {}
        # The trigger line is the line right after the text preceding the marker.
        line_nr = until.count('\n') + 2
        if lines_to_execute and line_nr - 1 not in lines_to_execute:
            continue
        yield RefactoringCase(name, first, line_nr, index, path, kwargs, type_, second)
    if match is None:
        raise Exception("Didn't match any test")
    if match.end() != len(code):
        raise Exception("Didn't match until the end of the file in %s" % path)
def collect_dir_tests(base_dir, test_files):
    """Yield RefactoringCase objects from every matching .py file in base_dir."""
    for f_name in os.listdir(base_dir):
        selected = [item for item in test_files.items() if item[0] in f_name]
        lines_to_execute = []
        for _, line_numbers in selected:
            lines_to_execute += line_numbers
        # Skip non-Python files, and—when a filter is given—unselected files.
        if not f_name.endswith(".py") or (test_files and not selected):
            continue
        path = os.path.join(base_dir, f_name)
        with open(path, newline='') as f:
            code = f.read()
        yield from _collect_file_tests(code, path, lines_to_execute)
| StarcoderdataPython |
172435 | """Test ThroughputReporter"""
import pytest
@pytest.fixture
def klass():
    """Return CUT."""
    # CUT = class under test; imported lazily so collection doesn't fail
    # before the fixture is actually used.
    from agile_analytics import ThroughputReporter
    return ThroughputReporter
def test_title(klass):
"""Ensure the title gets set."""
r = klass(
title="Weekly Throughput"
)
assert r.title == "Weekly Throughput"
def test_period(klass):
"""Ensure the period can be set."""
r = klass(title="Weekly Throughput")
r.period = "weekly"
assert r.period == "weekly"
def test_date_assignment(klass, days_ago):
"""Ensure the range can be set."""
r = klass(title="Weekly Throughput")
r.start_date = days_ago(30)
r.end_date = days_ago(0)
assert r.start_date == days_ago(30)
assert r.end_date == days_ago(0)
def test_date_range_reconcile(klass, datetime, tzutc):
    """Ensure the right dates are set when passed two dates and a weekly period arg."""
    reporter = klass(title="Weekly Throughput")
    reporter.period = "weekly"
    reporter.start_date = datetime(2016, 5, 21, 0, 0, 0)
    reporter.end_date = datetime(2016, 6, 21, 11, 59, 59)
    # Weekly windows snap outward to Sunday..Saturday boundaries, tz-aware.
    assert reporter.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc)  # Sunday
    assert reporter.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc)  # Saturday
def test_date_reconcile_post_hoc(klass, datetime, tzutc):
    """When you set the period after the dates, the dates should be adjusted."""
    reporter = klass(title="Weekly Throughput")
    reporter.start_date = datetime(2016, 5, 21, 0, 0, 0)
    reporter.end_date = datetime(2016, 6, 21, 11, 59, 59)
    reporter.period = "weekly"  # assigning the period triggers reconciliation
    assert reporter.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc)  # Sunday
    assert reporter.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc)  # Saturday
def test_report_summary(klass, datetime, tzutc):
    """report_on returns an object with metadata about the report."""
    window = dict(
        title="Weekly Throughput",
        start_date=datetime(2016, 5, 15, 0, 0, 0),
        end_date=datetime(2016, 6, 25, 11, 59, 59),
        period="weekly",
    )
    reporter = klass(**window)
    # The summary echoes the configuration, with dates made timezone-aware.
    expected = dict(
        title="Weekly Throughput",
        start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc),
        end_date=datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc),
        period="weekly",
    )
    assert reporter.report_on([]).summary == expected
def test_report_summary_table(klass, datetime, date, AnalyzedAgileTicket, tzutc):
    """report_on returns an object with metadata about the report."""
    # Reporter configured for a six-week weekly window.
    r = klass(
        title="Weekly Throughput",
        start_date=datetime(2016, 5, 15, 0, 0, 0),
        end_date=datetime(2016, 6, 25, 11, 59, 59),
        period="weekly",
    )
    # Tickets whose fourth arg (ended) falls inside the window count toward
    # that week's throughput; the final ticket started but never finished,
    # so it must NOT be counted.
    analyzed_issues = [
        AnalyzedAgileTicket("KEY-1", {}, {}, dict(state="FOO", entered_at=datetime(2016, 5, 16, 0, 0, 0, tzinfo=tzutc))),
        AnalyzedAgileTicket("KEY-2", {}, {}, dict(state="FOO", entered_at=datetime(2016, 5, 17, 0, 0, 0, tzinfo=tzutc))),
        AnalyzedAgileTicket("KEY-3", {}, {}, dict(state="FOO", entered_at=datetime(2016, 5, 17, 0, 0, 0, tzinfo=tzutc))),
        AnalyzedAgileTicket("KEY-4", {}, {}, dict(state="FOO", entered_at=datetime(2016, 5, 20, 0, 0, 0, tzinfo=tzutc))),
        AnalyzedAgileTicket("KEY-5", {}, {}, dict(state="FOO", entered_at=datetime(2016, 6, 8, 0, 0, 0, tzinfo=tzutc))),
        AnalyzedAgileTicket("KEY-6", {}, {}, dict(state="FOO", entered_at=datetime(2016, 6, 8, 0, 0, 0, tzinfo=tzutc))),
        AnalyzedAgileTicket("KEY-7", {}, {}, dict(state="FOO", entered_at=datetime(2016, 6, 8, 0, 0, 0, tzinfo=tzutc))),
        AnalyzedAgileTicket("KEY-8", {}, {}, dict(state="FOO", entered_at=datetime(2016, 6, 8, 0, 0, 0, tzinfo=tzutc))),
        AnalyzedAgileTicket("KEY-7", {}, dict(state="FOO", entered_at=datetime(2016, 6, 8, 0, 0, 0, tzinfo=tzutc)), {}),  # Started, but not finished this week
    ]
    # Header row plus one row per Sunday-anchored week in the window:
    # 4 completions in week 1, 4 in week 4, none elsewhere.
    expected = [
        ["Week", "Completed"],
        [date(2016, 5, 15), 4],
        [date(2016, 5, 22), 0],
        [date(2016, 5, 29), 0],
        [date(2016, 6, 5), 4],
        [date(2016, 6, 12), 0],
        [date(2016, 6, 19), 0],
    ]
    report = r.report_on(analyzed_issues)
    assert report.table[0] == expected[0]
    assert len(report.table) == len(expected)
    # Compare the week label and completed-count cells row by row.
    for i in range(0, len(expected)):
        expected_row = expected[i]
        actual_row = report.table[i]
        assert expected_row[0] == actual_row[0]
        assert expected_row[1] == actual_row[1]
| StarcoderdataPython |
48650 | import typing as t
import numpy as np
import pandas as pd
from house_prices_regression_model import __version__ as VERSION
from house_prices_regression_model.processing.data_manager import load_pipeline
from house_prices_regression_model.config.core import load_config_file, SETTINGS_PATH
from house_prices_regression_model.processing.data_validation import validate_inputs
# Config files: resolve the versioned pipeline artifact name and load the
# trained pipeline once at import time so every prediction reuses it.
config = load_config_file(SETTINGS_PATH)
PIPELINE_ARTIFACT_NAME = config["PIPELINE_ARTIFACT_NAME"]
pipeline_file_name = f"{PIPELINE_ARTIFACT_NAME}_v{VERSION}.pkl"
cb_pipe = load_pipeline(file_name=pipeline_file_name)
def make_prediction(*, input_data: t.Union[pd.DataFrame, dict]) -> dict:
    """Make a prediction using a saved model pipeline.

    Parameters
    ----------
    input_data:
        A DataFrame, or any dict/records structure pd.DataFrame accepts.

    Returns
    -------
    dict with a single ``model_output`` key: a list of price predictions on
    success, or an error-description string if input validation failed.
    (Bug fix: the return annotation previously said ``list`` although the
    function has always returned a dict.)
    """
    df = pd.DataFrame(input_data)
    validated_df, error_dict = validate_inputs(input_data=df)
    results = {'model_output': None}
    if not error_dict:
        # The model was trained on log(price); invert the transform here.
        log_predictions = cb_pipe.predict(validated_df)
        results['model_output'] = [np.exp(pred) for pred in log_predictions]
    else:
        # Build the error message only when validation actually failed.
        errors_list = list(error_dict.values())
        results['model_output'] = 'Errors making prediction:' + ' '.join(map(str, errors_list))
    return results
| StarcoderdataPython |
3327215 | import networkx as nx
import networkx.convert as convert
# Class "GraphQW" is a NetworkX graph with generated individual attributes
class GraphQW(nx.DiGraph):
def __init__(self, g = None, q = None, dq = None, limcoal = None, dw = None):
self.graph = {}
self._node = self.node_dict_factory()
self._adj = self.adjlist_outer_dict_factory() # empty adjacency dict
self._pred = self.adjlist_outer_dict_factory() # predecessor
self._succ = self._adj # successor
if g is not None:
self.graph.update(g.graph)
self._node.update((n, dd.copy()) for n, dd in g.nodes.items())
self._adj.update(g._adj)
if not nx.is_directed(g):
g = nx.DiGraph(g)
self._pred.update(g._pred)
self._succ.update(g._succ)
nx.set_node_attributes(self, name='indeg', values=dict(self.in_degree(weight='weight')))
self.set_quota(q, dq)
self.set_size(dw)
self.coal = limcoal
for edge in self.edges(data = True):
if 'weight' not in edge[2]:
self.set_edge_attr('weight', 1)
break
def set_edge_attr(self, name, value):
edges = self.edges()
edge_list = dict(zip(edges, [value] * len(edges)))
nx.set_edge_attributes(self, edge_list, name)
def set_size(self, dw):
if dw is None:
pp = self.out_degree(weight='weight')
nx.set_node_attributes(self, name='size', values=dict(self.out_degree(weight='weight')))
else:
self.set_param('size', dw)
def set_quota(self, q, dq):
if dq is None:
ql = dict()
d = self.in_degree(weight='weight')
for x, y in d:
ql[x] = y * q / 100
nx.set_node_attributes(self, name='q', values=ql)
else:
self.set_param('q', dq)
def set_param(self, name, data):
if data is not None:
if isinstance(data, dict):
nx.set_node_attributes(self, name=name, values=data)
elif isinstance(data, list):
if len(self.nodes()) == len(data):
nx.set_node_attributes(self, name=name, values=dict(zip(self.nodes(), data)))
elif isinstance(data, float) or isinstance(data, int):
nx.set_node_attributes(self, name=name, values=dict(zip(self.nodes(), [data] * len(self.nodes()))))
def aggregate(self, name=''):
pers = dict(zip(self.nodes(), [0] * len(self)))
for edge in self.edges(data=True):
pers[edge[0]] += self.nodes[edge[1]]['size']*edge[2]['weight']
pers = self.normalize(pers)
self.set_param(name, pers)
return pers
@staticmethod
def normalize(arr):
s = sum(arr.values())
if s != 0:
for el in arr:
arr[el] /= s
return arr
def write_centrality(self, filename, separator=';', mode='a+', additional_attr=[], additional_headers=[]):
"""
Write SRIC/LRIC centrality to file
:param filename: filename
:param separator: line separator
:param mode: mode while opening a file. If not provided, it defaults to 'a' (append)
:param additional_attr: list of additional parameters which will be appended to each line
:param additional_headers: headers of additional parameters
:return: None
"""
self.write(self.nodes(data=True), filename, separator, mode, 1, additional_attr, additional_headers)
def write_edgelist(self, filename, separator=';', mode='a+', additional_attr=[], additional_headers=[]):
"""
Write edges from SRIC/LRIC graph to file
:param filename: filename
:param separator: line separator
:param mode: mode while opening a file. If not provided, it defaults to 'a' (append)
:param additional_attr: list of additional parameters which will be appended to each line
:param additional_headers: headers of additional parameters
:return:
"""
self.write(self.edges(data=True), filename, separator, mode, 0, additional_attr, additional_headers)
@staticmethod
def write(row_dict, filename, separator=';', mode='a+', type_header=0, additional_attr=[], additional_headers=[]):
f = open(filename, mode)
lim = 1000
arr = []
if type_header == 0:
arr.append(separator.join(additional_headers+['From', 'To', 'Edge Type', 'Value']))
elif type_header == 1:
arr.append(separator.join(['Node', 'Centrality', 'Value']))
for row in row_dict:
for attr in row[-1]:
if attr not in ['indeg', 'q', 'size']:
if len(row) == 3:
arr.append(separator.join(additional_attr+[str(row[0]), str(row[1]), attr, str(row[2][attr])]))
else:
arr.append(separator.join(additional_attr+[str(row[0]), attr, str(row[1][attr])]))
if len(arr) > lim:
f.write('\n'.join(arr))
arr = []
f.write('\n'.join(arr))
| StarcoderdataPython |
1688302 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Long description comes straight from the README.
with open('README.md') as f:
    readme = f.read()

# NOTE(review): ``license`` shadows the builtin, and setuptools expects a
# short license identifier here rather than the full LICENSE text -- confirm
# intent before changing packaging metadata.
with open('LICENSE') as f:
    license = f.read()

setup(
    name='rsabias',
    version='0.1',
    description='Tool to analyse RSA key generation and classification',
    long_description=readme,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/crocs-muni/RSABias',
    license=license,
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'rsabias=rsabias.cli:main'
        ]
    },
    # Fully pinned dependency set (== versions) for reproducible installs.
    install_requires=[
        'cycler==0.10.0',
        'gmpy2==2.0.8',
        'kiwisolver==1.2.0',
        'matplotlib==3.1.0',
        'mpmath==1.1.0',
        'numpy==1.22.0',
        'pandas==0.24.2',
        'pycryptodome==3.9.8',
        'pyparsing==2.4.7',
        'python-dateutil==2.8.1',
        'pytz==2020.1',
        'scipy==1.3.0',
        'seaborn==0.9.0',
        'six==1.15.0',
        'sympy==1.6.1',
        'xarray==0.12.1'
    ],
    include_package_data=True
)
| StarcoderdataPython |
1794208 | #!/usr/bin/env python3
import sys
sys.path.insert( 0, '..' )
# this will later be a session multiplexer object in a module abstraction library
from Engines.POF_com import Session as POFSession
def Main():
    """Log in to POF, search for online users, and broadcast a message to them."""
    # NOTE(review): 'config.ini' is resolved relative to the current working
    # directory -- confirm the script is always launched from its own folder.
    config = POFSession.Config("config.ini")
    testSession = POFSession(config)
    testSession.login()
    # The literal 5 presumably limits the search (pages or results) --
    # TODO confirm against POFSession.searchUsers.
    users = testSession.searchUsers(config, 5, online_only=True)
    print("Total Users Found: {0}".format(len(users)))
    testSession.broadcastMessage(users, "hey whats up")


if __name__ == '__main__':
    Main()
| StarcoderdataPython |
3389270 | <reponame>finbyz/finbyz_dashboard
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.utils import flt, cint, getdate, now, date_diff
# from frappe.utils.dashboard import cache_source
from finbyz_dashboard.finbyz_dashboard.dashboard_overrides.dashboard_chart import cache_source
from erpnext.stock.utils import get_stock_value_from_bin
from finbyz_dashboard.finbyz_dashboard.dashboard_overrides.data import get_timespan_date_range
@frappe.whitelist()
def get(chart_name=None, chart=None, no_cache=None, filters=None, from_date=None,
        to_date=None, timespan=None, time_interval=None, heatmap_year=None):
    """Dashboard chart source: top 10 items by consumed stock value within the
    selected timespan, charted with their current balance qty / stock value.

    Only ``filters`` is used here; the other arguments belong to the standard
    dashboard chart-source signature.

    Fixes: user-supplied filter values are now bound as query parameters
    instead of being formatted into the SQL (injection risk on a whitelisted
    endpoint); ``join_con_bin`` is appended instead of overwritten, so the
    warehouse join survives when both company and item_group are set; an
    unused ``datapoint2`` local was removed.
    """
    labels, datapoints = [], []
    filters = frappe.parse_json(filters)
    from_date, to_date = get_timespan_date_range(filters.timespan)
    # Column name chosen from a fixed whitelist -- never raw user input.
    bal_or_qty = 'actual_qty' if filters.get('bal_or_qty') == "Balance Qty" else 'stock_value'

    where_con = where_con_bin = join_con = join_con_bin = ''
    values = {'from_date': from_date, 'to_date': to_date}
    if filters.get('company'):
        where_con += " and sle.company = %(company)s"
        join_con_bin += " JOIN `tabWarehouse` as w on w.name = bin.warehouse"
        where_con_bin += " and w.company = %(company)s"
        values['company'] = filters.get('company')
    if filters.get('item_group'):
        join_con = " JOIN `tabItem` as i on i.name = sle.item_code"
        where_con += " and i.item_group = %(item_group)s"
        # Append (was '='): keep the warehouse join when company is also set.
        join_con_bin += " JOIN `tabItem` as i on i.name = bin.item_code"
        where_con_bin += " and i.item_group = %(item_group)s"
        values['item_group'] = filters.get('item_group')

    # Top 10 items by absolute consumed stock value in the period.
    stock_ledger_entries = frappe.db.sql("""
        select sle.item_code, sum(abs(sle.stock_value_difference)) as stock_value_difference
        from `tabStock Ledger Entry` as sle
        {join_con}
        where sle.actual_qty < 0 and sle.docstatus < 2 and is_cancelled = 0
            and sle.posting_date between %(from_date)s AND %(to_date)s{where_con}
        group by sle.item_code
        order by stock_value_difference DESC
        limit 10
    """.format(join_con=join_con, where_con=where_con), values, as_dict=1)

    if not stock_ledger_entries:
        return []

    # frappe.db.sql expands a list bound to an ``in %(...)s`` placeholder.
    values['item_codes'] = [sle.item_code for sle in stock_ledger_entries]

    # Current balance (qty or value) for each of the top items.
    balance_qty_dict = frappe.db.sql("""
        select bin.item_code, sum(bin.{bal_or_qty}) as balance
        from `tabBin` as bin
        {join_con_bin}
        where bin.item_code in %(item_codes)s{where_con_bin}
        group by bin.item_code
    """.format(bal_or_qty=bal_or_qty, join_con_bin=join_con_bin, where_con_bin=where_con_bin), values, as_dict=1)

    for row in balance_qty_dict:
        labels.append(_(row.get("item_code")))
        datapoints.append(row.get("balance"))

    return {
        "labels": labels,
        "datasets": [{
            "name": filters.get('bal_or_qty'),
            "values": datapoints
        }],
        "type": "bar"
    }
3276308 | <gh_stars>0
import numpy as np
def measure_frequency_response(filter_function, freqs=np.logspace(-3, 0, 1024), input_length=2**18):
    """
    Empirically measure the frequency response of a filtering function using sine waves.

    Parameters
    ----------
    filter_function : callable
        Takes a 1-D float32 array of samples and returns the filtered samples.
    freqs : array of floats
        normalized frequencies at which to measure response (0 = DC, 1.0 = Nyquist)
    input_length : int
        number of samples in each sinewave

    Returns
    -------
    freqs : array of floats
        the frequency array (for convenience)
    mags : array of floats
        the measured output power at each frequency (sum of |y|^2 over the
        middle half of the output, skipping filter edge transients)

    Notes
    -----
    Each probe tone gets a random phase, so repeated calls differ slightly
    unless the NumPy global random seed is fixed.  The array default argument
    is evaluated once and only read, which is safe here.
    """
    mags = []
    tt = np.arange(input_length)
    for freq in freqs:  # the enumerate index was unused; iterate values directly
        # Random phase avoids systematically aligning tones with the window.
        x = np.sin(np.pi * tt * freq + np.random.uniform(0, 2 * np.pi)).astype('float32')
        result = filter_function(x)
        nout = result.shape[0]
        # Discard the first and last quarters to skip transient edge effects.
        result = result[nout // 4:3 * nout // 4]
        mags.append(np.sum(np.abs(result) ** 2))
    return freqs, np.array(mags)
| StarcoderdataPython |
3328726 | <reponame>openeuler-mirror/pkgship
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
# -*- coding:utf-8 -*-
"""
The installdep CSV to download the unit tests
"""
from packageship.application.core.depend import DispatchDepend
from test.cli.download_csv import DownloadDeppendTestBase
class TestInstallDownload(DownloadDeppendTestBase):
    """Unit tests for downloading install-dependency CSV results."""

    binary_file = "os-version-binary.json"
    component_file = "os-version-binary-component.json"

    @staticmethod
    def _parameter(extra):
        """Build the request body for an installdep query on package Judy."""
        return {
            "packagename": ["Judy"],
            "depend_type": "installdep",
            "parameter": extra,
        }

    def _check_download(self, parameter, csv_depend):
        """Run an installdep query, export it, and diff against the fixture CSVs."""
        result, _ = self.validate_data(parameter)
        depend = DispatchDepend.execute(**result)
        folder_path = depend.download_depend_files()
        csv_folder_path = self.get_csv_file_path(csv_depend)
        self.comparison_data(csv_folder_path, folder_path)

    def test_install_download_bin_level_0(self):
        """Neither database nor query level specified."""
        self._check_download(self._parameter({}), "installdep_Judy_level_0")

    def test_install_download_bin_level_2(self):
        """Query level limited to two."""
        self._check_download(self._parameter({"level": 2}),
                             "installdep_Judy_level_2")

    def test_install_download_bin_db_level_0(self):
        """Database specified, no level limit."""
        self._check_download(self._parameter({"db_priority": ["os-version"]}),
                             "installdep_Judy_db_level_0")

    def test_install_download_bin_db_level_2(self):
        """Database specified and level limited to two."""
        self._check_download(
            self._parameter({"db_priority": ["os-version"], "level": 2}),
            "installdep_Judy_db_level_2",
        )
| StarcoderdataPython |
115278 | from app import app
from .conf import tasks_api
from . import tasks  # initialize routes of tasks (import runs route registration)
# Bind the tasks API (with its registered routes) to the Flask app instance.
tasks_api.init_app(app)
| StarcoderdataPython |
1692542 | from django.apps import AppConfig
class ReferenciaConfig(AppConfig):
    """Django application configuration for the ``referencia`` app."""
    name = 'referencia'
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.