index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
993,000 | da8e7d5ad85f8736e4aadd118314b3ec39e2090b | import pytest
from gumo.core.domain.entity_key import KeyPair
from gumo.core.domain.entity_key import NoneKey
from gumo.core.domain.entity_key import EntityKey
from gumo.core.domain.entity_key import EntityKeyFactory
from gumo.core.domain.entity_key import IncompleteKey
class TestKeyPair:
    """Validation of KeyPair construction with invalid kind/name types."""

    def test_invalid_name(self):
        # A non-str kind and a non-str/int name must both be rejected.
        with pytest.raises(ValueError, match='kind must be an instance of str,'):
            KeyPair(kind=12345, name='name')
        with pytest.raises(ValueError, match='name must be an instance of str or int'):
            KeyPair(kind='Kind', name={'key': 'value'})
class TestKeyPairWithStr:
    """Behaviour of a KeyPair whose identifier is a string name."""

    # Shared fixture: a name-type (string identifier) key pair.
    key_pair = KeyPair(kind='Kind', name='name')

    def test_valid_name(self):
        # Equality is by value; a str name makes it a "name" key, not an "id" key.
        assert isinstance(self.key_pair, KeyPair)
        assert self.key_pair == KeyPair(kind='Kind', name='name')
        assert self.key_pair.is_name()
        assert not self.key_pair.is_id()

    def test_invalid_kind_or_name(self):
        # Quote characters are forbidden in both kind and name.
        with pytest.raises(ValueError, match='do not include quotes'):
            KeyPair(kind='Kind', name='name"')
        with pytest.raises(ValueError, match='do not include quotes'):
            KeyPair(kind='Kind', name="name'with-single-quote")
        with pytest.raises(ValueError, match='do not include quotes'):
            KeyPair(kind='Kind"with-double-quote', name='name')
        with pytest.raises(ValueError, match='do not include quotes'):
            KeyPair(kind="Kind-with-'single-quote", name='name')

    def test_key_pair_literal(self):
        # String names are rendered quoted in the literal form.
        assert self.key_pair.key_pair_literal() == "'Kind', 'name'"
class TestKeyPairWithID:
    """Behaviour of a KeyPair whose identifier is an integer id."""

    # Shared fixture: an id-type (integer identifier) key pair.
    id_key_pair = KeyPair(kind='Kind', name=1234567)

    def test_valid_name_int(self):
        assert isinstance(self.id_key_pair, KeyPair)
        assert self.id_key_pair == KeyPair(kind='Kind', name=self.id_key_pair.name)
        # An int id and its str form are distinct keys.
        assert self.id_key_pair != KeyPair(kind='Kind', name=str(self.id_key_pair.name))
        assert self.id_key_pair.is_id()
        assert not self.id_key_pair.is_name()

    def test_key_pair_literal(self):
        # Integer ids are rendered unquoted in the literal form.
        assert self.id_key_pair.key_pair_literal() == "'Kind', 1234567"

    def test_build_implicit_id_str_convert(self):
        # With implicit_id_str=True a numeric string is coerced to an int id.
        assert KeyPair(kind='Kind', name=12345) == KeyPair.build(kind='Kind', name='12345', implicit_id_str=True)
        assert KeyPair(kind='Kind', name=12345) != KeyPair.build(kind='Kind', name='12345', implicit_id_str=False)
        assert KeyPair.build(kind='Kind', name='1234567').key_pair_literal() == "'Kind', 1234567"
class TestNoneKey:
    """NoneKey is the null-object terminator of a key's ancestor chain."""

    def test_eq_none(self):
        assert NoneKey() == NoneKey()

    def test_parent(self):
        # A NoneKey is its own parent and never reports having one.
        assert NoneKey().parent() == NoneKey()
        assert not NoneKey().has_parent()

    def test_values(self):
        # Every accessor yields an empty/None value.
        o = NoneKey()
        assert o.kind() is None
        assert o.name() is None
        assert o.flat_pairs() == []
        assert o.pairs() == []
        assert o.key_literal() is None
        assert o.key_path() is None
        assert o.key_path_urlsafe() is None
class TestEntityKeyWithStringName:
    """EntityKey behaviour for keys built from (kind, string-name) pairs."""

    factory = EntityKeyFactory()
    # Parent ('Book', 'name') with child ('BookComment', 'comment').
    sample_key_pairs = [
        ('Book', 'name'),
        ('BookComment', 'comment'),
    ]

    def test_zero_length_pairs(self):
        # An empty pair list cannot form a key.
        with pytest.raises(ValueError):
            self.factory.build_from_pairs(pairs=[])

    def test_pairs_to_key(self):
        # kind()/name() reflect the last pair; ancestors unwind via parent()
        # until the NoneKey terminator is reached.
        key = self.factory.build_from_pairs(pairs=self.sample_key_pairs)
        assert isinstance(key, EntityKey)
        assert len(key.pairs()) == 2
        assert key.kind() == 'BookComment'
        assert key.name() == 'comment'
        assert key.has_parent()
        parent = key.parent()
        assert isinstance(parent, EntityKey)
        assert len(parent.pairs()) == 1
        assert parent.kind() == 'Book'
        assert parent.name() == 'name'
        assert not parent.has_parent()
        none = parent.parent()
        assert isinstance(none, NoneKey)
        assert len(none.pairs()) == 0
        assert none.kind() is None
        assert none.name() is None
        assert none.parent() == none

    def test_dict_pairs_to_key(self):
        # Pairs may also be given as dicts with 'kind'/'name' keys.
        key = self.factory.build_from_pairs(pairs=[
            {'kind': 'Book', 'name': 'name'},
            {'kind': 'BookComment', 'name': 'comment'},
        ])
        assert isinstance(key, EntityKey)
        assert key.flat_pairs() == ['Book', 'name', 'BookComment', 'comment']

    def test_flat_pairs(self):
        key = self.factory.build_from_pairs(pairs=self.sample_key_pairs)
        assert key.flat_pairs() == ['Book', 'name', 'BookComment', 'comment']

    def test_build(self):
        key = EntityKeyFactory().build(kind='Book', name='name')
        assert key.kind() == 'Book'
        assert key.name() == 'name'
        assert isinstance(key.parent(), NoneKey)

    def test_build_for_new(self):
        # build_for_new generates a fresh 26-character string name.
        key = self.factory.build_for_new(kind='Book')
        assert key.kind() == 'Book'
        assert isinstance(key.name(), str)
        assert len(key.name()) == 26
        assert isinstance(key.parent(), NoneKey)

    def test_entity_key_literal(self):
        key = self.factory.build(kind='Book', name='name')
        assert key.key_literal() == "Key('Book', 'name')"

    def test_entity_key_path(self):
        # key_path round-trips through build_from_key_path, both raw and
        # URL-escaped forms, for parent and child keys.
        key = self.factory.build(kind='Book', name='name')
        child = self.factory.build(kind='Comment', name='comment', parent=key)
        assert key.key_path() == 'Book:name'
        assert key.key_path_urlsafe() == 'Book%3Aname'
        assert child.key_path() == 'Book:name/Comment:comment'
        assert child.key_path_urlsafe() == 'Book%3Aname%2FComment%3Acomment'
        assert self.factory.build_from_key_path(key.key_path()) == key
        assert self.factory.build_from_key_path(key.key_path_urlsafe()) == key
        assert self.factory.build_from_key_path(child.key_path()) == child
        assert self.factory.build_from_key_path(child.key_path_urlsafe()) == child

    def test_entity_key_url(self):
        # key_url uses '/' separators and round-trips via build_from_key_url.
        key = self.factory.build(kind='Book', name='name')
        child = self.factory.build(kind='Comment', name='comment', parent=key)
        assert key.key_url() == 'Book/name'
        assert child.key_url() == 'Book/name/Comment/comment'
        assert self.factory.build_from_key_url(key.key_url()) == key
        assert self.factory.build_from_key_url(child.key_url()) == child

    def test_entity_key_hashable(self):
        # Keys are usable as dict keys.
        key = self.factory.build(kind='Book', name='name')
        d = {}
        d[key] = 'Book'
        assert len(d) == 1
class TestEntityKeyWithIntID:
    """EntityKey behaviour for keys built from (kind, integer-id) pairs."""

    factory = EntityKeyFactory()
    sample_key_pairs = [
        ('Book', 1234567890),
        ('BookComment', 9991234567890999)
    ]

    def test_pairs_to_key(self):
        # Integer ids survive the build; ancestor chain ends in NoneKey.
        key = self.factory.build_from_pairs(pairs=self.sample_key_pairs)
        assert isinstance(key, EntityKey)
        assert len(key.pairs()) == 2
        assert key.kind() == 'BookComment'
        assert key.name() == 9991234567890999
        parent = key.parent()
        assert isinstance(parent, EntityKey)
        assert len(parent.pairs()) == 1
        assert parent.kind() == 'Book'
        assert parent.name() == 1234567890
        grand_parent = parent.parent()
        assert isinstance(grand_parent, NoneKey)
        assert grand_parent == NoneKey()

    def test_entity_key_literal(self):
        # Integer ids are rendered unquoted in the literal form.
        key = self.factory.build_from_pairs(pairs=self.sample_key_pairs)
        assert key.key_literal() == "Key('Book', 1234567890, 'BookComment', 9991234567890999)"

    def test_entity_key_path(self):
        # Paths round-trip in both raw and URL-escaped forms.
        key = self.factory.build_from_pairs(pairs=self.sample_key_pairs)
        assert key.key_path() == 'Book:1234567890/BookComment:9991234567890999'
        assert key.key_path_urlsafe() == 'Book%3A1234567890%2FBookComment%3A9991234567890999'
        assert self.factory.build_from_key_path(key.key_path()) == key
        assert self.factory.build_from_key_path(key.key_path_urlsafe()) == key

    def test_entity_key_url(self):
        key = self.factory.build_from_pairs(pairs=self.sample_key_pairs)
        assert key.key_url() == 'Book/1234567890/BookComment/9991234567890999'
        assert self.factory.build_from_key_url(key_url=key.key_url()) == key
class TestIncompleteKey:
    """IncompleteKey: a kind without an assigned id/name yet."""

    def test_build(self):
        key = IncompleteKey('Book')
        assert isinstance(key, IncompleteKey)
        # A fresh incomplete key has no parent and a distinct literal form.
        assert key.parent is None
        assert key.key_literal() == 'IncompleteKey(Book)'

    def test_hashable(self):
        # Incomplete keys are usable as dict keys.
        key = IncompleteKey('Book')
        d = {key: 'Book'}
        assert len(d) == 1
|
993,001 | a92627e32e367ce8104f04f87ea328e2d1e26278 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 02 18:57:40 2016
@author: marco
"""
from manual_tests.models.linear_models import MIMO2x2, StabilizationMIMO2x2
from yaocptool.methods import IndirectMethod
# Alternative test model, kept for quick switching:
# model = VanDerPol()
# problem = VanDerPolStabilization(model)

# Build the 2x2 MIMO plant and its stabilization optimal-control problem.
model = MIMO2x2()
problem = StabilizationMIMO2x2(model)

# Solve with an indirect method: degree-3 collocation over 20 finite
# elements (the commented options below are alternate configurations).
indir_method = IndirectMethod(
    problem,
    degree=3,
    finite_elements=20,
    # integrator_type='implicit',
    # initial_guess_heuristic='problem_info',
    # discretization_scheme='multiple-shooting',
    discretization_scheme='collocation')

solution = indir_method.solve()
# Plot all states and all controls of the obtained solution.
solution.plot([{'x': 'all'}, {'u': 'all'}])
|
993,002 | 0ee04b3aedc5f4689aab7779af13851105ede703 | # from django.shortcuts import render
from django.views.generic import ListView
from habratest.models import Post
class Posts(ListView):
    """
    List of all available posts.
    """
    # The parameters below can also be passed to this view
    # via the as_view() method:
    # url(r'^$', Posts.as_view(
    #     context_object_name='posts',
    #     template_name='posts.html')
    # )
    model = Post
    # Name under which the post list is exposed to the template
    context_object_name = 'posts'
    # Template name
    template_name = 'habratest/posts.html'
    # Number of objects per page
    paginate_by = 10

    def get_queryset(self):
        """Return non-deleted posts, newest first; hide private posts
        from anonymous users."""
        qs = Post.objects.filter(is_delete=False).order_by('-created_at')
        if not self.request.user.is_authenticated:
            return qs.exclude(is_private=True)
        return qs
class PostsIndex(Posts):
    """
    Post list for the front page.
    """
    template_name = 'habratest/index.html'

    def get_queryset(self):
        # Same queryset as Posts, additionally dropping posts rated below 2.
        return super(PostsIndex, self).get_queryset().exclude(rating__lt=2)
|
993,003 | eb87513c1159dd5638efcdc08f9a5d6659911c33 | # Generated by Django 3.0.4 on 2020-08-12 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter clients.email to an optional CharField(max_length=95) with
    verbose name 'correo electronico'."""

    dependencies = [
        ('clients', '0003_clients_email'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clients',
            name='email',
            field=models.CharField(blank=True, max_length=95, null=True, verbose_name='correo electronico'),
        ),
    ]
|
993,004 | 270bc56ac4ffa2eeea3ddb0edc65850c55c901fb | import os.path
import urllib, os
from ConfigParser import ConfigParser
from XPLMPlugin import *
from XPLMProcessing import *
from XPLMDataAccess import *
from XPLMDefs import *
from XPLMMenus import *
from XPLMDisplay import *
from XPLMGraphics import *
from XPLMPlanes import *
from XPWidgetDefs import *
from XPWidgets import *
from XPStandardWidgets import *
try:
from pygame import midi
WITH_PYGAME = False #TODO set to true once we have a UI for selecting the device
except:
WITH_PYGAME = False
SCRIPTS_PATH = os.path.join(os.getcwd(),"Resources","plugins","PythonScripts")
SLEEP_TIME = 0.1
# XPlane Dataref Datatypes
INT_TYPE = 1
FLOAT_TYPE = 2
DOUBLE_TYPE = 4
FLOATARRAY_TYPE = 8
INTARRAY_TYPE = 16
# the default midi in devices if none were specified
DEFAULT_DEVICES = [3]
# midi states
NOTE_ON = 144
NOTE_OFF = 128
CC = 176
PROG_CHANGE = 192
# init pyGame's midi module
if WITH_PYGAME:
try:
midi.init()
except:
WITH_PYGAME = False
# array of active midi inputs
global midiIns
midiIns = []
def get_all_signals(verbose=False):
    """Drain every active midi input and return the combined signal list."""
    global midiIns
    collected = []
    for device in midiIns:
        collected.extend(get_signals(device, verbose))
    return collected
def get_signals(midiIn, verbose=False):
    """Read pending events from one midi input and normalize them.

    Returns a list of (event_name, number, value) tuples for the four
    supported statuses (NOTE_ON/NOTE_OFF/CC/PROG_CHANGE).  Only events
    not older than the last event's timestamp minus one polling interval
    are returned, so stale events left in the device buffer are dropped.
    Prints the last signal when verbose is set.
    """
    signals = []
    if midiIn.poll():
        buf = midiIn.read(100)
        # BUG FIX: the original referenced an undefined SLEEP_TIME_MS and
        # raised NameError on every poll.  pygame midi timestamps are in
        # milliseconds, so convert the polling interval (seconds) here.
        threshold = buf[-1][1] - (SLEEP_TIME * 1000)
        for signal in buf:
            if signal[1] >= threshold:  # only return latest signals
                status = signal[0][0]
                if status == NOTE_ON:
                    signals.append(("NOTE_ON", signal[0][1], signal[0][2]))
                elif status == NOTE_OFF:
                    signals.append(("NOTE_OFF", signal[0][1], signal[0][2]))
                elif status == CC:
                    signals.append(("CC", signal[0][1], signal[0][2]))
                elif status == PROG_CHANGE:
                    signals.append(("PROG_CHANGE", signal[0][1], signal[0][2]))
        if len(signals) and verbose:
            print("%s n:%d value:%d" % (signals[-1][0], signals[-1][1], signals[-1][2]))
    return signals
def device_exists(device):
    """True when *device* is a valid pygame midi device index."""
    return device <= midi.get_count() - 1
def init_devices(devices):
    """Open a midi Input for every requested device index."""
    global midiIns
    midiIns = [midi.Input(device) for device in devices]
def uninit_devices():
    """Close every open midi input."""
    global midiIns
    for opened in midiIns:
        opened.close()
def translate(value, leftMin, leftMax, rightMin, rightMax):
    """Linearly map *value* from [leftMin, leftMax] onto [rightMin, rightMax]."""
    # Normalise into a 0..1 fraction of the source span, then scale
    # into the destination span.
    fraction = float(value - leftMin) / float(leftMax - leftMin)
    return rightMin + fraction * (rightMax - rightMin)
def within(v, vFrom, vTo):
    """True when *v* lies in the closed interval [vFrom, vTo], given in any order."""
    lo, hi = sorted((vFrom, vTo))
    return lo <= v <= hi
def getDataref(dataref,index = 0):
    """Look up a dataref by name and return its current value.

    For array datarefs only the single element at *index* is returned.
    Returns None when the name does not resolve, the type is unsupported,
    or the ref is not writable — the writability gate mirrors setDataref,
    presumably because only bindable (writable) refs matter to this
    plugin; TODO confirm that read-only refs should really be skipped.
    """
    dref = XPLMFindDataRef(dataref)
    # The SDK wrapper yields an int handle when the lookup succeeded.
    if type(dref).__name__=='int':
        drefType = XPLMGetDataRefTypes(dref)
        if drefType and drefType!=0:
            if XPLMCanWriteDataRef(dref) and drefType in (INT_TYPE,FLOAT_TYPE,DOUBLE_TYPE,INTARRAY_TYPE,FLOATARRAY_TYPE):
                if drefType == INT_TYPE:
                    return XPLMGetDatai(dref)
                elif drefType == FLOAT_TYPE:
                    return XPLMGetDataf(dref)
                elif drefType == DOUBLE_TYPE:
                    return XPLMGetDatad(dref)
                elif drefType == INTARRAY_TYPE:
                    va = []
                    XPLMGetDatavi(dref,va,index,1)
                    return va[0]
                elif drefType == FLOATARRAY_TYPE:
                    va = []
                    XPLMGetDatavf(dref,va,index,1)
                    return va[0]
    return None
def setDataref(dataref,value,index = 0,length = 1):
    """Look up a dataref by name and write *value* to it.

    For array datarefs the value is replicated into *length* slots
    starting at *index*.  Returns the SDK call's result, or None when
    the name does not resolve, the ref is read-only, or the type is
    unsupported.
    """
    dref = XPLMFindDataRef(dataref)
    # The SDK wrapper yields an int handle when the lookup succeeded.
    if type(dref).__name__=='int':
        drefType = XPLMGetDataRefTypes(dref)
        if drefType and drefType!=0:
            if XPLMCanWriteDataRef(dref) and drefType in (INT_TYPE,FLOAT_TYPE,DOUBLE_TYPE,INTARRAY_TYPE,FLOATARRAY_TYPE):
                # BUG FIX: the scalar branches previously called the
                # XPLMGetData* readers (copy/paste from getDataref), so
                # scalar datarefs were read but never written.  Use the
                # setters, mirroring PythonInterface.SetDataref.
                if drefType == INT_TYPE:
                    return XPLMSetDatai(dref,int(value))
                elif drefType == FLOAT_TYPE:
                    return XPLMSetDataf(dref,float(value))
                elif drefType == DOUBLE_TYPE:
                    return XPLMSetDatad(dref,float(value))
                elif drefType == INTARRAY_TYPE:
                    va = []
                    for i in range(0,length): va.append(int(value))
                    return XPLMSetDatavi(dref,va,index,length)
                elif drefType == FLOATARRAY_TYPE:
                    va = []
                    for i in range(0,length): va.append(float(value))
                    return XPLMSetDatavf(dref,va,index,length)
    return None
class PythonInterface:
    """X-Plane Python plugin: maps midi events onto dataref updates.

    Bindings and presets come from midi_control.ini /
    midi_control_presets.ini in the PythonScripts folder, optionally
    overlaid by aircraft-specific copies next to the loaded plane.
    Midi events arrive via pygame.midi when available, otherwise by
    polling a local "midi control server" on http://localhost:8000.
    NOTE: this is Python 2 code (statement-form `exec`, urllib.urlopen,
    ConfigParser module import at the top of the file).
    """

    def XPluginStart(self):
        """Plugin entry point: create the plugins menu and initial state."""
        self.Name = "MidiControl"
        self.Sig = "OndrejBrinkel.Python.MidiControl"
        self.Desc = "Control Datarefs with midi device"
        # create menu
        self.SubMenuItem = XPLMAppendMenuItem(XPLMFindPluginsMenu(), "Midi Control", 0, 1)
        self.MenuHandlerCB = self.MenuHandlerCallback
        self.Menu = XPLMCreateMenu(self, "Midi Control", XPLMFindPluginsMenu(), self.SubMenuItem, self.MenuHandlerCB, 0)
        XPLMAppendMenuItem(self.Menu, "Reload configuration", 0, 1)
        XPLMAppendMenuItem(self.Menu, "Toggle midi input monitoring", 1, 1)
        # midi input window
        self.MidiInWidget = None
        #self.MidiInWidgetCB = self.MidiInWidget
        # midi devices Window
        self.devices = []
        return self.Name, self.Sig, self.Desc

    def Uninit(self):
        """Destroy the monitoring widget and the menu, if created."""
        if self.MidiInWidget:
            XPDestroyWidget(self,self.MidiInWidget,1)
            self.MidiInWidget = None
        if self.Menu:
            XPLMDestroyMenu(self,self.Menu)
            self.Menu = None

    def XPluginStop(self):
        self.Uninit()
        pass

    def XPluginEnable(self):
        """Open midi devices, register the flight-loop callback, load inis."""
        self.midiInBuffer = ''
        if WITH_PYGAME:
            # Fall back to the hard-coded DEFAULT_DEVICES when none configured.
            if len(self.devices)==0:
                init_devices(DEFAULT_DEVICES)
            else:
                init_devices(self.devices)
        self.FlightLoopCB = self.Update
        XPLMRegisterFlightLoopCallback(self,self.FlightLoopCB,1.0,0)
        self.ReloadInis()
        return 1

    def MenuHandlerCallback(self, inMenuRef, inItemRef):
        """Menu item 0: reload inis; item 1: toggle the midi monitor window."""
        if inItemRef==0:
            self.ReloadInis()
        elif inItemRef==1:
            if self.MidiInWidget:
                if(XPIsWidgetVisible(self.MidiInWidget)):
                    XPHideWidget(self.MidiInWidget)
                else:
                    XPShowWidget(self.MidiInWidget)
                    pass
            else:
                # Lazily create the widget on first toggle.
                self.CreateMidiInWidget(50, 600, 200, 100)
                if(not XPIsWidgetVisible(self.MidiInWidget)):
                    XPShowWidget(self.MidiInWidget)
        pass

    def CreateMidiInWidget(self,x,y,w,h):
        """Create the 'last midi input' monitor window at (x, y), size w*h."""
        x2 = x+w
        y2 = y-h
        # Create the Main Widget window
        self.MidiInWidget = XPCreateWidget(x, y, x2, y2, 1, 'Midi Control - last input:', 1, 0, xpWidgetClass_MainWindow)
        # Add Close Box decorations to the Main Widget
        XPSetWidgetProperty(self.MidiInWidget, xpProperty_MainWindowHasCloseBoxes, 1)
        # Create the Sub Widget1 window
        # MidiInWindow = XPCreateWidget(x+10, y-20, x2-10, y2+10,
        # 1, # Visible
        # "", # desc
        # 0, # root
        # self.MidiInWidget,
        # xpWidgetClass_SubWindow)
        # Set the style to sub window
        # XPSetWidgetProperty(MidiInWindow, xpProperty_SubWindowType, xpSubWindowStyle_SubWindow)
        # Assignments text
        self.MidiInWidgetCaption = XPCreateWidget(x+20, y-30, x2-20, y2+20,1, self.midiInBuffer, 0, self.MidiInWidget,xpWidgetClass_Caption)
        # Register our widget handler
        self.MidiInHandlerCB = self.MidiInHandler
        XPAddWidgetCallback(self, self.MidiInWidget, self.MidiInHandlerCB)

    def MidiInHandler(self, inMessage, inWidget, inParam1, inParam2):
        """Widget callback: hide the monitor window when its close box is pushed."""
        if (inMessage == xpMessage_CloseButtonPushed):
            XPHideWidget(self.MidiInWidget)
            return 1
        return 0

    def ReloadInis(self):
        """Load global presets/bindings, then overlay aircraft-specific files."""
        self.LoadPresets(os.path.join(SCRIPTS_PATH,'midi_control_presets.ini'))
        self.LoadMidiBindings(os.path.join(SCRIPTS_PATH,'midi_control.ini'))
        plane_path = XPLMGetNthAircraftModel(0)
        plane_path = os.path.dirname(plane_path[1])
        presets_file = os.path.join(plane_path,'midi_control_presets.ini')
        bindings_file = os.path.join(plane_path,'midi_control.ini')
        if os.path.exists(presets_file) and os.path.isfile(presets_file):
            self.LoadPresets(presets_file,True)
        if os.path.exists(bindings_file) and os.path.isfile(bindings_file):
            self.LoadMidiBindings(bindings_file,True)

    def XPluginDisable(self):
        if WITH_PYGAME: uninit_devices()
        XPLMUnregisterFlightLoopCallback(self,self.FlightLoopCB,0)
        pass

    def XPluginReceiveMessage(self, inFromWho, inMessage, inParam):
        if inMessage == XPLM_MSG_PLANE_LOADED and inParam == 0: # user plane loaded, so load midi bindings of the plane on top of base bindings
            self.ReloadInis()

    def UpdateDatarefs(self,signals):
        """Dispatch each midi signal to every binding registered for it."""
        for signal in signals:
            signalType = signal[0]
            n = str(signal[1])
            if signalType in self.bindings and n in self.bindings[signalType]:
                for binding in self.bindings[signalType][n]:
                    self.UpdateDataref(signal,binding)

    def UpdateDataref(self,signal,binding):
        """Apply one midi signal to one binding's dataref."""
        # check if the signal type and value is in bindings
        signalType = signal[0]
        n = str(signal[1])
        if len(signal)>2:
            v = signal[2]
        else:
            # Signals without an explicit value get a default velocity.
            if signalType == 'NOTE_ON':
                v = 127
            elif signalType == 'NOTE_OFF':
                v = 0
            else:
                v = 0
        # ignore values out of midi range
        if within(v,binding['midi_range'][0],binding['midi_range'][1]):
            dref = binding['dataref']
            drefType = binding['dataref_type']
            drefValue_orig = self.GetDataref(dref,drefType,binding['dataref_index'][0],binding['dataref_index'][1])
            if type(drefValue_orig).__name__ == 'list':
                # Array dataref: update each element individually.
                for i in range(0,len(drefValue_orig)):
                    drefValue = self.GetUpdatedValue(v,dref,drefType,binding,drefValue_orig[i])
                    self.SetDataref(dref,drefType,drefValue,binding['dataref_index'][0]+i,1)
                    self.RunExecute(binding['post_execute'],drefValue_orig[i],drefValue)
            else:
                drefValue = self.GetUpdatedValue(v,dref,drefType,binding,drefValue_orig)
                self.SetDataref(dref,drefType,drefValue,binding['dataref_index'][0],binding['dataref_index'][1])
                self.RunExecute(binding['post_execute'],drefValue_orig,drefValue)

    def GetUpdatedValue(self,v,dref,drefType,binding,drefValue_orig):
        """Compute the new dataref value for midi value *v* under *binding*.

        Applies, in order: linear mapping, step snapping, one of
        toggle/relative/absolute(+additive) semantics with pre
        action/execute hooks, the post action, and data_min/data_max
        clipping.
        """
        # determine value by linear mapping
        value = translate(v,float(binding['midi_range'][0]),float(binding['midi_range'][1]),binding['data_range'][0],binding['data_range'][1])
        if binding['steps']:
            # Snap the mapped value onto the configured number of steps.
            step_size = float(binding['data_range'][1] - binding['data_range'][0])/float(binding['steps'])
            step = round((value-binding['data_range'][0])/step_size)
            value = float(binding['data_range'][0])+(step*step_size)
        if binding['toggle']:
            # Toggle flips between the two ends of data_range.
            if drefValue_orig == binding['data_range'][0]:
                value = binding['data_range'][1]
            else:
                value = binding['data_range'][0]
        elif binding['relative']:
            # Relative controls add the mapped value to the current one.
            value = self.RunAction(binding['pre_action'],drefValue_orig,value)
            self.RunExecute(binding['pre_execute'],drefValue_orig,value)
            value = drefValue_orig + value
        else:
            # get data range min and max
            data_min = min(binding['data_range'][0],binding['data_range'][1])
            data_max = max(binding['data_range'][0],binding['data_range'][1])
            # clip value to range
            if value<data_min:
                value = data_min
            elif value>data_max:
                value = data_max
            if binding['additive']:
                # Additive controls apply the change since the last value.
                value_add = value - binding['last_value']
                value = self.RunAction(binding['pre_action'],drefValue_orig,value)
                self.RunExecute(binding['pre_execute'],drefValue_orig,value)
                binding['last_value'] = value
                value = drefValue_orig + value_add
            else:
                value = self.RunAction(binding['pre_action'],drefValue_orig,value)
                self.RunExecute(binding['pre_execute'],drefValue_orig,value)
        # run post action
        drefValue = self.RunAction(binding['post_action'],drefValue_orig,value)
        # clip to min and max
        if binding['data_min'] and drefValue<binding['data_min']: drefValue = binding['data_min']
        if binding['data_max'] and drefValue>binding['data_max']: drefValue = binding['data_max']
        # finally return the new value
        return drefValue

    def RunExecute(self,execute,data,value):
        """Exec a configured statement; `data`/`value` are visible to it.
        NOTE(security): executes arbitrary code taken from the ini files."""
        if execute:
            exec execute

    def RunAction(self,action,data,value):
        """Eval a configured expression and return its result, else pass value through.
        NOTE(security): evaluates arbitrary code taken from the ini files."""
        if action:
            return eval(action)
        else: return value

    def GetDataref(self,dref,drefType,index = 0, length = 1):
        """Read a resolved dataref handle; array types return a list of *length* items."""
        if drefType == INT_TYPE:
            return XPLMGetDatai(dref)
        elif drefType == FLOAT_TYPE:
            return XPLMGetDataf(dref)
        elif drefType == DOUBLE_TYPE:
            return XPLMGetDatad(dref)
        elif drefType == INTARRAY_TYPE:
            va = []
            XPLMGetDatavi(dref,va,index,length)
            return va
        elif drefType == FLOATARRAY_TYPE:
            va = []
            XPLMGetDatavf(dref,va,index,length)
            return va

    def SetDataref(self,dref,drefType,value,index = 0, length = 1):
        """Write *value* to a resolved dataref handle (replicated over arrays)."""
        if drefType == INT_TYPE:
            return XPLMSetDatai(dref,int(value))
        elif drefType == FLOAT_TYPE:
            return XPLMSetDataf(dref,float(value))
        elif drefType == DOUBLE_TYPE:
            return XPLMSetDatad(dref,float(value))
        elif drefType == INTARRAY_TYPE:
            va = []
            for i in range(0,length): va.append(int(value))
            return XPLMSetDatavi(dref,va,index,length)
        elif drefType == FLOATARRAY_TYPE:
            va = []
            for i in range(0,length): va.append(float(value))
            return XPLMSetDatavf(dref,va,index,length)

    def Update(self,inFlightLoopCallback, inInterval,inRelativeToNow, inRefcon):
        """Flight-loop callback: poll midi (or the local HTTP fallback),
        apply the signals to bindings and refresh the monitor widget.
        Returns SLEEP_TIME to be called again after that many seconds."""
        if WITH_PYGAME:
            signals = get_all_signals()
        else:
            signals = []
            try:
                sock = urllib.urlopen('http://localhost:8000')
                json = sock.read()
                sock.close()
                # NOTE(security): evaluates the HTTP response body as Python.
                signals = eval(json)
            except:
                self.midiInBuffer="Midi control server not reachable."
        if signals and len(signals)>0:
            # update midi-in buffer
            self.midiInBuffer = ''
            for signal in signals:
                self.midiInBuffer+=str(signal)+'\n'
            self.UpdateDatarefs(signals)
        # update widget if any
        if self.MidiInWidget:
            XPSetWidgetDescriptor(self.MidiInWidgetCaption, self.midiInBuffer)
        return SLEEP_TIME

    def ApplyPreset(self,options):
        """Fill missing options from the referenced preset (explicit options win)."""
        if 'preset' in options and options['preset'] in self.presets:
            for option in self.presets[options['preset']]:
                if option not in options:
                    options[option] = self.presets[options['preset']][option]
        return options

    def LoadPresets(self,iniFile, overwrite = False):
        """Load [section] -> {option: value} presets from *iniFile*;
        with overwrite=True the existing presets are overlaid, not reset."""
        if overwrite == False:
            self.presets = {}
        cp = ConfigParser()
        cp.read(iniFile)
        for section in cp.sections():
            self.presets[section] = {}
            for option in cp.options(section):
                self.presets[section][option] = cp.get(section,option)

    def LoadMidiBindings(self,iniFile,overwrite = False):
        """Parse binding sections from *iniFile* into
        self.bindings[type][n] lists; with overwrite=True existing
        bindings are overlaid, not reset.  Section names are
        '<prefix>|<dataref> [index[/length]]'; each section's options
        (possibly completed from a preset) configure one binding."""
        self.ini = {}
        if overwrite == False:
            self.bindings = {'CC':{},'NOTE_ON':{},'NOTE_OFF':{}}
        cp = ConfigParser()
        cp.read(iniFile)
        for section in cp.sections():
            # remove optional prefix from section
            parts = section.split('|',1)
            if len(parts)>1:
                dataref = parts[1]
            else: dataref = section
            if dataref not in self.ini:
                self.ini[dataref] = []
            options = {}
            for option in cp.options(section):
                options[option] = cp.get(section,option)
            self.ini[dataref].append(options)
        for dataref in self.ini:
            for options in self.ini[dataref]:
                options = self.ApplyPreset(options)
                if 'type' in options and 'n' in options:
                    # Optional ' index[/length]' suffix selects array slots.
                    parts = dataref.split(' ',1)
                    if len(parts)>1:
                        dataref = parts[0]
                        dataref_index = parts[1].split('/',1)
                        dataref_index[0] = int(dataref_index[0])
                        if len(dataref_index)<2:
                            dataref_index.append(1)
                        else: dataref_index[1] = int(dataref_index[1])
                    else: dataref_index = [None,None]
                    dref = XPLMFindDataRef(dataref)
                    if type(dref).__name__=='int':
                        drefType = XPLMGetDataRefTypes(dref)
                        if drefType and drefType!=0:
                            if XPLMCanWriteDataRef(dref) and drefType in (INT_TYPE,FLOAT_TYPE,DOUBLE_TYPE,INTARRAY_TYPE,FLOATARRAY_TYPE):
                                binding = {'dataref':dref,'dataref_type':drefType,'dataref_index':dataref_index}
                                # set ranges
                                if 'midi_range' in options:
                                    # NOTE(security): eval on ini-provided text.
                                    binding['midi_range'] = eval(options['midi_range'])
                                else:
                                    binding['midi_range'] = [0,127]
                                if 'data_range' in options:
                                    binding['data_range'] = eval(options['data_range'])
                                else: # TODO: automaticly find out the type of dataref and min, max values
                                    binding['data_range'] = [0.0,1.0]
                                # is this an absolute or relative control? Defaults to absolute
                                if 'relative' in options:
                                    binding['relative'] = options['relative']
                                else: binding['relative'] = False
                                # will changes of this control apply in an additive manner? Defaults to no
                                if 'additive' in options:
                                    binding['last_value'] = 0.0
                                    binding['additive'] = options['additive']
                                else: binding['additive'] = False
                                # toggle action?
                                if 'toggle' in options:
                                    binding['toggle'] = options['toggle']
                                else: binding['toggle'] = False
                                # value snapping
                                if 'steps' in options:
                                    binding['steps'] = options['steps']
                                else: binding['steps'] = None
                                # data min, max
                                if 'data_min' in options:
                                    binding['data_min'] = float(options['data_min'])
                                else: binding['data_min'] = None
                                if 'data_max' in options:
                                    binding['data_max'] = float(options['data_max'])
                                else: binding['data_max'] = None
                                if 'pre_action' in options:
                                    binding['pre_action'] = options['pre_action']
                                else: binding['pre_action'] = None
                                if 'post_action' in options:
                                    binding['post_action'] = options['post_action']
                                else: binding['post_action'] = None
                                if 'pre_execute' in options:
                                    binding['pre_execute'] = options['pre_execute']
                                else: binding['pre_execute'] = None
                                if 'post_execute' in options:
                                    binding['post_execute'] = options['post_execute']
                                else: binding['post_execute'] = None
                                # add binding to the list, create one if not already present
                                if options['n'] not in self.bindings[options['type']]:
                                    self.bindings[options['type']][options['n']] = []
                                self.bindings[options['type']][options['n']].append(binding)
993,005 | e51d600a0243a1d1bc9ffdf61c08a996f0056b55 | import cv2
import zbar
from PIL import Image
import numpy as np
import socket
import urllib
bytes = bytes()
def get_frame(stream):
    """Pull up to 1 KB from an MJPEG *stream* and decode one full frame.

    Accumulates data in the module-level buffer (named `bytes`, which
    shadows the builtin) and extracts the first complete JPEG between
    the SOI (0xffd8) and EOI (0xffd9) markers.  Returns the decoded BGR
    image, or None when no complete frame is buffered yet.
    """
    global bytes
    bytes += stream.read(1024)
    a = bytes.find(b'\xff\xd8')
    b = bytes.find(b'\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b + 2]
        # Keep the remainder for the next call.
        bytes = bytes[b + 2:]
        # BUG FIX: np.fromstring is deprecated for binary input;
        # np.frombuffer is the supported, copy-free equivalent.
        img = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        return img
    return None
scanner = zbar.ImageScanner()
scanner.parse_config('enable')
hc_methods = {
'face': cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml'),
'upperbody': cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_upperbody.xml'),
'fullbody': cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_fullbody.xml')
}
def process_video(stream, tb_ip='192.168.4.1', tb_port=8787, detect=True, feature='qrcode'):
    """Grab one frame from *stream*, optionally detect a feature, and
    return (cmd, jpeg_bytes).

    cmd is {} when nothing was detected, otherwise {'delta_x': offset}
    with the horizontal distance from the frame centre to the detected
    object's centre.  feature is 'qrcode' (zbar) or one of the haar
    cascade keys in hc_methods.  tb_ip/tb_port are accepted for
    interface compatibility but unused here.
    """
    rect_center = None
    detection = False
    cmd = {}
    img = get_frame(stream)
    # BUG FIX: `img == None` breaks once get_frame returns an ndarray
    # (elementwise comparison has no single truth value); use identity.
    while img is None:
        img = get_frame(stream)
    if not detect:
        ret_enc, jpeg = cv2.imencode('.jpg', img)
        return cmd, jpeg.tobytes()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Integer division: OpenCV drawing functions require integer coords.
    center = (img.shape[1] // 2, img.shape[0] // 2)
    cv2.circle(img, center, 3, (0, 255, 0), 1)
    if feature != "qrcode":
        # Haar-cascade detection; only the first hit is used.
        hc = hc_methods[feature]
        detections = hc.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in detections:
            detection = True
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            rect_center = (x + w // 2, y + h // 2)
            cv2.circle(img, rect_center, 3, (255, 0, 0), 1)
            cv2.line(img, center, rect_center, (0, 255, 0))
            break
    else:
        # QR detection via zbar on the grayscale frame.
        pil = Image.fromarray(gray)
        width, height = pil.size
        raw = pil.tobytes()
        image = zbar.Image(width, height, 'Y800', raw)
        scanner.scan(image)
        for symbol in image:
            detection = True
            try:
                cv2.rectangle(img, symbol.location[0], symbol.location[2], (255, 0, 0), 2)
            except:
                # cv2.Rectangle tuple error, skipping...
                detection = False
                break
            x = symbol.location[0][0]
            y = symbol.location[0][1]
            w = symbol.location[2][0] - symbol.location[0][0]
            h = symbol.location[2][1] - symbol.location[0][1]
            rect_center = (x + w // 2, y + h // 2)
            cv2.circle(img, rect_center, 3, (255, 0, 0), 1)
            cv2.line(img, center, rect_center, (0, 255, 0))
            break
    if detection:
        x_distance = center[0] - rect_center[0]
        # y_distance = center[1] - rect_center[1]
        # message = str(x_distance) + ',' + str(y_distance)
        cmd['delta_x'] = x_distance
        # cmd['delta_y'] = y_distance
    ret_enc, jpeg = cv2.imencode('.jpg', img)
    return cmd, jpeg.tobytes()
|
993,006 | be6c48b8b15d0bcac33aabbfa809355a77e59163 | lista = []
# Read ten integers, then report their mean and the values above it.
numeros = []
total = 0

for _ in range(10):
    valor = int(input('Digite um número: '))
    numeros.append(valor)
    total += valor

media = total / len(numeros)
acima_da_media = [valor for valor in numeros if valor > media]

print('A média dos números é: ', media)
print('Os números', acima_da_media, 'são maiores que a média')
993,007 | 87ca91fb425906a2ee533072dfee26f584a0dbd8 | # 럭키 스트레이트
def luckyStraight():
    """Read a digit string; print LUCKY when both halves' digit sums match,
    READY otherwise."""
    s = input()
    half = len(s) // 2
    first, second = s[:half], s[half:]
    total_first = 0
    total_second = 0
    # zip truncates to the first half's length, matching the original
    # behaviour for odd-length input (the last right digit is ignored).
    for a, b in zip(first, second):
        total_first += int(a)
        total_second += int(b)
    print("LUCKY" if total_first == total_second else "READY")
# luckyStraight()
# ---
# 문자열 재정렬
def resorted():
    """Read a string; print its non-digit characters in sorted order
    followed by the sum of its ASCII digits."""
    s = input()
    digit_total = 0
    letters = ""
    for ch in sorted(s):
        if ch in "0123456789":
            digit_total += int(ch)
        else:
            letters += ch
    print(letters + str(digit_total))
# resorted()
# ----
# 문자열 압축
def solution(s):
    """Return the length of the shortest run-length compression of *s*,
    trying every chunk width from 1 to len(s)//2 (programmers '문자열 압축')."""
    if len(s) == 1:
        return 1
    # Width 1 with no runs reproduces s itself, so len(s) is a valid start.
    best = len(s)
    for width in range(1, len(s) // 2 + 1):
        chunks = [s[pos:pos + width] for pos in range(0, len(s), width)]
        compressed = ""
        run = 1
        for idx, chunk in enumerate(chunks):
            if idx < len(chunks) - 1 and chunk == chunks[idx + 1]:
                run += 1
            else:
                # Runs of length 1 are emitted without a count prefix.
                compressed += (str(run) + chunk) if run > 1 else chunk
                run = 1
        best = min(best, len(compressed))
    return best
# ------
# 자물쇠와 열쇠
import copy
# M*M 사이즈 key를 회전해서 만든 4가지 key 배열을 반환
def rotate(key):
    """Return the four successive 90-degree rotations of the square matrix
    *key*; the fourth entry equals the original orientation."""
    rotations = []
    size = len(key)
    current = key
    for _ in range(4):
        turned = [[0] * size for _ in range(size)]
        for r in range(size):
            for c in range(size):
                turned[size - c - 1][r] = current[r][c]
        current = turned
        rotations.append(turned)
    return rotations
# 열리는지 확인하는 함수
# (zero padding map, key list, start index1, start index2, key size, lock size)
def isUnLock(lock_map, keys, i, j, key_size, lock_size):
    """Try every rotated key at offset (i, j) on the zero-padded lock map;
    return True when some rotation makes the whole lock area exactly 1."""
    # Start/end indices of the lock area inside the zero-padded map.
    lock_start = key_size - 1
    lock_end = lock_size + key_size - 2
    # For each of the 4 rotated keys, overlay it at (i, j) on a copy.
    for key in keys:
        temp = copy.deepcopy(lock_map)
        for x in range(i, i + key_size):
            for y in range(j, j + key_size):
                # Only cells inside the lock area contribute to the test.
                if (
                    x >= lock_start
                    and x <= lock_end
                    and y >= lock_start
                    and y <= lock_end
                ):
                    temp[x][y] += key[x - i][y - j]
        # Check the lock area is all 1s (holes filled, no pin overlap).
        flag = True
        for x in range(lock_start, lock_end + 1):
            for y in range(lock_start, lock_end + 1):
                if temp[x][y] != 1:
                    flag = False
                    break
            if not flag:
                break
        if flag:
            return True
    return False
def solution(key, lock):
    """Return True when some rotation/offset of *key* opens *lock*
    (programmers '자물쇠와 열쇠'); relies on rotate() and isUnLock()."""
    answer = False
    m = len(key)
    n = len(lock)
    # Zero-pad the lock so the key may overhang on every side.
    lock_map = [[0 for i in range(n + m * 2 - 2)] for j in range(n + m * 2 - 2)]
    for i in range(m - 1, n + m - 1):
        for j in range(m - 1, n + m - 1):
            lock_map[i][j] = lock[i - m + 1][j - m + 1]
    # Slide the key over every offset, testing all 4 rotations each time.
    keyList = rotate(key)
    for i in range(n + m - 1):
        for j in range(n + m - 1):
            if isUnLock(lock_map, keyList, i, j, m, n):
                return True
    return answer
# 자물쇠와 열쇠 book solution
def rotate(key):
    """Rotate the 2-D list *key* 90 degrees clockwise and return the result."""
    rows = len(key)
    cols = len(key[0])
    rotated = [[0] * rows for _ in range(cols)]
    for r in range(rows):
        for c in range(cols):
            rotated[c][rows - r - 1] = key[r][c]
    return rotated
def check(new_lock):
    """Return True when the central third of the square board *new_lock*
    consists entirely of 1s."""
    span = len(new_lock) // 3
    for row in range(span, span * 2):
        for col in range(span, span * 2):
            if new_lock[row][col] != 1:
                return False
    return True
def solution(key, lock):
    """Book solution for '자물쇠와 열쇠': embed the lock in a 3x board,
    slide each of the 4 key rotations over it, and use check() to test
    whether the centre region becomes all 1s."""
    n = len(lock)
    m = len(key)
    # Triple-size board so the key can overhang the lock on every side.
    new_lock = [[0] * (n * 3) for _ in range(n * 3)]
    # Place the original lock in the centre of the new board.
    for i in range(n):
        for j in range(n):
            new_lock[i + n][j + n] = lock[i][j]
    # Check all four rotations.
    for rotation in range(4):
        key = rotate(key)  # rotate key
        for x in range(n * 2):
            for y in range(n * 2):
                # Overlay the key at (x, y) ...
                for i in range(m):
                    for j in range(m):
                        new_lock[x + i][y + j] += key[i][j]
                # ... test whether the key fits the lock exactly ...
                if check(new_lock) == True:
                    return True
                # ... and undo the overlay before the next offset.
                for i in range(m):
                    for j in range(m):
                        new_lock[x + i][y + j] -= key[i][j]
    return False
# 기둥과 보 (pillars and beams)
def _possible(answer):
    """Return True when every installed structure in `answer` is supported.

    `answer` holds [x, y, kind] entries where kind 0 is a pillar (기둥)
    and kind 1 is a beam (보).
    """
    for x, y, kind in answer:
        if kind == 0:
            # A pillar must stand on the floor, on another pillar,
            # or on the end of a beam.
            if (y == 0 or [x, y - 1, 0] in answer
                    or [x - 1, y, 1] in answer or [x, y, 1] in answer):
                continue
            return False
        else:
            # A beam needs a pillar under either end, or beams on both sides.
            if ([x, y - 1, 0] in answer or [x + 1, y - 1, 0] in answer
                    or ([x - 1, y, 1] in answer and [x + 1, y, 1] in answer)):
                continue
            return False
    return True


def solution(n, build_frame):
    """Simulate install/delete commands and return the sorted survivors.

    The previous attempt (marked 오답/wrong) validated each command in
    isolation and never re-checked structures depending on a deleted one.
    This version applies each command tentatively and reverts it if any
    structure becomes unsupported.  `n` (grid size) is unused because the
    problem guarantees in-bounds commands.
    """
    answer = []
    for x, y, kind, op in build_frame:
        if op == 1:  # 설치 (install)
            answer.append([x, y, kind])
            if not _possible(answer):
                answer.remove([x, y, kind])
        else:  # 삭제 (delete)
            answer.remove([x, y, kind])
            if not _possible(answer):
                answer.append([x, y, kind])
    return sorted(answer)
def chicken():
    """BOJ 15686 (치킨 배달): keep M chicken shops so the total city
    chicken-distance is minimized.

    Reads the N x N board from stdin (1 = house, 2 = shop) and prints the
    minimum summed Manhattan distance.  The original code special-cased
    M == 1; combinations() of size 1 covers that case identically, so a
    single code path suffices.
    """
    from itertools import combinations

    N, M = map(int, input().split())
    grid = [list(map(int, input().split())) for _ in range(N)]

    homes = []
    shops = []
    for r in range(N):
        for c in range(N):
            if grid[r][c] == 1:
                homes.append((r, c))
            elif grid[r][c] == 2:
                shops.append((r, c))

    # For every way of keeping M shops, each house contributes the distance
    # to its nearest kept shop.
    best = min(
        sum(
            min(abs(sr - hr) + abs(sc - hc) for sr, sc in combo)
            for hr, hc in homes
        )
        for combo in combinations(shops, M)
    )
    print(best)
# chicken()
def wallInspection(n, weak, dist):
    """Programmers 60062: minimum number of friends to patch all weak spots.

    n    -- circumference of the circular wall
    weak -- positions of weak points (0 .. n-1), ascending
    dist -- patch range of each available friend

    Returns the minimum head-count, or -1 when even all friends together
    cannot cover every weak point.
    """
    from itertools import permutations

    length = len(weak)
    # Unroll the circle into a line of twice the length so every starting
    # point can be scanned with plain indices.  Work on a copy: the original
    # appended to the caller's `weak` list in place, mutating the argument.
    line = weak + [w + n for w in weak]

    answer = len(dist) + 1  # one more than the possible maximum
    for start in range(length):
        # Try every ordering of the friends.
        for friends in permutations(dist):
            count = 1  # friends deployed so far
            position = line[start] + friends[count - 1]  # reach of current friend
            for index in range(start, start + length):
                if position < line[index]:  # current friend cannot reach here
                    count += 1  # deploy the next friend
                    if count > len(dist):  # no friends left
                        break
                    position = line[index] + friends[count - 1]
            answer = min(answer, count)
    # All orderings exhausted without covering everything.
    if answer > len(dist):
        return -1
    return answer
993,008 | c6d5c57bd57bf34d564de12803a0679509dec81e | from flask import Flask, render_template, request, make_response
from flask_sqlalchemy import SQLAlchemy
from config import app_config
#app = Flask(__name__)
#app.config.from_object(app_config[env_name])
#JWT_SECRET_KEY = hhgaghhgsdhdhdd
def create_app(env_name):
    """
    Application factory: build a Flask app configured for `env_name`.

    `env_name` must be a key of `config.app_config`; the matching config
    object is loaded onto the new app before it is returned.
    """
    # Application instantiation
    app = Flask(__name__)
    app.config.from_object(app_config[env_name])
    return app
|
def find_it(seq):
    """Return the first-seen value appearing an odd number of times in `seq`.

    Returns None when no value occurs an odd number of times (including
    for an empty sequence).  The original called the `__contains__` dunder
    directly and fell through implicitly; this version counts idiomatically
    and makes the None return explicit.
    """
    counts = {}
    for value in seq:
        counts[value] = counts.get(value, 0) + 1
    for value, count in counts.items():
        if count % 2 != 0:
            return value
    return None
|
993,010 | 231d384ab3680eadddd62f5896c87b13a34b5cf8 | # OAuth app keys
DROPBOX_KEY = 'changeme'  # Dropbox OAuth app key -- replace before deployment
DROPBOX_SECRET = 'changeme'  # Dropbox OAuth app secret -- keep out of version control
DROPBOX_AUTH_CSRF_TOKEN = 'dropbox-auth-csrf-token'  # session key used for the OAuth CSRF token
|
993,011 | 3f7a8bb7030829545b382a17f5d393510703c32c | print("Maior número da Tupla")
def maiorTupla(tupla):
    """Return the largest element of the tuple."""
    return max(tupla)
tupla = (1, 2, 3, 4, 5, 6, 7, 80, 9, 10)
print(maiorTupla(tupla)) |
993,012 | 0ca991e780b1cc8686fe586af81e948a9cffe229 | # SheetManager Exceptions
class GoogleConnectionException(Exception):
    """Error connecting to the Google Sheets backend."""
    pass
class InitSheetManagerException(Exception):
    """Error initializing the SheetManager."""
    pass
class AuthSheetManagerException(Exception):
    """Error authorizing the SheetManager."""
    pass
class CreateSheetException(Exception):
    """Error creating a sheet."""
    pass
class LoadSheetException(Exception):
    """Error loading a sheet."""
    pass
# Holdings Exceptions
class HoldingsQueryException(Exception):
    """Error performing a holdings query."""
    pass
class BadBibstemException(Exception):
    """Malformed or unrecognized bibstem."""
    pass
# Tasks Exceptions
class DBCommitException(Exception):
    """Non-recoverable Error with making database commits."""
    pass
class DBReadException(Exception):
    """Non-recoverable Error with making database selection."""
    pass
class InvalidTableException(Exception):
    """Unknown or invalid table requested."""
    pass
class TableCheckinException(Exception):
    """Error checking a table in."""
    pass
class TableCheckoutException(Exception):
    """Error checking a table out."""
    pass
# Utils Exceptions
class ReadBibstemException(Exception):
    """Error reading the bibstem data."""
    pass
class ReadCanonicalException(Exception):
    """Error reading the canonical data."""
    pass
class ReadEncodingException(Exception):
    """Error reading encoding data."""
    pass
class RequestsException(Exception):
    """Error during an outbound HTTP request."""
    pass
class ReadRefsourcesException(Exception):
    """Error reading the refsources data."""
    pass
|
993,013 | 5c7766b0fd1f77cb0ab89c54e08e283f895660d8 | #!/usr/bin/python2.7
def transform(x):
    """One reverse-and-add (Lychrel) step: add x to its decimal reverse."""
    reversed_digits = int(str(x)[::-1])
    return x + reversed_digits
def is_palindrome(x):
    """True when the decimal representation of x reads the same both ways."""
    digits = str(x)
    return digits == digits[::-1]
def is_lychrel(x):
    """True when x fails to reach a palindrome within 50 reverse-and-add steps.

    Fix: the original used Python 2's `xrange`, undefined under Python 3;
    `range` behaves identically in this loop on both versions.
    """
    pal = transform(x)
    for _ in range(0, 50):
        if is_palindrome(pal):
            return False
        pal = transform(pal)
    return True
# Project Euler 55: count Lychrel candidates below 10000.
# (`xrange` was Python 2 only; `range` behaves identically in this loop.)
count = 0
for i in range(1, 10000):
    if is_lychrel(i):
        count += 1
print(count)
|
993,014 | 30c363919055bded5bff501d259751b35652e81b | from django.db import models
# Data model for Authors
class Author(models.Model):
    """An author identified by free-text first and last names."""
    first_name = models.TextField()
    last_name = models.TextField()
    def __str__(self):
        # Format: "Last, First"
        return self.last_name + ", " + self.first_name
# Data model for Books
class Book(models.Model):
    """A book linked to its Author; deleting the author cascades to the book."""
    name = models.TextField()
    isbn = models.TextField()
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    def __str__(self):
        # Format: "Lastname. 'Title' ISBN <isbn>"
        return self.author.last_name + ". '" + self.name + "' ISBN " + self.isbn
|
993,015 | 61ea95c81e8aeb1c275d1564a5b6b08ee518f1dd | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021-Present The THUAlign Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
def parse_args():
    """Parse the command line for the vocabulary-creation script.

    Returns the parsed namespace with attributes: corpus, output, limit,
    control.
    """
    parser = argparse.ArgumentParser(description="Create vocabulary")
    parser.add_argument("corpus", help="input corpus")
    # Fix: without nargs="?" a positional argument is required and its
    # declared default is never used -- the default was dead code.
    parser.add_argument("output", nargs="?", default="vocab.txt",
                        help="Output vocabulary name")
    parser.add_argument("--limit", default=0, type=int, help="Vocabulary size")
    parser.add_argument("--control", type=str, default="<pad>,<eos>,<unk>",
                        help="Add control symbols to vocabulary. "
                             "Control symbols are separated by comma.")
    return parser.parse_args()
def count_words(filename):
    """Count whitespace-separated byte tokens in `filename`.

    Returns a (words, counts) pair of tuples sorted by descending count,
    then ascending word.
    """
    counter = collections.Counter()
    with open(filename, "rb") as stream:
        for line in stream:
            counter.update(line.strip().split())
    ordered = sorted(counter.items(), key=lambda item: (-item[1], item[0]))
    words, counts = zip(*ordered)
    return words, counts
def control_symbols(string):
    """Split a comma-separated spec into a list of ascii-encoded symbols.

    Empty or falsy input yields an empty list.
    """
    if not string:
        return []
    return [symbol.encode("ascii") for symbol in string.strip().split(",")]
def save_vocab(name, vocab):
    """Persist `vocab` ({word-bytes: index}) to a text file, one word per
    line in index order.  A .txt extension is appended when missing."""
    if name.split(".")[-1] != "txt":
        name = name + ".txt"
    ordered = sorted(vocab.items(), key=lambda item: (item[1], item[0]))
    words, _ = zip(*ordered)
    with open(name, "wb") as sink:
        for word in words:
            sink.write(word + b"\n")
def main(args):
    """Build the vocabulary from the corpus and write it to disk.

    Control symbols are assigned the first ids; remaining words are added
    in frequency order until --limit (when nonzero) is reached.  Prints
    total/unique word counts and the corpus coverage of the vocabulary.
    """
    vocab = {}
    limit = args.limit
    count = 0  # corpus tokens covered by the vocabulary
    words, counts = count_words(args.corpus)
    ctl_symbols = control_symbols(args.control)
    # Control symbols occupy ids 0..len(ctl_symbols)-1.
    for sym in ctl_symbols:
        vocab[sym] = len(vocab)
    for word, freq in zip(words, counts):
        if limit and len(vocab) >= limit:
            break
        if word in vocab:
            print("Warning: found duplicate token %s, ignored" % word)
            continue
        vocab[word] = len(vocab)
        count += freq
    save_vocab(args.output, vocab)
    print("Total words: %d" % sum(counts))
    print("Unique words: %d" % len(words))
    print("Vocabulary coverage: %4.2f%%" % (100.0 * count / sum(counts)))
if __name__ == "__main__":
main(parse_args())
|
993,016 | 75c2d37bd9456de71b6933c6ba67b9eb3f189446 | import pandas as pd
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# NOTE(review): the original comment claimed this "removes ASCII characters",
# but the loop copies every line unchanged -- as written it is a plain file
# copy.  Confirm the intended filtering before relying on this script.
with open('/Users/jif/Repos/Automation/Files/sample.txt') as fp:
    edit_file = open('/Users/jif/Repos/Automation/Files/sample_v2.txt', "w")
    for line in fp:
        edit_file.write(line)  # copied verbatim; nothing is stripped
    edit_file.close()
|
993,017 | 8c31379b0fa8a98502068cd17209bb51459d2388 | from hash40 import Hash40
class NameHash40:
    """Pairs a human-readable script name with its Hash40 value."""
    def __init__(self, name, hash40):
        self.name = name  # the original (un-hashed) string
        self.hash40 = hash40  # Hash40 computed from the lowercased name
HashList = []  # name/Hash40 pairs for every known animation-script name
ArticleList = []  # name/Hash40 pairs for weapon/article kinds

# Script-name prefixes for the four script categories.
PREFIXES = ("game_", "sound_", "effect_", "expression_")


def _add_script_hash(name):
    """Register `name` and, for Special/Final moves, its Air variant.

    Replaces four copy-pasted blocks from the original that differed only
    in the prefix.
    """
    HashList.append(NameHash40(name.strip(),
                               Hash40.CreateFromString(name.lower().strip())))
    if 'Special' in name or 'Final' in name:
        air = name.replace('Special', 'SpecialAir').replace('Final', 'FinalAir')
        HashList.append(NameHash40(air.strip(),
                                   Hash40.CreateFromString(air.lower().strip())))


# The bare prefixes themselves are also valid hash keys.
for _prefix in PREFIXES:
    _add_script_hash(_prefix)

namesFile = open('scriptNames.txt', 'r')
for _line in namesFile:
    if _line != "\n":
        # Same order as the original: game_, sound_, effect_, expression_
        # (each immediately followed by its Air variant when applicable).
        for _prefix in PREFIXES:
            _add_script_hash(_prefix + _line)

articlesFile = open('articles.txt', 'r')
for _line in articlesFile:
    if _line != "\n":
        _kind = _line.replace("WEAPON_KIND_", "")
        ArticleList.append(NameHash40(_kind.lower().strip(),
                                      Hash40.CreateFromString(_kind.lower().strip())))
class Article:  # Character / Weapon
    """An article (character or weapon) and the script hashes found for it."""

    def __init__(self, article, hashes=None):
        self.article = article
        # Bug fix: the original used a mutable default (`hashes=[]`), so
        # every Article created without `hashes` shared one list and
        # addScriptHash() on one instance leaked into all the others.
        self.scriptsHash = [] if hashes is None else hashes

    def addScriptHash(self, hash, address):
        """Record a script hash together with the address it was found at."""
        self.scriptsHash.append(ScriptHash(hash, address))

    def findHashValue(self):
        """Resolve this article's hash to its registered name.

        Falls back to the raw hash40 value when the hash is not present in
        the module-level ArticleList.
        """
        find = next((x for x in ArticleList
                     if self.article.hash == x.hash40.hash
                     and self.article.length == x.hash40.length), None)
        if find:
            return find.name
        return self.article.hash40
class ScriptHash:
    """A script hash paired with the address where it was found."""
    def __init__(self, hash, address):
        self.hash = hash  # Hash40 of the script name
        self.address = address  # integer offset
    def getAddress(self):
        # Address formatted as a hex string.
        return hex(self.address)
    def findHashValue(self):
        # Resolve the hash to a known script name via the module-level
        # HashList.  NOTE(review): the fallback returns `self.hash.hash40`,
        # which assumes the stored hash object itself carries a `hash40`
        # attribute -- confirm against the Hash40 class.
        find = next((x for x in HashList if self.hash.hash == x.hash40.hash and self.hash.length == x.hash40.length), None)
        if find:
            return find.name
        else:
            return self.hash.hash40
993,018 | 3d3a01ac76d206dbbfef3f0ab73b1aecfed2b407 | """
Rebuilds database: deletes old database.db and creates tables.
"""
import os
import sqlite3

# Remove any previous database.  On a first run there is nothing to delete,
# so a missing file is not an error (the original crashed here with
# FileNotFoundError).
try:
    os.remove('database.db')
except FileNotFoundError:
    pass

CONNECTION = sqlite3.connect('database.db')
CONNECTION.row_factory = sqlite3.Row
CURSOR = CONNECTION.cursor()

# One CREATE TABLE per logical entity.
CURSOR.execute(
    """
    CREATE TABLE groups
    (name text)
    """
)
CURSOR.execute(
    """
    CREATE TABLE users
    (name text, password text, is_admin integer, group_id integer)
    """
)
CURSOR.execute(
    """
    CREATE TABLE exams
    (name text, duration integer, published integer, group_id integer)
    """
)
CURSOR.execute(
    """
    CREATE TABLE questions
    (type text, statement text, correct text, maxsubs integer, maxscore integer, exam_id integer)
    """
)
CURSOR.execute(
    """
    CREATE TABLE examrequests
    (student_id integer, exam_id integer, start integer, end integer)
    """
)
CURSOR.execute(
    """
    CREATE TABLE submissions
    (student_id integer, exam_id integer, question_id integer, answer text, share real)
    """
)
# Seed rows (previously kept here as commented-out INSERTs) should be added
# through the application, not this rebuild script.
CONNECTION.commit()
|
993,019 | 79398ad5ef2ef8863f3cd9a5bc23ebe97b9a2c01 |
# 测试输入空字符串的情况,int 时会报错
port = input("input name :")
print("1:")
print(port)
if port:
print("非空字符串")
else:
print("空字符串")
port = int(port)
print("2:")
print((port))
if port:
print("非零")
else:
print("零")
|
993,020 | 7da4235f6df904a58f01ecaf7fa4d23274347a31 | """DB_Ubereats URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from uber_eat.views import SingUpView
from uber_eat import views
urlpatterns = [
path('signup/', SingUpView.as_view(), name='signup'),
path('', views.home, name='home'),
path('login/', views.login, name='login'),
path('logout/', views.logout, name='logout'),
path('store/', views.show_store_page, name='show_store_page'),
path('user_show_order/', views.user_show_order, name='user_show_order'),
path('add_order_post/', views.add_order_post, name='add_order_post'),
path('del_order/', views.del_order, name='del_order'),
path('store_page/', views.store_page, name='store_page'),
path('edit_order/', views.edit_order, name='edit_order'),
path('mod_Ostatus_post/', views.mod_Ostatus_post, name='mod_Ostatus_post'),
path('mod_Odeliver_post/', views.mod_Odeliver_post, name='mod_Odeliver_post'),
path('user_show_order_deteil/', views.user_show_order_deteil, name='user_show_order_deteil')
]
|
993,021 | 4252a2ae2c09ebe8af535335b74506bef8a3e581 | """
sentry.utils.files
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import zlib
from sentry import features, options
from sentry.models import MAX_FILE_SIZE
def compress_file(fp, level=6):
    """Zlib-compress a chunked file object.

    Returns a tuple (compressed_bytes, raw_bytes), where raw_bytes is the
    concatenation of every chunk read from `fp`.
    """
    compressor = zlib.compressobj(level)
    raw_parts = []
    packed_parts = []
    for piece in fp.chunks():
        raw_parts.append(piece)
        packed_parts.append(compressor.compress(piece))
    packed_parts.append(compressor.flush())
    return b''.join(packed_parts), b''.join(raw_parts)
def get_max_file_size(organization):
    """Returns the maximum allowed debug file size for this organization."""
    allow_large = features.has('organizations:large-debug-files', organization)
    return MAX_FILE_SIZE if allow_large else options.get('system.maximum-file-size')
|
993,022 | 0ebd3346aa5f066d77c11bd86b79abb22b551f36 | import pyvisa
from .driver import Driver
class Keithley6221Driver(Driver):
    """Driver for the Keithley 6221 current source.

    The SCPI command strings used below are quoted in each method's
    docstring as documented for the instrument.
    """
    def __init__(self, resource):
        super().__init__(resource)
        try:
            # Try to setup the device for serial operation
            resource.baud_rate = 19200
            resource.data_bits = 8
            resource.parity = pyvisa.constants.Parity.none
            resource.stop_bits = pyvisa.constants.StopBits.one
            resource.read_termination = resource.CR
        except AttributeError:
            # Non-serial resources (e.g. GPIB/USB) lack these attributes.
            pass
    def set_data_elements(self, elements):
        """
        FORMat:ELEMents <item list>
        Specify which elements are included in the data string returned from "Read commands"
        Valid elements:
            READing = Reading (Delta, Pulse Delta, or Differential Conductance). Overflow and NaN are returned as +9.9E37
            TSTamp = Timestamp of the measurement
            UNITs = Measurement units
            RNUMber = Reading number
            SOURce = Current source level
            COMPliance = State of compliance. If in compliance, "T" (or 1 if in double data format) is returned.
                         If not in compliance "F" (or 0 if in double data format) is returned.
            AVOLtage = Average voltage (Differential Conductance)
            ALL = Include all the above elements
            DEFault = Includes READing and TSTamp only
        The elements parameter should be a list of strings
        """
        self.write("FORM:ELEM {}".format(",".join(elements)))
    def get_pre_math_data(self):
        """
        SENSe:DATA:FRESh?
        Read the latest pre-math reading. The return reading will be filtered if the averaging filter is enabled.
        Once a reading is returned, it cannot be returned again (due to the FRESh command). This guarantees that
        each reading gets returned only once. If a new (fresh) reading is not available when SENS:DATA:FRES? is sent,
        error -230 Data corrupt or stale will occur.
        """
        # NOTE(review): the docstring shows "FRESh?" but no '?' is sent here
        # (same in get_post_math_data) -- confirm Driver.query appends it.
        return self.query("SENS:DATA:FRES")
    def get_post_math_data(self):
        """
        CALCulate:DATA:FRESh?
        Read the latest post-math reading. The return reading will be filtered if the averaging filter is enabled.
        Once a reading is returned, it cannot be returned again (due to the FRESh command). This guarantees that
        each reading gets returned only once. If a new (fresh) reading is not available when SENS:DATA:FRES? is sent,
        error -230 Data corrupt or stale will occur.
        """
        return self.query("CALC:DATA:FRES")
    def set_compliance(self, compliance):
        """
        SOURce:CURRent:COMPliance
        Set the compliance (volts). Valid values are 0.1 to 105 inclusive.
        """
        self.write("SOUR:CURR:COMP {}".format(str(compliance)))
    def get_compliance(self):
        """
        SOURce:CURRent:COMPliance?
        Query the voltage compliance
        """
        # NOTE(review): this sends the abbreviation "COMPL" while the setter
        # uses "COMP"; confirm the instrument accepts this form.
        return self.query("SOUR:CURR:COMPL?")
    def set_filter_state(self, state):
        """
        SOURce:FILTer:STATe <b>
        Enable or disable the analog filter
        """
        # NOTE(review): a set command is issued via query() (expecting a
        # response) and its result is returned; write() may be intended --
        # confirm against the 6221 reference manual.
        return self.query("SOUR:FILT:STAT {}".format(str(state)))
    def get_filter_state(self):
        """
        SOURce:FILTer:STATe?
        Query the state of the analog filter
        """
        return self.query("SOUR:FILT:STAT?")
|
993,023 | 1ed53c9fcca3b817ae66de83535fe2c0e1fdfda0 | import numpy as np
from math import pow
import Eff
TankR = 10026.35e-3
Height = 10026.35e-3
PPM = [1, 1, 1, 1, 1, 1]
Iso = ['U238', 'Th232', 'U235', 'U238_l', 'Th232_l', 'U235_l']
IsoDecay = [['Pa234', 'Pb214', 'Bi214', 'Bi210',
'Tl210'], #U238
['Ac228', 'Pb212', 'Bi212', 'Tl208'],
#Th232
['Th231', 'Fr223', 'Pb211', 'Bi211',
'Tl207'], #U235
['Pa234', 'Pb214', 'Bi214', 'Bi210',
'Tl210'], #U238_l
['Ac228', 'Pb212', 'Bi212', 'Tl208'],
#Th232_l
['Th231', 'Fr223', 'Pb211', 'Bi211',
'Tl207']] #U235_l
GDIsoEff = [Eff.GDU238, #U238
Eff.GDTh232, #Th232
Eff.GDU235, #U235
Eff.GDU238, #U238_l
Eff.GDTh232, #Th232_l
Eff.GDU235] #U235_l
GDEffErr = [Eff.GDU238Err, #U238
Eff.GDTh232Err, #Th232
Eff.GDU235Err, #U235
Eff.GDU238Err, #U238_l
Eff.GDTh232Err, #Th232_l
Eff.GDU235Err] #U235_l
GDErr = [[], #U238
[], #Th232
[], #U235
[], #U238_l
[], #Th232_l
[]] #U235_l
GDIsoBG = [[], #U238
[], #Th232
[], #U235
[], #U238_l
[], #Th232_l
[]] #U235_l
def ErrProp(EffErr, Eff, BG):
    """Propagate the relative efficiency error onto a background rate.

    Returns BG scaled by EffErr/Eff, or 0 when the efficiency is zero
    (avoiding a division by zero).
    """
    if Eff == 0:
        return 0
    return BG * (EffErr / Eff)
def GDAct(PPM):
    """Convert per-isotope impurity concentrations (ppm) to activities.

    Uses the module-level tank geometry (TankR, Height) for the volume and
    the module-level Iso list for labels.  Prints one line per isotope and
    returns the list of activities.
    """
    # Cylinder volume * 1e3; assumes unit density -- TODO confirm units.
    mass = np.pi*pow(TankR, 2)*(2*Height)*1e3
    const = mass*0.002
    IsoAct = list(range(len(PPM)))
    for i in range(len(PPM)):
        IsoAct[i] = PPM[i]*const
        # NOTE(review): "Activit" is a typo in this user-facing message.
        print('Activit of ' + Iso[i] + ' = %.5e x %.5e = %.5e' % (PPM[i], const, IsoAct[i]))
    return IsoAct
Activity = GDAct(PPM)
def BGrate():
    """Compute and print the background rate for every isotope decay product.

    Fills the module-level GDIsoBG and GDErr tables in place from the
    module-level Activity, GDIsoEff, and GDEffErr tables.
    """
    for i in range(len(IsoDecay)):
        for x in range(len(IsoDecay[i])):
            if IsoDecay[i][x] == 'Tl210':
                # Tl210 carries an extra 0.002 factor -- NOTE(review):
                # confirm this branching factor against the decay scheme.
                GDIsoBG[i].append(Activity[i]*GDIsoEff[i][x]*0.002)
            else:
                GDIsoBG[i].append(Activity[i]*GDIsoEff[i][x])
            GDErr[i].append(ErrProp(GDEffErr[i][x], GDIsoEff[i][x], GDIsoBG[i][x]))
            print('BG rate for ' + IsoDecay[i][x] + ' = %.5e +/- %.5e' % (GDIsoBG[i][x], GDErr[i][x]))
|
993,024 | e8ee4ff881ac0c9d24ac7600eaabcd5af6c35e46 | from math import factorial
import numpy as np
from xspline.typing import PolyParams, NDArray
from xspline.xfunction import BundleXFunction, XFunction
def poly_val(params: PolyParams, x: NDArray) -> NDArray:
    """Polynomial value function.

    Parameters
    ----------
    params
        Polynomial coefficients, highest degree first.
    x
        Data points.

    Returns
    -------
    NDArray
        Polynomial function values at ``x``.

    """
    return np.polyval(params, x)
def poly_der(params: PolyParams, x: NDArray, order: int) -> NDArray:
    """Polynomial derivative function.

    Parameters
    ----------
    params
        Polynomial coefficients, highest degree first.
    x
        Data points.
    order
        Order of differentiation (non-negative).

    Returns
    -------
    NDArray
        Values of the ``order``-th derivative at ``x``.

    """
    derivative_coeffs = np.polyder(params, order)
    return np.polyval(derivative_coeffs, x)
def poly_int(params: PolyParams, x: NDArray, order: int) -> NDArray:
    """Polynomial definite-integral helper.

    Parameters
    ----------
    params
        Polynomial coefficients, highest degree first.
    x
        Data points.
    order
        Order of integration, given as a NEGATIVE integer; ``-order``
        antiderivatives are taken with zero integration constants.

    Returns
    -------
    NDArray
        Antiderivative values at ``x``.

    """
    antiderivative = np.polyint(params, -order)
    return np.polyval(antiderivative, x)
class Poly(BundleXFunction):
    """Polynomial function. A simple wrapper for the numpy poly functions.

    Parameters
    ----------
    params
        Tuple of coefficients for the polynomial terms, highest degree
        first.

    Example
    -------
    >>> poly = Poly((1.0, 0.0))
    >>> poly([0.0, 1.0])
    array([0., 1.])
    >>> poly([0.0, 1.0], order=1)
    array([1., 1.])
    >>> poly([0.0, 1.0], order=2)
    array([0., 0.])
    >>> poly([0.0, 1.0], order=-1)
    array([0. , 0.5])

    """
    def __init__(self, params: PolyParams) -> None:
        super().__init__(params, poly_val, poly_der, poly_int)
def get_poly_params(fun: XFunction, x: float, degree: int) -> tuple[float, ...]:
    """Solve for the polynomial (Taylor) coefficients of `fun` around `x`.

    Parameters
    ----------
    fun
        The ``XFunction`` to approximate; queried for derivative values
        via ``fun(x, order=i)``.
    x
        The expansion point.
    degree
        Degree of the approximating polynomial; a negative degree yields
        the zero polynomial ``(0.0,)``.

    Returns
    -------
    tuple[float, ...]
        Coefficients of the approximation polynomial, highest degree first.

    """
    if degree < 0:
        return (0.0,)
    orders = range(degree, -1, -1)
    rhs = np.array([fun(x, order=o) for o in orders])
    mat = np.zeros((degree + 1, degree + 1))
    for row in range(degree + 1):
        for col in range(row + 1):
            mat[row, col] = (
                factorial(degree - col) / factorial(row - col) * x ** (row - col)
            )
    return tuple(np.linalg.solve(mat, rhs))
def get_poly_fun(fun: XFunction, x: float, degree: int) -> Poly:
    """Build a ``Poly`` approximating `fun` around `x` up to `degree`.

    Parameters
    ----------
    fun
        The ``XFunction`` to approximate.
    x
        The expansion point.
    degree
        Degree of the approximating polynomial.

    Returns
    -------
    Poly
        Polynomial approximation of ``fun`` at ``x``.

    """
    return Poly(get_poly_params(fun, x, degree))
|
993,025 | 7c5df548f0e1254aa1d14309a120d74af9ede05a | from hashlist import Hashlist
class Hash:
    """Hash map wrapper around Hashlist that grows as entries are added."""
    def __init__(self):
        self.taken = 0  # number of occupied slots
        self.list = Hashlist(4)  # backing storage, initial capacity 4
    def __setitem__(self,index,newItem):
        # Double the capacity once the table is more than a third full.
        # NOTE(review): the divisor 4 only matches the INITIAL capacity;
        # after the first resize `self.taken/4` no longer tracks the load
        # factor -- confirm whether `self.taken` alone was intended.
        if self.taken/4 > len(self.list)/3:
            self.list = Hashlist(len(self.list)*2, self.list)
        old = self.list.items[self.list.toIndex(index)]
        self.list[index] = newItem
        # Only count the slot as newly taken when it was previously empty.
        if not old:
            self.taken += 1
    def __getitem__(self,index):
        return self.list[index]
    def remove(self, index):
        # Hashlist.remove is truthy on success; keep the count in sync.
        if self.list.remove(index):
            self.taken -= 1
|
993,026 | 760db1dcd073d63dab71e3c0d965eb3a4b231edd | import sys
import logging
# Fix: name the logger after the module (standard logging hierarchy)
# rather than the file path.
logger = logging.getLogger(__name__)


def reload_pb_tools():
    """Remove all Project Borealis ("pb") modules from the Python session.

    Use this command to reload the tools after a change was made.
    (The old docstring referred to the `mop` package, but the search terms
    below target modules whose name contains "pb".)
    """
    search = ["pb"]
    stale = [
        module
        for module in sys.modules
        if any(term in module for term in search)
    ]
    for module in stale:
        del sys.modules[module]
    logger.info("Reloaded Project Borealis tools.")
|
993,027 | 86f57b1731307ea8802a436a88d73e70fc0fb32a | import unittest
from main.SEIR_model import SEIR
import configparser
import os
class TestMultipleFactors(unittest.TestCase):
    """SEIR derivative tests, parameterized from config.ini next to this file.

    NOTE: 'mutiple_factors' (sic) is the literal section name used in
    config.ini -- do not correct the spelling here without changing the
    config file as well.
    """
    # Shared fixture: parameters are read once at class-definition time.
    conf = configparser.ConfigParser()
    curpath = os.path.dirname(os.path.realpath(__file__))
    cfgpath = os.path.join(curpath, "config.ini")
    conf.read(cfgpath, encoding="utf-8")
    N = int(conf['mutiple_factors']['N'])
    S_0 = int(conf['mutiple_factors']['S_0'])
    E_0 = int(conf['mutiple_factors']['E_0'])
    I_0 = int(conf['mutiple_factors']['I_0'])
    recovery = int(conf['mutiple_factors']['recovery'])
    beta1 = float(conf['mutiple_factors']['beta1'])
    beta2 = float(conf['mutiple_factors']['beta2'])
    sigma = float(conf['mutiple_factors']['sigma'])
    gamma = float(conf['mutiple_factors']['gamma'])
    r = int(conf['mutiple_factors']['r'])
    inivalue = [S_0, E_0, I_0, recovery]
    def test_SEIR_notNone(self):
        # SEIR must return a value for valid inputs.
        self.assertIsNotNone(SEIR(self.inivalue, self.r, self.beta1, self.beta2, self.N, self.sigma, self.gamma),
                             msg='None')
    def test_SEIR_S(self):
        # Susceptible derivative expected from the configured parameters.
        self.assertEqual(-11, SEIR(self.inivalue, self.r, self.beta1, self.beta2, self.N, self.sigma, self.gamma)[0], msg='Not Equal')
    def test_SEIR_E(self):
        # Exposed derivative.
        self.assertEqual(8.9, SEIR(self.inivalue, self.r, self.beta1, self.beta2, self.N, self.sigma, self.gamma)[1], msg='Not Equal')
    def test_SEIR_I(self):
        # Infected derivative.
        self.assertEqual(1.1, SEIR(self.inivalue, self.r, self.beta1, self.beta2, self.N, self.sigma, self.gamma)[2], msg='Not Equal')
    def test_SEIR_R(self):
        # Recovered derivative.
        self.assertEqual(1, SEIR(self.inivalue, self.r, self.beta1, self.beta2, self.N, self.sigma, self.gamma)[3], msg='Not Equal')
|
993,028 | 4c4f7cb701b090d10227f7a51de9954f9934d79f | ### Routines for demonstrating 1D function learning programs.
from RLtoolkit.G.g import *
from RLtoolkit.Quickgraph.graph import *
from math import *
from .fa import *
from .tilecoder import *
window = None  # the single DemoWindow instance (intended to be set by setupFAdemo)
black = gColorBlack(True)
flip = gColorFlip(True)
gray = gColorLightGray(True)
white = gColorWhite(True)
xresolution = 60  # number of points across
lines = [[]]  # the set of lines to draw when refreshing
oldlines = [[]]  # the previous set of lines (drawn grayed when enabled)
showoldlines = gIntVar()  # indicates whether to show old lines, set by menu button
showoldlines.set(0)
menuloaded = False
functionapproximator = None  # the FA currently being demonstrated
examples = []  # a list of all the examples learned so far
class FADataview(Dataview):
    """Data view that turns mouse clicks into FA training examples."""
    def gDrawView(self):
        # Delegate drawing to the owning graph window.
        self.parentgraph.gDrawView()
        pass
    def gClickEventHandler(self, x, y):
        # A click supplies a new (x, f(x)) training sample.
        self.newExample(x, y)
    def newExample(self, xarg, farg):
        """Learn one example and refresh the display."""
        global inputarray, functionapproximator, examples
        self.parentgraph.drawExample(xarg, farg, 1)
        inputarray[0] = xarg
        examples.append([xarg, farg])
        functionapproximator.faLearn(inputarray, farg)
        self.parentgraph.updateLines()
        gClear(self)
        self.parentgraph.maybeDrawOldlines()
        self.parentgraph.drawLines()
        self.parentgraph.drawEgs()
class DemoWindow(Graph):
    """Interactive window for the 1D function-approximation demo.

    Clicking in the data view adds a training example; the "FA Demo" menu
    controls learning rate, plot resolution, and number of tilings.
    """
    def __init__(self, title, fa, dataviewtype=FADataview, **kwargs):
        global functionapproximator
        Graph.__init__(self, title, dataviewtype, **kwargs)
        self.dataview.parentgraph = self
        xGraphLimits(0, 1, "Function Approximation Demonstration")
        yGraphLimits(-20, 36, "Function Approximation Demonstration")
        xTickmarks([0, .2, .4, .6, .8, 1],
                   "Function Approximation Demonstration")
        yTickmarks([-20, -10, 0, 10, 20, 30],
                   "Function Approximation Demonstration")
        self.boxy = True
        # Default FA: a tile coder on [0, 1.2] with 6 intervals.
        if fa == None:
            fa = makeTileCoder([[0, 1.2, 6]])
        functionapproximator = fa
        self.initDemo()
        self.setupFAdemoMenu()
    def gDrawView(self):
        """Redraw axes, examples, and the approximation lines."""
        gClear(self)
        self.drawAxes()
        self.drawEgs()
        self.maybeDrawOldlines()
        self.drawLines()
        self.message("Next Point?")
    def gDestroy(self, event):
        Gwindow.gDestroy(self, event)
        self.quit()
    def message(self, string, color1='white', color2='black'):
        "Updates the message in lower left corner"
        global black, white
        gFillRectR(self, 0, 0, \
                   gTextWidth(self, "aaaaaaaaaaaaaaaaaa", self.charstyle), \
                   gTextHeight(self, "A", self.charstyle), white)
        gDrawText(self, string, self.charstyle, 1, 4, black)
    def initDemo(self):
        """Reset the FA and all recorded examples; start a fresh demo."""
        global functionapproximator, lines, oldlines, examples, inputarray
        functionapproximator.faInit()
        gClear(self)
        gClear(self.dataview)
        inputarray = [0 for i in range(functionapproximator.numinputs)]
        lines = [[]]
        oldlines = [[]]
        examples = []
        self.updateLines()
        self.gDrawView()
    def reDraw(self):
        self.viewDrawContents()
    def setResolutionHigher(self):
        """Increase the plot resolution by 50% and redraw."""
        global xresolution
        xresolution = int(round(1.5 * xresolution))
        print(("New xresolution is", str(xresolution)))
        self.updateLines()
        self.reDraw()
    def setResolutionLower(self):
        """Decrease the plot resolution by a third and redraw."""
        global xresolution
        xresolution = int(round(0.666667 * xresolution))
        print(("New xresolution is", str(xresolution)))
        self.updateLines()
        self.reDraw()
    def setAlpha(self, new=0.1):
        """Set the FA's learning rate."""
        global functionapproximator
        print(("Setting alpha to", new))
        functionapproximator.setLearningrate(new)
    def setTilings(self, num=8):
        """Replace the FA with a fresh tile coder using `num` tilings."""
        global functionapproximator
        print(("Set number of tilings to ", num))
        functionapproximator = makeTileCoder([[0, 1.2, 6]], 1, num)
        self.initDemo()
    def quitFA(self):
        self.gCloseView()
        gQuit()
    def setupFAdemoMenu(self):
        """Build the "FA Demo" menu: init, old-line toggle, resolution,
        learning rate, tiling count, and quit."""
        global showoldlines
        gAddMenu(self, "FA Demo", \
                 [["Init", self.initDemo], \
                  ['button', "Show Old Line", showoldlines, 1, 0, None], \
                  ["Resolution Higher", self.setResolutionHigher], \
                  ["Resolution Lower", self.setResolutionLower], \
                  '---', \
                  ["Alpha = 1.0", lambda: self.setAlpha(1.0)], \
                  ["Alpha = 0.5", lambda: self.setAlpha(0.5)], \
                  ["Alpha = 0.25", lambda: self.setAlpha(0.25)], \
                  ["Alpha = 0.1", lambda: self.setAlpha(0.1)], \
                  '---', \
                  ["Number of tilings = 1", lambda: self.setTilings(1)], \
                  ["Number of tilings = 2", lambda: self.setTilings(2)], \
                  ["Number of tilings = 8", lambda: self.setTilings(8)], \
                  ["Number of tilings = 64", lambda: self.setTilings(64)], \
                  '---', \
                  ['Quit', self.quitFA]])
    def drawExample(self, x, f, intensity):
        """Draw one example as a circle; `intensity` scales its radius."""
        global black
        gdDrawCircle(self.dataview, gdCoordx(self.dataview, x),
                     gdCoordy(self.dataview, f), \
                     ceil(5 * intensity), black)
    def maybeDrawOldlines(self):
        # Only draw the previous curve when the menu toggle is on.
        global oldlines, gray, showoldlines
        if showoldlines.get() == 1:
            self.drawLines(oldlines, gray)
    def drawLines(self, dlines=None, color=black):
        global lines
        if dlines == None:
            dlines = lines
        drawXY(self, dlines[0], color)
    def updateLines(self):
        """Resample the FA at `xresolution` points into the line list."""
        global lines, oldlines, xresolution, inputarray, functionapproximator
        oldlines = lines
        nlines = []
        for i in range(xresolution):
            x = float(i) / xresolution
            fline = [x, functionapproximator.faApproximate([x])]
            nlines.append(fline)
        nlines.append(fline)  # repeat last point so last section draws
        lines = [nlines]
    def drawEgs(self):
        # Most recent examples are drawn strongest; older ones fade out.
        global examples
        if examples != []:
            printegs = examples[:]
            printegs.reverse()
            age = 0
            for x, f in printegs:
                self.drawExample(x, f, .95 ** age)
                age += 1
def setupFAdemo(fa=None):
    """Create the demo window, defaulting to a 6-interval tile coder FA.

    NOTE(review): `window` below is a local variable; the module-level
    `window` stays None.  Add `window` to the global statement if the
    module-level binding is intended.
    """
    global GDEVICE, \
        functionapproximator
    if fa == None:
        fa = makeTileCoder([[0, 1.2, 6]])
    functionapproximator = fa
    window = DemoWindow("Function Approximation Demonstration", fa, \
                        dataviewtype=FADataview, \
                        gdViewportR=(
                            10, 30, GDEVICE.wwidth - 50, GDEVICE.wheight - 50))
def faDemo():
    """Entry point: open the demo window and start the GUI main loop."""
    setupFAdemo(None)
    gMainloop()
if __name__ == "__main__":
    faDemo()
|
993,029 | 0cfb1ce36fafaeb2e10b3ce773f6e86afe407b31 | from .densenet import densenet121, densenet161, densenet169, densenet201
from .inception import inception_v3
from .mnasnet import mnasnet0_5, mnasnet0_75, mnasnet1_0, mnasnet1_3
from .mobilenet import mobilenet_v2_
from .resnets import resnet18, resnet34, resnet50, resnet101, resnet152
from .resnext import resnext50_32x4d, resnext101_32x8d
from .vgg import vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19, vgg19_bn
from .wide_resnet import wide_resnet50_2, wide_resnet101_2
|
993,030 | 0a140cfb0e4b977c5cefe3cb7f6836c7f98f0979 | import pymongo
from pymongo.collection import Collection
class Connect_mongo:
    """Holds a MongoDB client connected to the `douguo` database."""

    def __init__(self):
        self.client = pymongo.MongoClient(host='192.168.100.106', port=27017)
        self.db_data = self.client['douguo']

    def insert_item(self, item):
        """Insert one document into the `douguo` collection.

        NOTE(review): Collection.insert is deprecated in newer pymongo
        releases in favor of insert_one -- confirm the pinned version
        before changing it.
        """
        collection = Collection(self.db_data, 'douguo')
        collection.insert(item)
mongo_info = Connect_mongo()
|
993,031 | c905b82d93d37dbb23eb80bdedd15120d31b9dd2 | # Generated by Django 3.1.1 on 2020-09-28 14:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Course.image optional with an Unsplash default."""
    dependencies = [
        ('courses', '0017_remove_course_newfield'),
    ]
    operations = [
        migrations.AlterField(
            model_name='course',
            name='image',
            field=models.URLField(blank=True, default='https://images.unsplash.com/photo-1498243691581-b145c3f54a5a?fit=crop&w=500&q=60'),
        ),
    ]
|
993,032 | 1afbd83e7855afbab33aed4467902b9dbc83abf0 | #!/usr/bin/python
import os
from os import listdir
from os.path import isfile, join
import numpy as np
import cv2
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
vocab = [] # This is a list of every SIFT descriptor.
raw_corpus = []
imp = []
plotdata = []
sift = cv2.xfeatures2d.SIFT_create()
mypath ='./traincar'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
#print onlyfiles
for image in onlyfiles:
image = mypath+ '/' + image
img = cv2.imread(image)
gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
kp = sift.detect(gray,None)
kp,desc = sift.compute(gray,kp)
if desc != None:
img_features = []
for row in desc:
vocab.append(row.tolist())
img_features.append(row.tolist())
raw_corpus.append(img_features)
#print raw_corpus
# Perform clustering with k clusters. This will probably need tuning.
plotdata = []
k_values = []
count = 60
k = 20
while k <= count:
cluster = KMeans(k, n_init=1)
cluster.fit(vocab)
# Now we build the clustered corpus where each entry is a string containing the cluster ids for each sift-feature.
corpus = []
for entry in raw_corpus:
corpus.append(' '.join([str(x) for x in cluster.predict(entry)]))
print "calculating value of k"
#plt.scatter(list, plotdata)
print "value of k: ",k
k_values.append(k)
plotdata.append(cluster.inertia_)
k+=2
#plt.show()
plt.plot(k_values,plotdata)
plt.show()
|
993,033 | b070c6dc7f5bd53b5f8a4051a003d48ef8a01297 | #!/usr/bin/python
import sys, urllib2, re
proteins = sys.stdin.read().strip().split('\n')
for prot in proteins:
f = urllib2.urlopen('http://www.uniprot.org/uniprot/' + prot + '.fasta')
cur = ''.join(f.read().split('\n')[1:])
ind = []
for m in re.finditer('(?=(N[^P][ST][^P]))', cur):
ind.append(m.start() + 1)
if ind != []:
print prot
print ' '.join(map(str, ind))
|
993,034 | 0ffee1a5537bc7f8fcd393fce35ae788dc142db2 |
from django.contrib import admin
from django.urls import path
from django.urls import include
# Project-level URL configuration: app routes live under /livraria/.
urlpatterns = [
    path('livraria/',include('AppLivraria.urls')),
    path('admin/', admin.site.urls),
]
|
# Simple interactive inventory: (id, record) tuples keyed by an auto-increment id.
goods = [
    (1, {'name': "компьютер", 'price': 20000, 'count': 5, 'units': "шт."}),
    (2, {'name': "принтер", 'price': 6000, 'count': 2, 'units': "шт."}),
    (3, {'name': "сканер", 'price': 2000, 'count': 7, 'units': "шт."})
]
max_num = 3  # highest id currently in use
fields = ["name", 'price', 'count', 'units']


def _read_int(prompt):
    """Prompt until the user enters a valid integer; return it."""
    while True:
        raw = input(prompt)
        try:
            return int(raw)
        except ValueError:
            print('Please enter a whole number')


print('Hello there')
while True:
    cmd = input('What you wanna do next? [list/add/analytics/quit]: ')
    if cmd == 'list':
        for item in goods:
            print(item[0], '-', item[1])
    elif cmd == 'add':
        name = input('Enter name: ')
        # FIX: seeded items store price/count as ints, but added items kept
        # the raw input strings; cast so every record has consistent types.
        price = _read_int('Enter price: ')
        count = _read_int('Enter count: ')
        units = input('Enter units: ')
        max_num += 1
        item = {'name': name, 'price': price, 'count': count, 'units': units, }
        goods.append((max_num, item))
        print('New item added: ', item)
    elif cmd == 'analytics':
        # Pivot the records into one list per field.
        name_list = []
        price_list = []
        count_list = []
        units_list = []
        for (_, item) in goods:
            print('item', item)
            name_list.append(item.get('name'))
            price_list.append(item.get('price'))
            count_list.append(item.get('count'))
            units_list.append(item.get('units'))
        analytics = {
            "name": name_list,
            "price": price_list,
            "count": count_list,
            "units": units_list,
        }
        print('Analytics:', analytics)
    elif cmd == 'quit':
        print('Bye!')
        break
    else:
        print('Unknown command')
|
993,036 | 668b74d33c3c14f4400e95c2cd53e79c8c311e76 | # Generated by Django 2.1.4 on 2018-12-29 17:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the tm_api app: a BaseScript
    hierarchy (MasterScenario -> Scenario -> Test via multi-table
    inheritance) plus Machine and DryRunData bookkeeping models."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='BaseScript',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('_status', models.CharField(choices=[('running', 'running'), ('cancelled', 'cancelled'), ('failed', 'failed'), ('error', 'error'), ('warning', 'warning'), ('waiting', 'waiting'), ('unknown', 'unknown'), ('passed', 'passed')], db_column='status', default='unknown', max_length=9)),
                ('file_name', models.CharField(max_length=256)),
                ('file_path', models.CharField(max_length=1024)),
                ('script', models.TextField(blank=True, null=True)),
                ('timestamp_start', models.DateTimeField(blank=True, null=True)),
                ('timestamp_stop', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'ordering': ['-pk'],
            },
        ),
        migrations.CreateModel(
            name='DryRunData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'ordering': ['-timestamp', '-pk'],
            },
        ),
        migrations.CreateModel(
            name='Machine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('machine_name', models.CharField(max_length=256, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='MasterScenario',
            fields=[
                ('basescript_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='tm_api.BaseScript')),
            ],
            options={
                'ordering': ['-pk'],
            },
            bases=('tm_api.basescript',),
        ),
        migrations.CreateModel(
            name='Scenario',
            fields=[
                ('basescript_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='tm_api.BaseScript')),
                ('master_scenario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scenarios', to='tm_api.MasterScenario')),
            ],
            options={
                'ordering': ['-pk'],
            },
            bases=('tm_api.basescript',),
        ),
        migrations.CreateModel(
            name='Test',
            fields=[
                ('basescript_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='tm_api.BaseScript')),
                ('scenario_parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tests', to='tm_api.Scenario')),
            ],
            options={
                'ordering': ['-pk'],
            },
            bases=('tm_api.basescript',),
        ),
        migrations.AddField(
            model_name='dryrundata',
            name='machine',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dry_run_datas', to='tm_api.Machine'),
        ),
        migrations.AddField(
            model_name='dryrundata',
            name='master_scenario',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tm_api.MasterScenario'),
        ),
    ]
|
993,037 | e542aaa732b630a0310728f399c7957f663a6c43 | from RobotRaconteur.Client import * #import RR client library
import cv2
import sys
#Function to take the data structure returned from the Webcam service
#and convert it to an OpenCV array
def WebcamImageToMat(image):
    """Convert a Robot Raconteur webcam image structure to an OpenCV array.

    The flat buffer in ``image.data`` is reinterpreted, row-major, as a
    (height, width, 3) frame.
    """
    shape = [image.height, image.width, 3]
    return image.data.reshape(shape, order='C')
# Main program: subscribe to the Webcam service and display frames until a key
# is pressed in the OpenCV window.
url = 'rr+tcp://localhost:2355/?service=Webcam'
# Allow the service URL to be overridden from the command line.
if (len(sys.argv) >= 2):
    url = sys.argv[1]
# Connect to the service; the returned client follows the service definition.
cam_sub = RRN.SubscribeService(url)
cam_obj = cam_sub.GetDefaultClientWait(5)
while True:
    # FIX idiom: 'not x is None' -> 'x is not None' (PEP 8).
    if cam_obj.image is not None:
        cv2.imshow("Image", WebcamImageToMat(cam_obj.image))
    if cv2.waitKey(50) != -1:
        break
cv2.destroyAllWindows()
|
993,038 | d4f4af1ac216dffe8a0cdf4b42a44beb29539562 | #!/usr/bin/python3
import sys
from wltr8 import get_file_addr
# Fetch all transactions for the address given in argv[1]; store the
# resulting file info at the path given in argv[2].
get_file_addr(sys.argv[1], sys.argv[2])
|
993,039 | 5c2838391d3e1aa8d062bd2cea0d31d92ba9026b | import cv2
def main():
    """Mask the green HSV range of test.bmp and save the result as Result.png."""
    # HSV bounds for the target colour band.
    lower_bound = (48, 86, 6)
    upper_bound = (64, 255, 255)

    image = cv2.imread('test.bmp')
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_bound, upper_bound)
    masked = cv2.bitwise_and(image, image, mask=mask)
    cv2.imwrite("Result.png", masked)


if __name__ == "__main__":
    main()
|
993,040 | 0f08ed6060461c56a8fe925d11ad167d610b2889 | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
## @PowhegControl PowhegConfig_HZj
# Powheg configuration for HZj subprocess
#
# Authors: James Robinson <james.robinson@cern.ch>
#! /usr/bin/env python
from ..PowhegConfig_base import PowhegConfig_base
## Default Powheg configuration for HZj generation
#
# Create a full configurable with all available Powheg options
class PowhegConfig_HZj(PowhegConfig_base) :
    """Powheg configuration for the HZj subprocess.

    Sets the HZJ executable, registers process-specific parameters and
    generic parameter sets, and applies tuned integration settings.
    """
    def __init__( self, runArgs=None, opts=None ) :
        ## Constructor: set process-dependent executable path here
        super(PowhegConfig_HZj, self).__init__( runArgs, opts )
        self._powheg_executable += '/HZJ/pwhg_main'
        ## Add process specific options
        self.add_parameter( 'bornsuppfactV', -1 )
        self.add_parameter( 'kappa_ghz', 1, desc='multiplicative kappa-factor of the Higgs-Z coupling' )
        self.add_parameter( 'ptVhigh', -1 )
        self.add_parameter( 'ptVlow', -1 )
        self.add_parameter( 'Vstep', -1 )
        ## Decorate with generic option sets
        self.add_parameter_set( 'Higgs mass window' )
        self.add_parameter_set( 'Higgs properties' )
        self.add_parameter_set( 'Higgs+V+jet' )
        self.add_parameter_set( 'LHEv3' )
        self.add_parameter_set( 'MiNLO NNLL' )
        self.add_parameter_set( 'running scales' )
        self.add_parameter_set( 'top mass' )
        self.add_parameter_set( 'vector boson decay' )
        self.add_parameter_set( 'v2' )
        self.add_parameter_set( 'Z mass window' )
        ## Set optimised integration parameters
        self.ncall1 = 40000
        self.ncall2 = 150000
        self.nubound = 100000
        self.xupbound = 4
        self.foldx = 5
        self.foldy = 10
        self.foldphi = 5
        ## Override defaults
        self.doublefsr = 1
        self.mass_Z_low = 60.0
        # Z mass window upper edge scales with the configured beam energy.
        self.mass_Z_high = 2.0 * self.beam_energy
        self.populate_default_strings()
|
993,041 | 016680dec61165ac5dd2b37b7558f0109c5b10b5 | CURRENT_WEATHER_DATA="https://api.openweathermap.org/data/2.5/weather?"
ONE_CALL_API="https://api.openweathermap.org/data/2.5/onecall?exclude=current,minutely,hourly,alerts&"
DAY_LIMIT=7 |
993,042 | 7f3d77e592d421bee5397f9fe9ade7ad4f6296bf | import codecs
# Write a UTF-8 encoded greeting to text.txt.
# FIX: use a context manager so the file is closed even if the write fails.
with codecs.open("text.txt", 'w', 'utf-8') as f:
    f.write("Hello World,Python! 中国加油!")
|
993,043 | 17ba6650e6075835f62d0bdbdadb71929a4e09c8 | # # Advanced Machine Learning Techniques
# ## Agenda
#
# 1. Reading in the Kaggle data and adding features
# 2. Using a **`Pipeline`** for proper cross-validation
# 3. Combining **`GridSearchCV`** with **`Pipeline`**
# 4. Efficiently searching for tuning parameters using **`RandomizedSearchCV`**
# 5. Adding features to a document-term matrix (using SciPy)
# 6. Adding features to a document-term matrix (using **`FeatureUnion`**)
# 7. Ensembling models
# 8. Locating groups of similar cuisines
# 9. Model stacking
# for Python 2: use print only as a function
from __future__ import print_function
# ## Part 1: Reading in the Kaggle data and adding features
#
# - Our goal is to predict the **cuisine** of a recipe, given its **ingredients**.
# - **Feature engineering** is the process through which you create features that don't natively exist in the dataset.
import pandas as pd
import numpy as np
# define a function that accepts a DataFrame and adds new features
def make_features(df):
    """Add engineered features derived from the raw ingredient lists.

    Adds three columns to *df* in place and returns it:
    num_ingredients, ingredient_length (mean name length), ingredients_str.
    """
    ingredients = df.ingredients
    # How many ingredients each recipe uses.
    df['num_ingredients'] = ingredients.apply(len)
    # Average character length of the ingredient names in each recipe.
    df['ingredient_length'] = ingredients.apply(
        lambda items: np.mean([len(name) for name in items]))
    # Flattened string form of the list, usable by text vectorizers.
    df['ingredients_str'] = ingredients.astype(str)
    return df
# create the same features in the training data and the new data
train = make_features(pd.read_json('../data/train.json'))
new = make_features(pd.read_json('../data/test.json'))
train.head()
train.shape
new.head()
new.shape
# ## Part 2: Using a `Pipeline` for proper cross-validation
# define X and y
X = train.ingredients_str
y = train.cuisine
# X is just a Series of strings
X.head()
# replace the regex pattern that is used for tokenization
from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer(token_pattern=r"'([a-z ]+)'")
# import and instantiate Multinomial Naive Bayes (with the default parameters)
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
# [make_pipeline documentation](http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.make_pipeline.html)
# create a pipeline of vectorization and Naive Bayes
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(vect, nb)
# examine the pipeline steps
pipe.steps
# **Proper cross-validation:**
#
# - By passing our pipeline to **`cross_val_score`**, features will be created from **`X`** (via **`CountVectorizer`**) within each fold of cross-validation.
# - This process simulates the real world, in which your out-of-sample data will contain **features that were not seen** during model training.
# cross-validate the entire pipeline
from sklearn.cross_validation import cross_val_score
cross_val_score(pipe, X, y, cv=5, scoring='accuracy').mean()
# ## Part 3: Combining `GridSearchCV` with `Pipeline`
#
# - We use **`GridSearchCV`** to locate optimal tuning parameters by performing an "exhaustive grid search" of different parameter combinations, searching for the combination that has the best cross-validated accuracy.
# - By passing a **`Pipeline`** to **`GridSearchCV`** (instead of just a model), we can search tuning parameters for both the vectorizer and the model.
# pipeline steps are automatically assigned names by make_pipeline
pipe.named_steps.keys()
# create a grid of parameters to search (and specify the pipeline step along with the parameter)
param_grid = {}
param_grid['countvectorizer__token_pattern'] = [r"\b\w\w+\b", r"'([a-z ]+)'"]
param_grid['multinomialnb__alpha'] = [0.5, 1]
param_grid
# [GridSearchCV documentation](http://scikit-learn.org/stable/modules/generated/sklearn.grid_search.GridSearchCV.html)
# pass the pipeline (instead of the model) to GridSearchCV
from sklearn.grid_search import GridSearchCV
grid = GridSearchCV(pipe, param_grid, cv=5, scoring='accuracy')
# run the grid search
grid.fit(X, y)
# examine the score for each combination of parameters
grid.grid_scores_
# print the single best score and parameters that produced that score
print(grid.best_score_)
print(grid.best_params_)
# ## Part 4: Efficiently searching for tuning parameters using `RandomizedSearchCV`
#
# - When there are many parameters to tune, searching all possible combinations of parameter values may be **computationally infeasible**.
# - **`RandomizedSearchCV`** searches a sample of the parameter values, and you control the computational "budget".
#
# [RandomizedSearchCV documentation](http://scikit-learn.org/stable/modules/generated/sklearn.grid_search.RandomizedSearchCV.html)
from sklearn.grid_search import RandomizedSearchCV
# [scipy.stats documentation](http://docs.scipy.org/doc/scipy/reference/stats.html)
# for any continuous parameters, specify a distribution instead of a list of options
import scipy as sp
param_grid = {}
param_grid['countvectorizer__token_pattern'] = [r"\b\w\w+\b", r"'([a-z ]+)'"]
param_grid['countvectorizer__min_df'] = [1, 2, 3]
param_grid['multinomialnb__alpha'] = sp.stats.uniform(scale=1)
param_grid
# set a random seed for sp.stats.uniform
np.random.seed(1)
# additional parameters are n_iter (number of searches) and random_state
rand = RandomizedSearchCV(pipe, param_grid, cv=5, scoring='accuracy', n_iter=5, random_state=1)
# run the randomized search
rand.fit(X, y)
rand.grid_scores_
print(rand.best_score_)
print(rand.best_params_)
# ### Making predictions for new data
# define X_new as the ingredient text
X_new = new.ingredients_str
# print the best model found by RandomizedSearchCV
rand.best_estimator_
# RandomizedSearchCV/GridSearchCV automatically refit the best model with the entire dataset, and can be used to make predictions
new_pred_class_rand = rand.predict(X_new)
new_pred_class_rand
# create a submission file (score: 0.75342)
pd.DataFrame({'id':new.id, 'cuisine':new_pred_class_rand}).set_index('id').to_csv('sub3.csv')
# ## Part 5: Adding features to a document-term matrix (using SciPy)
#
# - So far, we've trained models on either the **document-term matrix** or the **manually created features**, but not both.
# - To train a model on both types of features, we need to **combine them into a single feature matrix**.
# - Because one of the matrices is **sparse** and the other is **dense**, the easiest way to combine them is by using SciPy.
# create a document-term matrix from all of the training data
X_dtm = vect.fit_transform(X)
X_dtm.shape
type(X_dtm)
# [scipy.sparse documentation](http://docs.scipy.org/doc/scipy/reference/sparse.html)
# create a DataFrame of the manually created features
X_manual = train.loc[:, ['num_ingredients', 'ingredient_length']]
X_manual.shape
# create a sparse matrix from the DataFrame
X_manual_sparse = sp.sparse.csr_matrix(X_manual)
type(X_manual_sparse)
# combine the two sparse matrices
X_dtm_manual = sp.sparse.hstack([X_dtm, X_manual_sparse])
X_dtm_manual.shape
# - This was a relatively easy process.
# - However, it does not allow us to do **proper cross-validation**, and it doesn't integrate well with the rest of the **scikit-learn workflow**.
# ## Part 6: Adding features to a document-term matrix (using `FeatureUnion`)
#
# - Below is an alternative process that does allow for proper cross-validation, and does integrate well with the scikit-learn workflow.
# - To use this process, we have to learn about transformers, **`FunctionTransformer`**, and **`FeatureUnion`**.
# ### What are "transformers"?
#
# Transformer objects provide a `transform` method in order to perform **data transformations**. Here are a few examples:
#
# - **`CountVectorizer`**
# - `fit` learns the vocabulary
# - `transform` creates a document-term matrix using the vocabulary
# - **`Imputer`**
# - `fit` learns the value to impute
# - `transform` fills in missing entries using the imputation value
# - **`StandardScaler`**
# - `fit` learns the mean and scale of each feature
# - `transform` standardizes the features using the mean and scale
# - **`HashingVectorizer`**
# - `fit` is not used, and thus it is known as a "stateless" transformer
# - `transform` creates the document-term matrix using a hash of the token
# ### Converting a function into a transformer
# define a function that accepts a DataFrame returns the manually created features
def get_manual(df):
    """Select only the manually engineered numeric feature columns."""
    manual_cols = ['num_ingredients', 'ingredient_length']
    return df.loc[:, manual_cols]
get_manual(train).head()
# [FunctionTransformer documentation](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.FunctionTransformer.html) (new in 0.17)
from sklearn.preprocessing import FunctionTransformer
# create a stateless transformer from the get_manual function
get_manual_ft = FunctionTransformer(get_manual, validate=False)
type(get_manual_ft)
# execute the function using the transform method
get_manual_ft.transform(train).head()
# define a function that accepts a DataFrame returns the ingredients string
def get_text(df):
    """Return the string representation of each recipe's ingredient list."""
    return df['ingredients_str']
# create and test another transformer
get_text_ft = FunctionTransformer(get_text, validate=False)
get_text_ft.transform(train).head()
# ### Combining feature extraction steps
#
# - **`FeatureUnion`** applies a list of transformers in parallel to the input data (not sequentially), then **concatenates the results**.
# - This is useful for combining several feature extraction mechanisms into a single transformer.
#
# 
# [make_union documentation](http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.make_union.html)
from sklearn.pipeline import make_union
# create a document-term matrix from all of the training data
X_dtm = vect.fit_transform(X)
X_dtm.shape
# this is identical to a FeatureUnion with just one transformer
union = make_union(vect)
X_dtm = union.fit_transform(X)
X_dtm.shape
# try to add a second transformer to the Feature Union (what's wrong with this?)
# union = make_union(vect, get_manual_ft)
# X_dtm_manual = union.fit_transform(X)
# properly combine the transformers into a FeatureUnion
union = make_union(make_pipeline(get_text_ft, vect), get_manual_ft)
X_dtm_manual = union.fit_transform(train)
X_dtm_manual.shape
# 
# ### Cross-validation
# slightly improper cross-validation
cross_val_score(nb, X_dtm_manual, y, cv=5, scoring='accuracy').mean()
# create a pipeline of the FeatureUnion and Naive Bayes
pipe = make_pipeline(union, nb)
# properly cross-validate the entire pipeline (and pass it the entire DataFrame)
cross_val_score(pipe, train, y, cv=5, scoring='accuracy').mean()
# ### Alternative way to specify `Pipeline` and `FeatureUnion`
# reminder of how we created the pipeline
union = make_union(make_pipeline(get_text_ft, vect), get_manual_ft)
pipe = make_pipeline(union, nb)
# [Pipeline documentation](http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html) and [FeatureUnion documentation](http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.FeatureUnion.html)
# duplicate the pipeline structure without using make_pipeline or make_union
from sklearn.pipeline import Pipeline, FeatureUnion
pipe = Pipeline([
('featureunion', FeatureUnion([
('pipeline', Pipeline([
('functiontransformer', get_text_ft),
('countvectorizer', vect)
])),
('functiontransformer', get_manual_ft)
])),
('multinomialnb', nb)
])
# ### Grid search of a nested `Pipeline`
# examine the pipeline steps
pipe.steps
# create a grid of parameters to search (and specify the pipeline step along with the parameter)
param_grid = {}
param_grid['featureunion__pipeline__countvectorizer__token_pattern'] = [r"\b\w\w+\b", r"'([a-z ]+)'"]
param_grid['multinomialnb__alpha'] = [0.5, 1]
param_grid
grid = GridSearchCV(pipe, param_grid, cv=5, scoring='accuracy')
grid.fit(train, y)
print(grid.best_score_)
print(grid.best_params_)
# ## Part 7: Ensembling models
#
# Rather than combining features into a single feature matrix and training a single model, we can instead create separate models and "ensemble" them.
# ### What is ensembling?
#
# Ensemble learning (or "ensembling") is the process of combining several predictive models in order to produce a combined model that is **better than any individual model**.
#
# - **Regression:** average the predictions made by the individual models
# - **Classification:** let the models "vote" and use the most common prediction, or average the predicted probabilities
#
# For ensembling to work well, the models must have the following characteristics:
#
# - **Accurate:** they outperform the null model
# - **Independent:** their predictions are generated using different "processes", such as:
# - different types of models
# - different features
# - different tuning parameters
#
# **The big idea:** If you have a collection of individually imperfect (and independent) models, the "one-off" mistakes made by each model are probably not going to be made by the rest of the models, and thus the mistakes will be discarded when averaging the models.
#
# **Note:** There are also models that have built-in ensembling, such as Random Forests.
# ### Model 1: KNN model using only manually created features
# define X and y
feature_cols = ['num_ingredients', 'ingredient_length']
X = train[feature_cols]
y = train.cuisine
# use KNN with K=800
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=800)
# train KNN on all of the training data
knn.fit(X, y)
# define X_new as the manually created features
X_new = new[feature_cols]
# calculate predicted probabilities of class membership for the new data
new_pred_prob_knn = knn.predict_proba(X_new)
new_pred_prob_knn.shape
# print predicted probabilities for the first row only
new_pred_prob_knn[0, :]
# display classes with probabilities
zip(knn.classes_, new_pred_prob_knn[0, :])
# predicted probabilities will sum to 1 for each row
new_pred_prob_knn[0, :].sum()
# ### Model 2: Naive Bayes model using only text features
# print the best model found by RandomizedSearchCV
rand.best_estimator_
# define X_new as the ingredient text
X_new = new.ingredients_str
# calculate predicted probabilities of class membership for the new data
new_pred_prob_rand = rand.predict_proba(X_new)
new_pred_prob_rand.shape
# print predicted probabilities for the first row only
new_pred_prob_rand[0, :]
# ### Ensembling models 1 and 2
# calculate the mean of the predicted probabilities for the first row
(new_pred_prob_knn[0, :] + new_pred_prob_rand[0, :]) / 2
# calculate the mean of the predicted probabilities for all rows
new_pred_prob = pd.DataFrame((new_pred_prob_knn + new_pred_prob_rand) / 2, columns=knn.classes_)
new_pred_prob.head()
# for each row, find the column with the highest predicted probability
new_pred_class = new_pred_prob.apply(np.argmax, axis=1)
new_pred_class.head()
# create a submission file (score: 0.75241)
pd.DataFrame({'id':new.id, 'cuisine':new_pred_class}).set_index('id').to_csv('sub4.csv')
# **Note:** [VotingClassifier](http://scikit-learn.org/stable/modules/ensemble.html#votingclassifier) (new in 0.17) makes it easier to ensemble classifiers, though it is limited to the case in which all of the classifiers are fit to the same data.
# ## Part 8: Locating groups of similar cuisines
# for each cuisine, combine all of the recipes into a single string
cuisine_ingredients = train.groupby('cuisine').ingredients_str.sum()
cuisine_ingredients
# examine the brazilian ingredients
cuisine_ingredients['brazilian'][0:500]
# confirm that they match the brazilian recipes
train.loc[train.cuisine=='brazilian', 'ingredients_str'].head()
# create a document-term matrix from cuisine_ingredients
from sklearn.feature_extraction.text import TfidfVectorizer
vect = TfidfVectorizer()
cuisine_dtm = vect.fit_transform(cuisine_ingredients)
cuisine_dtm.shape
# [How to calculate document similarity](http://stackoverflow.com/questions/12118720/python-tf-idf-cosine-to-find-document-similarity/12128777#12128777) (Stack Overflow)
# calculate the cosine similarity between each cuisine and all other cuisines
from sklearn import metrics
cuisine_similarity = []
for idx in range(cuisine_dtm.shape[0]):
similarity = metrics.pairwise.linear_kernel(cuisine_dtm[idx, :], cuisine_dtm).flatten()
cuisine_similarity.append(similarity)
# convert the results to a DataFrame
cuisine_list = cuisine_ingredients.index
cuisine_similarity = pd.DataFrame(cuisine_similarity, index=cuisine_list, columns=cuisine_list)
cuisine_similarity
# display the similarities as a heatmap
import seaborn as sns
sns.heatmap(cuisine_similarity)
# hand-selected cuisine groups
group_1 = ['chinese', 'filipino', 'japanese', 'korean', 'thai', 'vietnamese']
group_2 = ['british', 'french', 'irish', 'russian', 'southern_us']
group_3 = ['greek', 'italian', 'moroccan', 'spanish']
group_4 = ['brazilian', 'cajun_creole', 'indian', 'jamaican', 'mexican']
# ## Part 9: Model stacking
#
# - The term "model stacking" is used any time there are **multiple "levels" of models**, in which the outputs from one level are used as inputs to another level.
# - In this case, we will create one model that predicts the **cuisine group** for a recipe. Within each of the four groups, we will create another model that predicts the actual **cuisine**.
# - Our theory is that each of these five models may need to be **tuned differently** for maximum accuracy, but will ultimately result in a process that is more accurate than a single-level model.
# create a dictionary that maps each cuisine to its group number
cuisines = group_1 + group_2 + group_3 + group_4
group_numbers = [1]*len(group_1) + [2]*len(group_2) + [3]*len(group_3) + [4]*len(group_4)
cuisine_to_group = dict(zip(cuisines, group_numbers))
cuisine_to_group
# map the cuisines to their group numbers
train['group'] = train.cuisine.map(cuisine_to_group)
train.head()
# confirm that all recipes were assigned a cuisine group
train.group.isnull().sum()
# calculate the cross-validated accuracy of using text to predict cuisine group
X = train.ingredients_str
y = train.group
pipe_main = make_pipeline(CountVectorizer(), MultinomialNB())
cross_val_score(pipe_main, X, y, cv=5, scoring='accuracy').mean()
# define an X and y for each cuisine group
X1 = train.loc[train.group==1, 'ingredients_str']
y1 = train.loc[train.group==1, 'cuisine']
X2 = train.loc[train.group==2, 'ingredients_str']
y2 = train.loc[train.group==2, 'cuisine']
X3 = train.loc[train.group==3, 'ingredients_str']
y3 = train.loc[train.group==3, 'cuisine']
X4 = train.loc[train.group==4, 'ingredients_str']
y4 = train.loc[train.group==4, 'cuisine']
# define a pipeline for each cuisine group
pipe_1 = make_pipeline(CountVectorizer(), MultinomialNB())
pipe_2 = make_pipeline(CountVectorizer(), MultinomialNB())
pipe_3 = make_pipeline(CountVectorizer(), MultinomialNB())
pipe_4 = make_pipeline(CountVectorizer(), MultinomialNB())
# within each cuisine group, calculate the cross-validated accuracy of using text to predict cuisine
print(cross_val_score(pipe_1, X1, y1, cv=5, scoring='accuracy').mean())
print(cross_val_score(pipe_2, X2, y2, cv=5, scoring='accuracy').mean())
print(cross_val_score(pipe_3, X3, y3, cv=5, scoring='accuracy').mean())
print(cross_val_score(pipe_4, X4, y4, cv=5, scoring='accuracy').mean())
# **Note:** Ideally, each of the five pipelines should be **individually tuned** from start to finish, including feature engineering, model selection, and parameter tuning.
# ### Making predictions for the new data
# fit each pipeline with the relevant X and y
pipe_main.fit(X, y)
pipe_1.fit(X1, y1)
pipe_2.fit(X2, y2)
pipe_3.fit(X3, y3)
pipe_4.fit(X4, y4)
# for the new data, first make cuisine group predictions
X_new = new.ingredients_str
new_pred_group = pipe_main.predict(X_new)
new_pred_group
# then within each predicted cuisine group, make cuisine predictions
new_pred_class_1 = pipe_1.predict(X_new[new_pred_group==1])
new_pred_class_2 = pipe_2.predict(X_new[new_pred_group==2])
new_pred_class_3 = pipe_3.predict(X_new[new_pred_group==3])
new_pred_class_4 = pipe_4.predict(X_new[new_pred_group==4])
print(new_pred_class_1)
print(new_pred_class_2)
print(new_pred_class_3)
print(new_pred_class_4)
# add the cuisine predictions to the DataFrame of new data
new.loc[new_pred_group==1, 'pred_class'] = new_pred_class_1
new.loc[new_pred_group==2, 'pred_class'] = new_pred_class_2
new.loc[new_pred_group==3, 'pred_class'] = new_pred_class_3
new.loc[new_pred_group==4, 'pred_class'] = new_pred_class_4
new.head()
# create a submission file (score: 0.70475)
pd.DataFrame({'id':new.id, 'cuisine':new.pred_class}).set_index('id').to_csv('sub5.csv')
|
993,044 | 7157dfc0340c462b9eccbd308b2aeae8b5fa81ef | class School:
def __init__(self, name = None, roster = {}):
self._name = name
self._roster = roster
    def get_roster(self):
        # Accessor kept for external callers; prefer the `roster` property.
        return self._roster
    def set_roster(self, roster):
        self._roster = roster
    # `roster` exposes the grade -> students mapping as a read/write property.
    roster = property(get_roster, set_roster)
def add_student(self, student, grade):
grade_list = self.roster.get(str(grade),[])
grade_list.append(student)
self.roster[str(grade)] = grade_list
def grade(self, grade):
return self.roster[str(grade)]
def sort_roster(self):
for key in self.roster.keys():
self.roster[key].sort()
return self.roster
|
993,045 | 2b3ccee329bbe2b858ecffa05a4c28564755fb3f | sbox = [62, 117, 195, 179, 20, 210, 41, 66, 116, 178, 152, 143, 75, 105, 254, 1, 158, 95, 101, 175, 191, 166, 36, 24, 50, 39, 190, 120, 52, 242, 182, 185, 61, 225, 140, 38, 150, 80, 19, 109, 246, 252, 40, 13, 65, 236, 124, 186, 214, 86, 235, 100, 97, 49, 197, 154, 176, 199, 253, 69, 88, 112, 139, 77, 184, 45, 133, 104, 15, 54, 177, 244, 160, 169, 82, 148, 73, 30, 229, 35, 79, 137, 157, 180, 248, 163, 241, 231, 81, 94, 165, 9, 162, 233, 18, 85, 217, 84, 7, 55, 63, 171, 56, 118, 237, 132, 136, 22, 90, 221, 103, 161, 205, 11, 255, 14, 122, 47, 71, 201, 99, 220, 83, 74, 173, 76, 144, 16, 155, 126, 60, 96, 44, 234, 17, 215, 107, 138, 159, 183, 251, 3, 198, 0, 89, 170, 131, 151, 219, 29, 230, 32, 187, 125, 134, 64, 12, 202, 164, 247, 25, 223, 222, 119, 174, 67, 147, 146, 206, 51, 243, 53, 121, 239, 68, 130, 70, 203, 211, 111, 108, 113, 8, 106, 57, 240, 21, 93, 142, 238, 167, 5, 128, 72, 189, 192, 193, 92, 10, 204, 87, 145, 188, 172, 224, 226, 207, 27, 218, 48, 33, 28, 123, 6, 37, 59, 4, 102, 114, 91, 23, 209, 34, 42, 2, 196, 141, 208, 181, 245, 43, 78, 213, 216, 232, 46, 98, 26, 212, 58, 115, 194, 200, 129, 227, 249, 127, 149, 135, 228, 31, 153, 250, 156, 168, 110]
ptable = [
0, 8, 16, 24, 32, 40, 48, 56,
1, 9, 17, 25, 33, 41, 49, 57,
2, 10, 18, 26, 34, 42, 50, 58,
3, 11, 19, 27, 35, 43, 51, 59,
4, 12, 20, 28, 36, 44, 52, 60,
5, 13, 21, 29, 37, 45, 53, 61,
6, 14, 22, 30, 38, 46, 54, 62,
7, 15, 23, 31, 39, 47, 55, 63
]
#sbox = [0xe,4,0xd,1,2,0xf,0xb,8,3,0xa,6,0xc,5,9,0,7]
# S-box width in bits and the derived table size.
Nbits = 8
B = 1 << Nbits

# Bias-count table filled by the (commented-out) linear-analysis pass.
stat = [[0] * B for _ in range(B)]

def calc(msk, val):
    """Parity (XOR over bits) of the bits of *val* selected by mask *msk*."""
    parity = 0
    masked = msk & val
    for bit in range(Nbits):
        if masked & (1 << bit):
            parity ^= 1
    return parity
'''
for a in range(B):
b=sbox[a]
for i in range(B):
x=calc(a,i)
for j in range(B):
y=calc(b,j)
if x==y:
stat[i][j]+=1
'''
'''
for i in range(B):
for j in range(B):
print "%d\t" % (stat[i][j]-B/2),
print
'''
'''
cands = [0]*256
for i in range(256):
tmp=i
mm=-1
res=-1
for j in range(256):
mm2=abs(stat[tmp][j]-128)
if mm2>mm:
mm=mm2
res=j
cands[i]=res
if (bin(i)[2:].count('1')==1 or bin(res)[2:].count('1')==1):
print i,res
'''
cands=[0, 135, 155, 28, 223, 88, 68, 195, 178, 53, 41, 174, 109, 234, 246, 113, 117, 242, 238, 105, 170, 45, 49, 182, 199, 64, 92, 219, 24, 159, 131, 4, 31, 152, 132, 3, 192, 71, 91, 220, 173, 42, 54, 177, 114, 245, 233, 110, 106, 237, 241, 118, 181, 50, 46, 169, 216, 95, 67, 196, 7, 128, 156, 27, 130, 5, 25, 158, 93, 218, 198, 65, 48, 183, 171, 44, 239, 104, 116, 243, 247, 112, 108, 235, 40, 175, 179, 52, 69, 194, 222, 89, 154, 29, 1, 134, 157, 26, 6, 129, 66, 197, 217, 94, 47, 168, 180, 51, 240, 119, 107, 236, 232, 111, 115, 244, 55, 176, 172, 43, 90, 221, 193, 70, 133, 2, 30, 153, 17, 150, 138, 13, 206, 73, 85, 210, 163, 36, 56, 191, 124, 251, 231, 96, 100, 227, 255, 120, 187, 60, 32, 167, 214, 81, 77, 202, 9, 142, 146, 21, 14, 137, 149, 18, 209, 86, 74, 205, 188, 59, 39, 160, 99, 228, 248, 127, 123, 252, 224, 103, 164, 35, 63, 184, 201, 78, 82, 213, 22, 145, 141, 10, 147, 20, 8, 143, 76, 203, 215, 80, 33, 166, 186, 61, 254, 121, 101, 226, 230, 97, 125, 250, 57, 190, 162, 37, 84, 211, 207, 72, 139, 12, 16, 151, 140, 11, 23, 144, 83, 212, 200, 79, 62, 185, 165, 34, 225, 102, 122, 253, 249, 126, 98, 229, 38, 161, 189, 58, 75, 204, 208, 87, 148, 19, 15, 136]
def bits(pre, state):
    """Render the set bits of *state* as '<pre>[byte,bit]' terms joined by ' + '.

    Bit i of the 64-bit state maps to byte i // 8 and bit position 7 - i % 8
    (MSB-first within each byte).  Uses floor division so the output is also
    correct on Python 3 -- the original `i/8` yields a float there and would
    print e.g. 'P[0.125,...]'.  On Python 2 `//` is identical for ints.
    """
    res = ''
    first = True
    for i in range(len(state)):
        if state[i] == 1:
            if first:
                first = False
            else:
                res += ' + '
            res += pre + '[' + str(i // 8) + ',' + str(7 - i % 8) + ']'
    return res
def b2i(state):
    """Convert the first 8 entries of an MSB-first bit list to an integer."""
    value = 0
    for pos in range(8):
        if state[pos] == 1:
            value |= 1 << (7 - pos)
    return value
def i2b(val):
    """Convert an 8-bit integer to an MSB-first list of 8 bits."""
    out = [0] * 8
    for pos in range(8):
        if (val >> pos) & 1:
            out[7 - pos] = 1
    return out
def backward1(state):
    """Invert one substitution layer: map each byte of the 64-bit *state*
    through the inverse of the module-level `cands` table."""
    state2 = []
    for j in range(8):
        num = b2i(state[j*8:(j+1)*8])
        # Linear search for the preimage of `num` in cands.  Assumes cands is
        # a permutation of 0..255 so the (last) match is unique -- TODO confirm.
        res = -1
        for k in range(256):
            if cands[k] == num:
                res = k
        assert res != -1
        state2.extend(i2b(res))
    return state2
def forward(state, steps):
    """Push the 64-bit *state* through *steps* rounds: per-byte substitution
    via the module-level `cands` table, then the bit permutation `ptable`.

    Mutates *state* in place and returns (state, cnt), where cnt counts the
    byte-positions with non-zero S-box input across all rounds (active boxes).
    """
    cnt = 0
    for i in range(steps):
        state2 = []
        for j in range(8):
            num = b2i(state[j*8:(j+1)*8])
            if num != 0:
                cnt += 1
            state2.extend(i2b(cands[num]))
        # Permutation layer: output bit j is taken from state2[ptable[j]].
        for j in range(len(state)):
            state[j] = state2[ptable[j]]
    return state, cnt
# Trace each single-bit input (bits 0-1 of every byte) backwards through one
# inverse substitution and forwards through two rounds, printing the
# resulting linear-approximation terms.  (Python 2 print statements.)
for i in range(2):
    for j in range(8):
        state = [0]*64
        state[j*8+i] = 1
        state2 = backward1(state)
        print bits('P',state2),'+',
        state2, cnt = forward(state,2)
        # cnt+1 includes the initial (backward) S-box as an active box.
        print bits('U4',state2), cnt+1
'''
maybe should run it interactively
from https://gist.github.com/ngg/f534e51c14a832d69c41289837078773
int bit(int n, int k)
{
return (n & (1 << k)) != 0;
}
int main()
{
auto f = fopen("data", "rb");
for (int i = 0; i < 65536; i++) {
uint8_t p[8], c[8];
fread(&p, 8, 1, f);
fread(&c, 8, 1, f);
for (int a = 0; a < 256; a++) {
for (int b = 0; b < 256; b++) {
int u4b0 = sbox_inv[(int)c[0] ^ a], u4b6 = sbox_inv[(int)c[6] ^ b];
if (bit(p[0], 0) ^ bit(p[0], 3) ^ bit(p[0], 4) ^ bit(u4b0, 0) ^ bit(u4b0, 4)
^ bit(u4b6, 0) ^ bit(u4b6, 4)) {
cnt[a][b]++;
}
}
}
}
for (int a = 0; a < 256; a++) {
for (int b = 0; b < 256; b++) {
int bias = abs(cnt[a][b] - 32768);
if (bias > 1000)
cout << a << " " << b << " " << bias << endl;
}
}
}
'''
|
993,046 | bee105e79bf1e244cda2600e169f90d766789a36 | import matplotlib.pyplot as plt
import numpy as np
from scipy.io import loadmat
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from matplotlib import gridspec
# import sklearn
# print('The scikit-learn version is {}.'.format(sklearn.__version__))
def neural_net_ex4_ng():
    """Train and visualize a small MLP on the ex4 digit dataset.

    Example from Andrew Ng's coursera course.  Loads 20x20 digit images from
    data/ex4data1.mat, trains a scikit-learn MLPClassifier, reports train/test
    accuracy, and plots sample digits plus the learned hidden-layer weights.
    Side effects: reads the .mat file and opens a matplotlib window.
    """
    # ==================
    # read data
    dataset = loadmat('data/ex4data1.mat')
    print(dataset.keys())
    y = dataset['y']  # 5000 x 1 labels
    print('dims y: ', y.shape)
    X = dataset['X']  # 5000 x 400 (flattened 20x20 images)
    print('dims X: ', X.shape)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
    num_samples_test = X_test.shape[0]
    # ==================
    # display data: pick 10 random test digits and show them side by side
    fig = plt.figure(figsize=(10, 8), facecolor='white')
    fig.add_subplot(651)
    samples = np.random.choice(num_samples_test, 10)
    print('samples:', samples)
    plt.imshow(X_test[samples, :].reshape(-1, 20).T, cmap="Greys")
    plt.axis('off')
    # ==================
    # run neural net
    hidden_layer_size = 25
    # Use hidden_layer_size instead of a hard-coded (25,) so this stays in
    # sync with the weight-plot loop below, which iterates hidden_layer_size
    # columns of mlp.coefs_[0].
    mlp = MLPClassifier(hidden_layer_sizes=(hidden_layer_size,), max_iter=20, alpha=1e-4,
                        solver='sgd', verbose=False, tol=1e-4, random_state=1,
                        learning_rate_init=.1)
    mlp.fit(X_train, y_train.ravel())
    predictions = mlp.predict(X_test)
    print('Test set accuracy: {} %'.format(np.mean(predictions == y_test.ravel())*100))
    print("Training set score: %f" % mlp.score(X_train, y_train))
    print("Test set score: %f" % mlp.score(X_test, y_test))
    print('coeffs shape', (mlp.coefs_[0]).shape)
    # ==================
    # display coefficients of hidden layer
    fig.add_subplot(652)
    plt.imshow(mlp.coefs_[0][:, 0].reshape(20, 20))
    plt.axis('off')
    gs = gridspec.GridSpec(6, 5)
    cur_img_idx = 5
    # use global min / max to ensure all weights are shown on the same scale
    vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
    for coef, ax in zip(mlp.coefs_[0].T, range(hidden_layer_size)):
        fig.add_subplot(gs[cur_img_idx])
        plt.imshow(coef.reshape(20, 20), cmap=plt.cm.gray, vmin=.5 * vmin, vmax=.5 * vmax)
        plt.axis('off')
        cur_img_idx += 1
    plt.show()
neural_net_ex4_ng()
|
993,047 | 6d0e6203994419ca104557740b33c76da2ad5262 | from django.shortcuts import render, redirect
from django.views.generic import CreateView, ListView, UpdateView, TemplateView
from django.contrib.auth import login
from .forms import TeacherSignUpForm, StudentSignUpForm
from .models import User
class SignUpView(TemplateView):
    """Landing page where a visitor chooses teacher or student signup."""
    template_name = 'accounts/signup.html'
class TeacherSignUpView(CreateView):
    """Teacher signup: on success, log the new user in and go home."""

    model = User
    form_class = TeacherSignUpForm
    template_name = 'accounts/signup_form.html'

    def get_context_data(self, **kwargs):
        # Tag the context so the shared template can render teacher copy.
        kwargs['user_type'] = 'teacher'
        return super().get_context_data(**kwargs)

    def form_valid(self, form):
        new_user = form.save()
        login(self.request, new_user)
        return redirect('home')
class StudentSignUpView(CreateView):
    """Student signup: on success, log in and open the student's profile."""

    model = User
    form_class = StudentSignUpForm
    template_name = 'accounts/signup_form.html'

    def get_context_data(self, **kwargs):
        # Tag the context so the shared template can render student copy.
        kwargs['user_type'] = 'student'
        return super().get_context_data(**kwargs)

    def form_valid(self, form):
        new_user = form.save()
        login(self.request, new_user)
        return redirect('profiles:ogrenci_profil', ogrenci_id=new_user.id)
|
993,048 | 13420f754b7eb295d413f31954e14940b09ff821 | import pymysql
class A:
    """Scratch experiment: run a query, close the connection, then reopen a
    new connection while still holding the cursor from the closed one."""

    def __init__(self):
        self.conn = pymysql.connect(host="localhost",
                                    user="root",
                                    password="mysqlfgh.00",
                                    db="QcChat",
                                    port=3306,
                                    charset="utf8")
        self.cursor = self.conn.cursor()
        sql = "select id, password from users;"
        self.cursor.execute(sql)
        ret = self.cursor.fetchall()
        self.conn.close()
        # The connection object still exists after close(); this prints the
        # (closed) handle.
        print(self.conn)
        self.conn = pymysql.connect(host="localhost",
                                    user="root",
                                    password="mysqlfgh.00",
                                    db="QcChat",
                                    port=3306,
                                    charset="utf8")
        # NOTE(review): self.cursor is still bound to the *first* (closed)
        # connection, not the new one -- this execute exercises that edge
        # case; confirm whether that is the point of the experiment.
        self.cursor.execute(sql)
        ret = self.cursor.fetchall()
        print(ret)
class B:
    """Scratch experiment: open a connection and run one query normally."""

    def __init__(self):
        self.conn = pymysql.connect(host="localhost",
                                    user="root",
                                    password="mysqlfgh.00",
                                    db="QcChat",
                                    port=3306,
                                    charset="utf8")
        self.cursor = self.conn.cursor()
        sql = "select id, password from users;"
        # execute() returns the number of affected/selected rows, not rows.
        res = self.cursor.execute(sql)
        print(res)
        print("b mysql")
if __name__ == "__main__":
    # Demonstrates that the connection object can still be printed after
    # close().  Credentials are hard-coded -- local testing only.
    conn = pymysql.connect(host="localhost",
                           user="root",
                           password="mysqlfgh.00",
                           db="QcChat",
                           port=3306,
                           charset="utf8")
    print(conn)
    conn.close()
    print(conn)
993,049 | e0286758c609a37a45046ccb5b4644eb11092199 |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Load the user-supplied URL 100 times in a fresh Chrome session
# (simple repeated page-hit loop; network I/O only).
count = 0
url = input("Enter Browser URL:")
client = webdriver.Chrome()
while count < 100:
    print("Currently ran this ", count)
    client.get(url)
    count += 1
|
993,050 | ebafcc3d271a05376e1f2baa0ffb8c208df11624 | # Copyright (c) 2021, Hyunwoong Ko. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
def page():
    """Render the static 'Paraphrase Generation' help page in Streamlit."""
    st.title('Paraphrase Generation')
    st.markdown("<br>", unsafe_allow_html=True)
    st.write(
        "Adding sentences to analyze user's intention is a very laborious task. "
        "Therefore, Dialobot supports the Parapharse Generation feature which automatically generates similar sentences. "
        "With just a few sentences, you can add dozens of similar sentences to your desired intent. "
        "However, this feature is only applied to intents with more than 10 sentences registered."
    )
|
993,051 | 2e887ce52411de470759f594d72e9426e41706ae | import arcpy
import os
from arcpy import management as DM
from time import time
from string import zfill
from datetime import date
arcpy.env.workspace = "C:/WorkSpace/Phase5/Objects/test.gdb"
outf = open("c:/workspace/phase5/Objects/modellog.txt", "a")
outf.writelines("\n" + str(date.today()) + " --Test-Small File-\n")
def elapsed_time(t0):
    """Return wall-clock time elapsed since *t0* as zero-padded 'HH:MM:SS'.

    The original used `string.zfill`, which only exists in Python 2's
    `string` module; `%02d` formatting produces the same output and works
    on both Python 2 and 3.
    """
    seconds = int(round(time() - t0))
    h, rsecs = divmod(seconds, 3600)
    m, s = divmod(rsecs, 60)
    return "%02d:%02d:%02d" % (h, m, s)
# Apply models from key in separate dbf for each landcover
#suffixes = ["01","03","05","07","09","11","13","15","19","21","23","25","27","31"]
#covers = [1,3,5,7,9,11,13,15,19,21,23,25,27,31]
# Main pass: walk every polygon feature class in the workspace, make a layer
# for each, and log per-step timings.  The selection/calculation steps are
# currently commented out (dry-run mode).  Python 2 print statements.
starttime = time()
geodb = "test.gdb"
processstart = time()
fcs = arcpy.ListFeatureClasses("*", "polygon")
# drop_fields = ["VegNum1"]
#for fc in fcs:
#    try:
#        print fc
#        #DM.AddField(fc, "VegNum", "LONG")
#        #DM.DeleteField(fc, drop_fields)
#    except Exception, e:
#        # If an error occurred, print line number and error message
#        import traceback, sys
#        tb = sys.exc_info()[2]
#        print "Line %i" % tb.tb_lineno
#        print e.message
#print "process time = " + elapsed_time(processstart)
#outf.writelines("Completed adding field. " + elapsed_time(processstart) + "\n")
#processstart = time()
selectstring = "\"ECOCLASSNA\" = 'Gravelly, Hot Desert Shrub' and \"slope20\" = 0"
for fc in fcs:
    outf.writelines("\nProcessing: "+fc)
    print (fc)
    layername = "l_"+fc
    DM.MakeFeatureLayer(fc, layername)
    # for i in range(1,26):
    #     DM.SelectLayerByAttribute(layername, "NEW_SELECTION", selectstring)
    #     DM.CalculateField(layername, "VegNum", i, "VB", "")
    print "process time = " + elapsed_time(processstart)
    outf.writelines("\nCompleted select and calculate field. " + elapsed_time(processstart) + "\n")
    # Reset the per-feature-class timer.
    processstart = time()
print "Finished - Elapsed Time = " + elapsed_time(starttime)
outf.writelines("\nFinished. " + elapsed_time(starttime) + "\n\n")
outf.close()
|
993,052 | e527202ed9691b63df49c9f78d50f2b2485d35c4 | import unittest
import sys
sys.path.append('src')
from arduinoSerialMonitorParser import ArduinoParser
from datetime import datetime
import numpy as np
class TestParser(unittest.TestCase):
    """Unit tests for ArduinoParser's serial-line parsing and time handling."""

    def testProcessGoodLines(self):
        # A well-formed serial-monitor line yields one flow value and one
        # parsed timestamp.
        self.parser.lines = self.simpleFileLines
        self.parser.processLines()
        desiredFlows = self.simpleFlowsProcessed
        desiredTimestamps = self.simpleTimestampsProcessed
        actualFlows = self.parser.flows
        actualTimestamps = self.parser.timestamps
        self.assertEqual(desiredFlows, actualFlows, msg="flows not equal")
        self.assertEqual(desiredTimestamps, actualTimestamps, msg="timestamps not equal")

    def testProcessFaultyLines(self):
        # Corrupted lines (truncated timestamp, garbage bytes) must all be
        # rejected, leaving empty flows/timestamps.
        self.parser.lines = self.faultyFileLines
        self.parser.processLines()
        actualFlows = self.parser.flows
        actualTimestamps = self.parser.timestamps
        desiredFlows = np.array([])
        desiredTimestamps = []
        np.testing.assert_allclose(desiredFlows, actualFlows, err_msg="flows not equal")
        self.assertEqual(desiredTimestamps, actualTimestamps, msg="timestamps not equal")

    def testConvertToRelativeTimes(self):
        # A single sample converts to 0 seconds relative to itself.
        self.parser.lines = self.simpleFileLines
        self.parser.processLines()
        self.parser.convertToRelativeTimes()
        desiredSeconds = self.simpleSeconds
        actualSeconds = self.parser.seconds
        self.assertEqual(desiredSeconds, actualSeconds, msg="simple seconds not equal")

    def setUp(self):
        # Fresh parser instance for every test.
        self.parser = ArduinoParser()

    @classmethod
    def setUpClass(self):
        # NOTE(review): by convention setUpClass's first parameter is `cls`;
        # `self` here is actually the class object (works, but misleading).
        self.simpleFileLines = \
            ["15:50:09.450 -> S0.01 -0.00 -683.55 9.66"]
        self.simpleFlowsProcessed = [-0.00]
        self.simpleTimestampsProcessed = [datetime(year=1940, month=1, day=1,hour=15,minute=50, second=9,
                                                   microsecond=450000)]
        self.simpleSeconds = [0]
        self.faultyFileLines = [
            "49:33.316 -> S0.00 0.49 678.97 3.46",
            "17:15:48.006 -> S-0.05 -S⸮S-0.05 -8.26 411.13 6.19",
            "17:15:48.006 -> S-⸮S-0.05 -8.13 0.00 0.00"]
        # Unused by the tests above; kept as fixture data for future tests.
        self.longFileLines = [
            "15:50:09.450 -> S0.01 0.00 683.55 9.66",
            "15:50:09.450 -> S0.00 0.00 683.55 9.66",
            "15:50:09.450 -> S0.00 0.00 683.55 9.66",
            "15:50:09.450 -> S0.00 0.00 683.55 9.66",
            "15:50:09.483 -> S0.00 0.00 683.55 9.66",
            "15:50:09.483 -> S0.00 0.49 683.55 9.66",
            "15:50:09.483 -> S0.00 0.49 683.55 9.66",
            "15:50:09.483 -> S0.00 0.49 683.55 9.66",
            "15:50:09.483 -> S0.00 0.00 683.55 9.66",
            "15:50:09.483 -> S0.00 0.49 683.55 9.66",
        ]
if __name__ == "__main__":
unittest.main()
|
993,053 | a1f6bbe02711d7874534cdb6a06f4f583ceebdaf | from django.contrib.contenttypes.models import ContentType
from django_utils.render_helpers import render_or_default
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
import django.shortcuts as shortcuts
from django.template import RequestContext
from django.contrib.contenttypes.models import ContentType
import django.http as http
from django_moderation.models import ContentApprovalVote
from django_community.decorators import UserRequired, user_required
from django_community_wiki.community_wiki import community_wiki
@user_required
def queue(request, content_type = None):
    """
    View that generates a context containing an object that the
    logged in user can moderate or None, if there are no
    available objects to moderate.
    """
    def get_template_for_object(object):
        # Prefer a per-content-type template; fall back to the generic one.
        from django.template.loader import get_template
        from django.template import TemplateDoesNotExist
        content_type_object = ContentType.objects.get_for_model(object.__class__)
        style_template = "django_moderation/%s/%s.html" % (content_type_object.app_label, content_type_object.model)
        try:
            get_template(style_template)
            target_template = style_template
        # Python 2 except syntax -- this module predates Python 3.
        except TemplateDoesNotExist, e:
            target_template = "django_moderation/base_queue_item.html"
        return target_template
    user = request.user
    instance = ContentApprovalVote.objects.queue(user)
    if instance:
        model = instance.content_type.model_class()
        node = model.objects.get_all(id = instance.object_id)
        # Annotate the node with the metadata the queue template renders.
        node.model = node.__class__.__name__
        node.template = get_template_for_object(node)
        node_content_type = instance.content_type.id
        content_type_object = instance.content_type
        node.accepts = ContentApprovalVote.objects.filter(content_type = content_type_object,
                                                          object_id = node.id,
                                                          mode = 'accept').count()
        node.rejects = ContentApprovalVote.objects.filter(content_type = content_type_object,
                                                          object_id = node.id,
                                                          mode = 'reject').count()
    else:
        node = None
        node_content_type = None
    return shortcuts.render_to_response(
        'django_moderation/queue.html',
        {'node':node,
         'node_content_type':node_content_type},
        context_instance = RequestContext(request)
    )
def moderate(request, content_type, object_id, mode):
    """
    Cast an accept, pass or reject vote to moderate an object.
    """
    voter = request.user
    ct = ContentType.objects.get(id = content_type)
    target = ct.model_class().objects.get_all(id = object_id)
    # vote() records the vote; its return value is not used here.
    status = ContentApprovalVote.objects.vote(target, voter, mode)
    redirect_url = request.GET.get('queue_url', reverse('moderation-queue'))
    return http.HttpResponseRedirect(redirect_url)
def delete(request, content_type, object_id):
    """
    Delete a moderated object via the community wiki, then redirect to the
    content list for its content type.

    (The previous docstring, "Cast an accept, pass or reject vote...", was
    copied from moderate() and did not describe this view.)
    """
    user = request.user
    content_type_object = ContentType.objects.get(id = content_type)
    node = content_type_object.model_class().objects.get(id = object_id)
    community_wiki.delete_content(node)
    redirect_url = reverse('content-list-redirect', args=[content_type_object.id])
    return http.HttpResponseRedirect(redirect_url)
class FlagAction(object):
    """
    Class with callable instances that provide flagging functionality for any
    content type.
    """
    def __init__(self, model, form_builder, template = None, redirect_url = None):
        # model: the content model being flagged; form_builder: callable
        # returning the FlagForm class; template: flag-page template name;
        # redirect_url: named URL taking the object id.
        self.model = model
        self.form_builder = form_builder
        self.template = template
        self.redirect_url = redirect_url
        self.content_type = ContentType.objects.get_for_model(model)
    def __call__(self, request, object_id):
        """
        Provides a context with the object being flagged, the existing flags
        on that object, an instance of a FlagForm. Renders the template
        provided in self.template.
        """
        from django_moderation.models import ContentFlag, ContentFlagVote
        # ?remove=<truthy> in the query string deletes the caller's flag.
        remove = request.GET.get('remove', False)
        FlagForm = self.form_builder()
        object = get_object_or_404(self.model, id = object_id)
        if request.POST or remove:
            if remove:
                existing_flag = ContentFlagVote.objects.get_vote_for_content(object, request.user)
                if existing_flag:
                    existing_flag.delete()
                return http.HttpResponseRedirect(reverse(self.redirect_url, args=[object.id]))
            else:
                # Bound form; if invalid, falls through and re-renders with
                # the validation errors.
                form = FlagForm(request.POST, request.FILES)
                if form.is_valid():
                    ContentFlagVote.objects.add_vote(self.content_type.id,
                                                     object_id,
                                                     form.cleaned_data['action'],
                                                     form.cleaned_data['reason'],
                                                     form.cleaned_data['details'],
                                                     request.user)
                    return http.HttpResponseRedirect(reverse(self.redirect_url, args=[object.id]))
        else:
            form = FlagForm()
        existing_flag = ContentFlagVote.objects.get_vote_for_content(object, request.user)
        return shortcuts.render_to_response(
            self.template,
            {'form':form,
             'object_id':object.id,
             'node':object,
             'existing_flag':existing_flag},
            context_instance = RequestContext(request)
        )
993,054 | 6da2e31324e1952036ad289e3fb2c727c2b731f0 | import random
import requests
from lxml import etree
import redis
import time
# Redis
pool = redis.ConnectionPool(host="localhost", port=6379, decode_responses=True, db=4)
r = redis.Redis(connection_pool=pool)
headers = {
"Host": "www.letpub.com.cn",
"Connection": "keep-alive",
"Content-Length": "183",
"Accept": "*/*",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Origin": "https://www.letpub.com.cn",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Dest": "empty",
"Referer": "https://www.letpub.com.cn/index.php?page=grant",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
# "Cookie": "_ga=GA1.3.197927283.1606913066; __utmz=189275190.1606913067.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); _gid=GA1.3.1985560273.1607948592; PHPSESSID=m7u74d594u4df0s6nd0nbrip72; __utma=189275190.197927283.1606913066.1608012077.1608078793.5; __utmc=189275190; __utmt=1; __utmb=189275190.4.10.1608078793",
}
def get_proxy():
    """Fetch one proxy address from the letecs API and return a requests-style
    proxies mapping (https tunneled through the same http proxy).

    Note: the original URL contained '®ions=' -- the mojibake of an HTML
    '&reg;' entity -- restored here to '&regions='.  TODO confirm against the
    provider's API docs.
    """
    proxy_url = "http://http.tiqu.letecs.com/getip3?num=1&type=1&pro=&city=0&yys=0&port=1&time=1&ts=0&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions=&gm=4"
    res = requests.get(proxy_url)
    proxy_text = res.text.replace("\r\n", "")
    print(proxy_text)
    proxies = {
        "http": "http://" + proxy_text,
        "https": "http://" + proxy_text,
    }
    return proxies
proxies = {
"http": "http://" + "223.244.175.184:41626",
"https": "http://" + "223.244.175.184:41626",
}
for i in range(100):
# while r.exists("academy"):
# data_redis = r.lpop("academy")
# s1 = data_redis.split(",")[0]
# s2 = data_redis.split(",")[1]
# if s2 == "None":
# s2 = ""
# s3 = data_redis.split(",")[2]
# if s3 == "None":
# s3 = ""
# s4 = data_redis.split(",")[2]
# if s4 == "None":
# s4 = ""
s1 = "C"
s2 = "C19"
s3 = "C1901"
s4 = "C190105"
data = {
"page": "",
"name": "",
"person": "",
"no": "",
"company": "",
"addcomment_s1": s1,
"addcomment_s2": s2,
"addcomment_s3": s3,
"addcomment_s4": s4,
"money1": "",
"money2": "",
"startTime": "1997",
"endTime": "2019",
"subcategory": "",
"searchsubmit": "true",
}
url = "https://www.letpub.com.cn/nsfcfund_search.php?mode=advanced&datakind=list¤tpage=1"
with requests.Session() as s:
try:
resp = s.post(url=url, data=data, headers=headers)
lxml = etree.HTML(resp.text)
# print(resp.text)
div = lxml.xpath('//div[contains(text(), "搜索条件匹配:")]/b/text()')[0].strip()
# final_result = data_redis + "," + div
r.rpush("academy_result", div)
print(div)
except Exception as e:
# r.rpush("academy_bak", data_redis)
# proxies = get_proxy()
# print("换ip了")
print(e)
# print(data)
# time.sleep(random.uniform(2.0, 2.5))
|
993,055 | a9391a60ccf1dc1cf1a605125ab84fdd5d3c35c5 | import re
'''
Calculate the sentiment of each dictionary
'''
def calculate_pos_neg(sum_sentiment, pos, neg, threshold, is_emo):
    """Bump the positive/negative tallies based on *sum_sentiment*.

    Scores strictly above *threshold* increment `pos`, strictly below
    -*threshold* increment `neg`.  Emoticon/emoji sources (is_emo) carry
    double weight.  Returns the updated (pos, neg) pair.
    """
    weight = 2 if is_emo else 1
    if sum_sentiment > threshold:
        pos += weight
    elif sum_sentiment < -threshold:
        neg += weight
    return pos, neg
'''
calculate the sentiment of the tweet based on emoticon
'''
def calculate_emoti_senti(tweet, pos, neg):
    """Score *tweet* (list of tokens) by counting ASCII emoticons.

    Tokens matching the positive pattern (e.g. ':-D', ';)') count +1, tokens
    matching only the negative pattern count -1; the net count is fed to
    calculate_pos_neg with emoticon (double) weight.

    Both patterns are now raw strings: the original positive pattern was not
    raw and only worked because '\\s' is not a recognized string escape --
    which raises a SyntaxWarning on modern Python.  Behavior is unchanged.
    """
    count_t_pos = 0
    count_t_neg = 0
    threshold = 0
    is_emo = True
    pos_string = r'[:;==]+\s*-*\s*[oO]*[dD)pP\]]'
    neg_string = r'[:;==]+\s*-*\s*[oO]*[(/\\]*'
    # NOTE(review): the trailing '*' in neg_string makes the "mouth" chars
    # optional, so any non-positive token containing ':' ';' or '=' counts as
    # negative -- confirm that is intended.
    pos_regex = re.compile(pos_string)
    neg_regex = re.compile(neg_string)
    for word in tweet:
        if pos_regex.findall(word):
            count_t_pos += 1
            continue
        if neg_regex.findall(word):
            count_t_neg += 1
    sum_senti = count_t_pos - count_t_neg
    pos, neg = calculate_pos_neg(sum_senti, pos, neg, threshold, is_emo)
    return pos, neg
'''
calculate the sentiment of the tweet based on emoji
'''
def calculate_emoji_senti(tweet, emojis, pos, neg):
    """Score *tweet* by summing per-token sentiment weights from *emojis*,
    then update the (pos, neg) tallies with emoticon (double) weight."""
    threshold = 0.0
    is_emo = True
    cur_senti = 0.0
    for tok in tweet:
        cur_senti += emojis.get(tok, 0.0)
    pos, neg = calculate_pos_neg(cur_senti, pos, neg, threshold, is_emo)
    return pos, neg
# def calculate_afinn_senti(tweet, afinn, pos, neg):
# sum_senti = 0.0
# threshold = 3
# is_emo = False
# for word in tweet:
# if word in afinn:
# sum_senti += afinn.get(word)
# pos, neg = calculate_pos_neg(sum_senti, pos, neg, threshold, is_emo)
# return pos, neg
# def calculate_minqing_senti(tweet, pos_minging, neg_minging, pos, neg):
# tweet_set = set(tweet)
# threshold = 0
# is_emo = False
# pos_num = len(tweet_set & pos_minging)
# neg_num = len(tweet_set & neg_minging)
# sum_sentiment = pos_num - neg_num
# pos, neg = calculate_pos_neg(sum_sentiment, pos, neg, threshold, is_emo)
# return pos, neg
def create_data(tweet, tweets):
    """Append *tweet* (a token list) to *tweets* and return *tweets*.

    NOTE(review): the original computed ``' '.join(tweet)`` into an unused
    variable, suggesting the joined string may have been meant to be appended
    instead of the list.  Existing behavior (append the token list) is
    preserved; the dead computation is removed.  Confirm intent with callers.
    """
    tweets.append(tweet)
    return tweets
'''
Determine the sentiment of the tweet based on the summation of sentiment of all dictionaries
'''
def total_sentiment(pos, neg):
    """Combine the per-dictionary tallies into a final label.

    Returns 1 (positive), -1 (negative), 0 (no signal at all), or 5
    (undecided / conflicting evidence).
    """
    if pos >= 2 and pos > neg:
        return 1
    if neg >= 2 and neg > pos:
        return -1
    if pos == 0 and neg == 0:
        return 0
    return 5
'''
Check the sentiment by the lexical dictionaries
The temp_dict is the neighbour words of negation word
use if temp_dict word is positive or negative, it will be store and return
'''
def create_senti_and_word(tweet, temp_dict, afinn, pos_minging, neg_minging, pos, neg):
    """Score *tweet* against the AFINN and Bing Liu ("minging") lexicons and
    collect the negation-window words (*temp_dict*) whose polarity agrees
    with the overall score.

    Returns (word_dicts, sum_senti): word_dicts maps each agreeing window
    word to itself; sum_senti is the combined lexicon score.
    NOTE(review): the *pos*, *neg* parameters and the locals `threshold` /
    `is_emo` are never used here -- kept for signature parity with the
    sibling calculate_* functions; confirm before removing.
    """
    sum_senti = 0.0
    threshold = 0
    is_emo = False
    word_dict = {}
    word_dicts = {}
    temp_list = []
    # AFINN score over the whole tweet.
    for word in tweet:
        if word in afinn:
            sum_senti += afinn.get(word)
    # Per-word AFINN scores for the negation-window words.
    if temp_dict:
        for word in temp_dict:
            temp_list.append(word)
            if word in afinn:
                word_dict[word] = afinn.get(word)
    tweet_set = set(tweet)
    pos_num = len(tweet_set & pos_minging)
    neg_num = len(tweet_set & neg_minging)
    senti_ming = pos_num - neg_num
    sum_senti += senti_ming
    # Lexicon polarity (+1/-1) for window words found in the minging sets;
    # overwrites any AFINN score recorded above for the same word.
    if temp_dict:
        temp_set = set(temp_list)
        result_pos = temp_set & pos_minging
        result_neg = temp_set & neg_minging
        for word in result_pos:
            word_dict[word] = 1
        for word in result_neg:
            word_dict[word] = -1
    # Keep only window words whose sign matches the overall sentiment.
    for key in word_dict:
        if sum_senti > 0 and word_dict[key] > 0:
            word_dicts[key] = key
        elif sum_senti < 0 and word_dict[key] < 0:
            word_dicts[key] = key
    return word_dicts, sum_senti
993,056 | 07520d2903c4e6cb9bc55c4c33c3d6af1ba5998b | /Users/principal/anaconda3/lib/python3.6/posixpath.py |
993,057 | 3e09baa334d1977182aea5bcd0e8c6bffeae8be4 | from calendar import *
weekdays = ["MONDAY","TUESDAY","WEDNESDAY","THURSDAY","FRIDAY","SATURDAY","SUNDAY"]
date = list(map(int, input().split()))
i = weekday(date[2], date[0], date[1])
print(weekdays[i])
|
993,058 | 4c267385b4b505dfe83cd66fa956202bc5e97b0d | # -*- coding: utf-8 -*-
"""
Created on Fri May 13 19:52:20 2016
@author: Julien
"""
import pygame
import variables
from classes import Item, Inventory, Message, Illuminator
class Chest(Item):
def __init__(self,x, y, contents_list):
self.name = 'Chest'
self.value = 0
self.image = variables.chest_img
super(Chest, self).__init__(self.name, self.value, self.image, x, y)
self.has_opened = False
self.inventory = Inventory()
for k in contents_list:
self.inventory.contents.append(k)
def open_(self,Character):
if self.rect.collidepoint(pygame.mouse.get_pos()) == True and Character.rect.colliderect(self.rect.inflate(5,5)) == True:
if len(self.inventory.contents) >= 1:
for item in self.inventory.contents:
print 'removes item from chest'
item.rect = self.pop_around(item,45,45)
print item.rect
#self.level.all_sprites_list.add(item) #add's it so all items of chest pop
self.level.item_list.add(item) #add's sprite back to item list for it to behave as item in game
self.inventory.contents.remove(item) #removes item from chest
else:
w = 150
h = 30
msg = Message('The chest is empty !!',2000, 0,0,w,h)
#msg.image = pygame.transform.scale(msg.image, (w, h))
msg.rect.center = (variables.screenWIDTH/2,25)
self.level.message_list.add(msg)
class Torch(Illuminator):
    """A light-source item with fixed name/value and a caller-chosen radius."""
    def __init__(self, radius):
        self.name = 'Torch'
        self.value = 4.
        self.image = variables.torch_img
        super(Torch, self).__init__(self.name, self.value, self.image, radius)
|
993,059 | 805336a7bee956253f130f5f97a0e0aab768ec90 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import layers, Sequential
cifar10 = tf.keras.datasets.cifar10
(xc_train, yc_train), (xc_test, yc_test) = cifar10.load_data()
mnist = tf.keras.datasets.mnist
(train_x, train_y), (test_x, test_y) = mnist.load_data()
# Data preprocessing: scale pixels to [0, 1], cast labels to int32.
Xc_train, Xc_test = tf.cast(xc_train, dtype=tf.float32) / 255.0, tf.cast(xc_test, dtype=tf.float32) / 255.0
Yc_train, Yc_test = tf.cast(yc_train, dtype=tf.int32), tf.cast(yc_test, dtype=tf.int32)
y_train, y_test = tf.cast(train_y, dtype=tf.int32), tf.cast(test_y, dtype=tf.int32)
# Normalize AND reshape MNIST to NHWC in one step.  The original first
# normalized into X_train/X_test and then overwrote both with reshapes of the
# raw uint8 arrays, silently discarding the /255 scaling before training.
X_train = (train_x / 255.0).reshape(60000, 28, 28, 1)
X_test = (test_x / 255.0).reshape(10000, 28, 28, 1)
# Build the models: a two-conv-per-block CNN for CIFAR-10 (modelc) and a
# smaller single-conv-per-block CNN for MNIST (model).
modelc = Sequential([
    # Unit 1
    layers.Conv2D(16, kernel_size=(3, 3), padding="same", activation=tf.nn.relu, input_shape=xc_train.shape[1:]),
    layers.Conv2D(16, kernel_size=(3, 3), padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=(2, 2)),
    # Unit 2
    layers.Conv2D(32, kernel_size=(3, 3), padding="same", activation=tf.nn.relu),
    layers.Conv2D(32, kernel_size=(3, 3), padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=(2, 2)),
    # Unit 3
    layers.Flatten(),
    layers.Dense(128, activation="relu"),
    layers.Dense(10, activation="softmax")
])
model = tf.keras.Sequential([
    # Unit 1
    tf.keras.layers.Conv2D(16, kernel_size=(3, 3), padding="same", activation=tf.nn.relu, input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    # Unit 2
    tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding="same", activation=tf.nn.relu),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    # Unit 3
    tf.keras.layers.Flatten(),
    # Unit 4
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax")
])
# Print layer summaries
modelc.summary()
model.summary()
# Configure the training procedure
modelc.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
# Train.  batch_size: samples per gradient step; epochs: passes over the
# training data; validation_split: fraction held out for validation.
modelc.fit(Xc_train, Yc_train, batch_size=64, epochs=12, validation_split=0.2)
model.fit(X_train, y_train, batch_size=64, epochs=5, validation_split=0.2)
# Evaluate on the held-out test sets (verbose=2: one line per run)
modelc.evaluate(Xc_test, Yc_test, verbose=2)
model.evaluate(X_test, y_test, verbose=2)
993,060 | 9137b0b3fbeafda20f719dda29351747175dd304 | import sys
import random
from constellationData import *
from dataModel import *
from models import *
from utils import *
from solution import *
def getNextMoves(current, possibles, points, model):
    """Candidate constellations reachable from *current* within *points*
    stars, with any move made redundant by another candidate filtered out."""
    start = time()
    moves = [c for c in possibles
             if len(c.stars) <= points
             and c.canActivate(current.provides, current.constellations)
             and c not in current.constellations]
    # Drop moves that another surviving candidate makes redundant; iterate a
    # snapshot since `moves` shrinks as we go.
    for move in list(moves):
        for other in moves:
            if other in move.redundancies:
                moves.remove(move)
                break
    timeMethod("getNextMoves", start)
    return moves
def getProvidersForVector(possibles, neededAffinities, afV, model):
    """Rank the tier-1 constellations that provide affinity vector *afV* and
    split them into (goodProviders, badProviders).

    Good providers are the shortest score-ordered prefix whose combined
    affinity reaches (within 1 of) the needed amount for afV; never splits
    between equally-scored providers.
    """
    start = time()
    providesV = []
    for c in possibles:
        if c.provides.isVector(afV) and c.getTier() == 1:
            providesV += [c]
    for c in providesV:
        c.evaluate(model)
    # Score each provider by pairwise dominance against the others.
    scores = []
    for c in providesV:
        score = 0
        for d in providesV:
            if c.isWorse(d, model):
                score -= 1
            if c.isBetter(d, model):
                score += 1
        scores += [(c, score)]
    scores.sort(key=lambda c: c[1], reverse=True)
    providers = [score[0] for score in scores]
    goodProviders = []
    badProviders = []
    for i in range(len(providers)):
        # Last element: everything is good.
        if i == len(providers)-1:
            goodProviders = providers[:]
            break
        # Never cut between providers with equal scores.
        if scores[i][1] == scores[i+1][1]:
            continue
        if getAffinities(providers[:i+1]).get(afV) >= neededAffinities.get(afV)-1:
            goodProviders = providers[:i+1]
            badProviders = providers[i+1:]
            break
    timeMethod("getProvidersForVector", start)
    return (goodProviders, badProviders)
def getRemainingLinks(wanted, neededAffinities, remainingLinks):
    """Subset of *remainingLinks* whose provided affinities still overlap
    what the solution needs."""
    start = time()
    links = [c for c in remainingLinks if neededAffinities.intersects(c.provides)]
    timeMethod("getRemainingLinks", start)
    return links
def getNeededConstellations(current, wanted, model, affinities=Affinity(0), remaining=None):
    """Tier <=1 constellations (excluding *wanted*) whose provided affinities
    overlap what the solution is still missing.

    Pass *remaining* to narrow a previous result instead of rescanning all
    constellations.
    """
    global globalMetadata
    start = time()
    neededAffinities = Solution.maxAffinities - affinities
    # possibles should be all tier 0-1 constellations sans the ones we remove.
    if remaining == None:
        possibles = [p for p in Constellation.constellations if p.getTier() <= 1 and not p in wanted]
    else:
        possibles = remaining[:]
    # print neededAffinities
    # Iterate over a copy so removing from `possibles` is safe.
    for c in possibles[:]:
        if not neededAffinities.intersects(c.provides):
            possibles.remove(c)
            # print "Discarding unnecessary constellation", c.name
    timeMethod("getNeededConstellations", start)
    return possibles
def getWanted(model):
    """Evaluate every constellation and return the 'wanted' list: those whose
    score-per-star exceeds a threshold, sorted by total score.  Also records
    the best score-per-star in globalMetadata.  (Python 2 print statements.)
    """
    global globalMetadata
    print "\nEvaluating constellations..."
    constellationRanks = []
    for c in Constellation.constellations:
        # print c
        # (constellation, total value, value per star)
        constellationRanks += [(c, c.evaluate(model), c.evaluate(model)/len(c.stars))]
        c.buildRedundancies(model)
    constellationRanks.sort(key=itemgetter(1), reverse=True)
    # First pass (report only): threshold at 90% of the top-sixth value.
    # NOTE(review): the `wanted` list built here is discarded and rebuilt in
    # the efficiency pass below -- confirm the value pass is display-only.
    thresh = constellationRanks[len(constellationRanks)/6][1] * .9
    print "\n  Desired constellations (value > %s):"%thresh
    wanted = []
    cv = constellationRanks[0][1]
    for c in constellationRanks:
        if c[1] > thresh:
            wanted += [c[0]]
            print "    ", str(int(c[1])).rjust(7), c[0].name.ljust(45), c[0].requires
        else:
            print "  - ", str(int(c[1])).rjust(7), c[0].name.ljust(45), c[0].requires
    # Second pass (authoritative): threshold on score-per-star (efficiency).
    constellationRanks.sort(key=itemgetter(2), reverse=True)
    thresh = constellationRanks[len(constellationRanks)/6][2]
    print "\n  Desired constellations (efficiency > %s):"%thresh
    wanted = []
    for c in constellationRanks:
        if c[2] > thresh and not c[0] in wanted:
            wanted += [c[0]]
            print "    ", int(c[2]), c[0].name
        else:
            print "  - ", int(c[2]), c[0].name
    globalMetadata["bestScorePerStar"] = constellationRanks[0][2]
    print "  Best score per star:", globalMetadata["bestScorePerStar"]
    print "  Total:", len(wanted)
    wanted.sort(key=lambda c: c.evaluate(model), reverse=True)
    return wanted
def addBoundedPath(solution):
    """Record `solution` as a bounding path unless it is already dominated.

    Returns True when an existing bounded path dominates `solution`
    (solution <= bp), False after appending `solution` to the list.
    Solutions longer than boundedPathLengthMax are never recorded.

    NOTE(review): the else-branch deletes every stored path scanned before a
    dominating one is found, regardless of whether `solution` dominates it --
    confirm that is the intended pruning (it assumes the `<=` partial order
    makes any non-dominating earlier entry disposable).
    """
    global globalMetadata
    if len(solution.constellations) > globalMetadata["boundedPathLengthMax"]:
        return False
    # Walk backwards so `del` does not disturb the remaining indices.
    for bpi in range(len(globalMetadata["boundedPaths"])-1, -1, -1):
        bp = globalMetadata["boundedPaths"][bpi]
        if solution <= bp:
            return True
        else:
            del globalMetadata["boundedPaths"][bpi]
    globalMetadata["boundedPaths"] += [solution]
    return False
def checkBoundedPath(solution):
    """Test `solution` against the stored bounding paths.

    Returns True when a strictly dominating path already exists (so the caller
    can prune this branch); otherwise replaces any paths `solution` dominates,
    records `solution`, and returns False. Timing is accumulated via timeMethod.
    """
    global globalMetadata
    if len(solution.constellations) > globalMetadata["boundedPathLengthMax"]:
        return False
    start = time()
    # Reverse iteration so in-place replacement is index-stable.
    for bpi in range(len(globalMetadata["boundedPaths"])-1, -1, -1):
        bp = globalMetadata["boundedPaths"][bpi]
        if solution <= bp and not solution == bp:
            timeMethod("checkBoundedPath", start)
            return True
        elif solution >= bp:
            # print "   -->> "+str(solution)
            # print "     -> "+str(bp)
            # Replace the dominated path; duplicates introduced here are
            # collapsed by the set() pass below.
            globalMetadata["boundedPaths"][bpi] = solution
            timeMethod("checkBoundedPath", start)
    # Length guard is redundant with the early return above but kept as written.
    if len(solution.constellations) <= globalMetadata["boundedPathLengthMax"]:
        # print "   -+-> "+str(solution)
        globalMetadata["boundedPaths"] += [solution]
        globalMetadata["boundedPaths"] = list(set(globalMetadata["boundedPaths"]))
    timeMethod("checkBoundedPath", start)
    return False
def getUpperBoundScore(solution, points, wanted, model):
    """Optimistic upper bound for a partial solution's final score.

    Current score plus the best observed per-star score applied to every
    remaining point. `wanted` is accepted for interface compatibility but
    unused by this bound.
    """
    # BUG FIX: the original declared `global globalMetadataffww` (typo); the
    # dict is only read here, so no global declaration is needed at all.
    start = time()
    score = evaluateSolution(solution, model)
    score += points * globalMetadata["bestScorePerStar"]
    timeMethod("getUpperBoundScore", start)
    return score
def getUpperBoundScore2(solutionScore, points):
    """Cheaper variant of getUpperBoundScore that reuses a precomputed score.

    Adds the best observed per-star score for each remaining point to
    `solutionScore` and returns the optimistic total.
    """
    start = time()
    solutionScore += points * globalMetadata["bestScorePerStar"]
    # FIX: time under this function's own name; the original attributed the
    # cost to "getUpperBoundScore", skewing the methodTimes breakdown.
    timeMethod("getUpperBoundScore2", start)
    return solutionScore
def sortDeadSolution(solution):
    """Return the solution's constellations ordered by their scaled index."""
    begin = time()
    ordered = sorted(solution, key=lambda member: member.index / 100.0)
    timeMethod("sortDeadSolution", begin)
    return ordered
def doMove2(model, wanted, points, solution, remainingLinks, moveStr=""):
    """Depth-first branch-and-bound over Solution objects (rewrite of doMove).

    Recursively extends `solution` with constellations from `wanted` plus the
    still-relevant link constellations. Prunes on upper-bound score and on the
    bounded-path dominance check; records new best solutions in globalMetadata
    and persists them via model.saveSeedSolutions(). Python 2 code.
    """
    global globalMetadata
    # During a bounding run only explore down to the configured depth.
    if globalMetadata["boundingRun"] == True:
        if len(solution.constellations) >= globalMetadata["boundingRunDepth"]:
            return
    globalMetadata["numCheckedSolutions"] += 1
    if solution.isDead: # should never hit this check but...
        return
    # Prune when even an optimistic completion cannot beat the current best.
    ub = getUpperBoundScore2(solution.score, points)
    if ub < globalMetadata["bestScore"] and solution.score < ub:
        # print ub, "<", globalMetadata["bestScore"]
        # print points, "points left before trim"
        solution.kill()
        return
    if checkBoundedPath(solution):
        # print "Killing bounded solution:", solution
        solution.kill()
        return
    neededAffinities = Solution.maxAffinities - solution.provides
    remainingLinks = getRemainingLinks(wanted, neededAffinities, remainingLinks)
    possibleMoves = wanted + remainingLinks
    nextMoves = getNextMoves(solution, possibleMoves, points, model)
    # nextMoves, redundantMoves = getNextMoves(solution, possibleMoves, affinities, points, model)
    # nextMoves = sortByScore(nextMoves, model)
    nextMoves = sortConstellationsByProvidesValueScore(nextMoves, model)
    # random.shuffle(nextMoves)
    # A node with no legal next moves is a complete (leaf) solution.
    isSolution = True
    newWanted = wanted[:]
    links = remainingLinks[:]
    for move in nextMoves:
        isSolution = False
        newMoveStr = moveStr + move.id + "("+ str(int(move.evaluate(model))) +")" +" {"+str(nextMoves.index(move)+1)+"/"+str(len(nextMoves))+"}, "
        # Bare excepts tolerate `move` being absent from either list.
        try:
            links.remove(move)
        except:
            pass
        try:
            newWanted.remove(move)
        except:
            pass
        nextSolution = Solution(solution.constellations+[move], model)
        if not nextSolution.isDead:
            doMove2(model, newWanted, points-len(move.stars), nextSolution, links, newMoveStr)
    # Bounding runs only populate boundedPaths; skip bookkeeping/printing below.
    if globalMetadata["boundingRun"]:
        return
    solution.kill()
    # Progress output for shallow nodes only, to limit console spam.
    if len(solution.constellations) <= model.points/8.0:
        print " <-X- (" + str(solution.cost) + "): " + moveStr[:-2]
        print " ", globalMetadata["numCheckedSolutions"]#, " ", len(globalMetadata["boundedPaths"])
        # print " ", str(methodTimes), sum([methodTimes[key] for key in methodTimes.keys()])
    if isSolution:
        if solution.score >= globalMetadata["bestScore"]:
            globalMetadata["bestScore"] = solution.score
            globalMetadata["bestSolutions"] += [(solution.score, solution.constellations)]
            model.seedSolutions += [solution.constellations]
            model.saveSeedSolutions()
            print "New best: "
            print solution
def doMove(model, wanted, points, solution=[], affinities=Affinity(), remaining=None, moveStr=""):
    """Original list-based depth-first search (superseded by doMove2).

    `solution` is a plain list of constellations; `affinities` accumulates the
    provides of the path so far. Python 2 code.

    NOTE: `solution=[]` and `affinities=Affinity()` are shared mutable defaults
    (evaluated once at def time); the body never mutates them in place
    (`solution+[move]`, `affinities+move.provides`), so this is currently safe.
    """
    global globalMetadata
    globalMetadata["numCheckedSolutions"] += 1
    # if globalMetadata["boundingRun"] == True:
    #     if len(solution) >= globalMetadata["boundingRunDepth"]:
    #         return
    if len(solution) > 0:
        if isDeadSolution(solution, model):
            return
        # ub = getUpperBoundScore(solution, points, wanted, model)
        # if ub < globalMetadata["bestScore"] and evaluateSolution(solution, model) < ub:
        #     killSolution(solution, model)
        #     return
        # if checkBoundedPath(solution, model):
        #     killSolution(solution, model)
        #     return
    # if not remaining:
    remaining = getNeededConstellations(solution, wanted, model, affinities, remaining)
    possibleMoves = wanted + remaining
    # print len(possibleMoves)
    nextMoves,_ = getNextMoves(solution, possibleMoves, affinities, points, model)
    # if len(solution) > 0:
    #     lastMove = solution[-1]
    #     if lastMove.provides.magnitude() == 0:
    #         neededAffinities = globalMetadata["globalMaxAffinities"] - affinities
    #         for move in nextMoves[:]:
    #             provides = move.provides.minAffinities(neededAffinities)
    #             if provides.magnitude() > 0:
    #                 nextMoves.remove(move)
    # nextMoves = sortByLeastProvides(nextMoves, model)
    nextMoves = sortByScore(nextMoves, model)
    # nextMoves = sortByScorePerStar(nextMoves, model)
    # nextMoves = sortConstellationsByProvides(nextMoves)
    # nextMoves = sorted(availableConstellations, key=lambda c: c.name, reverse=False)
    # random.shuffle(nextMoves)
    # nextMoves = availableConstellations
    # No legal extensions means this path is a complete (leaf) solution.
    isSolution = True
    for move in nextMoves:
        isSolution = False
        newMoveStr = moveStr + move.id + "("+ str(int(move.evaluate(model))) +")" +" {"+str(nextMoves.index(move)+1)+"/"+str(len(nextMoves))+"}, "
        # Bare excepts tolerate `move` being absent from either list.
        try:
            remaining.remove(move)
        except:
            pass
        newWanted = wanted[:]
        try:
            newWanted.remove(move)
        except:
            pass
        doMove(model, newWanted, points-len(move.stars), solution+[move], affinities+move.provides, remaining, newMoveStr)
    # if globalMetadata["boundingRun"]:
    #     return
    killSolution(solution, model)
    # Progress output for shallow nodes only (py2 integer division).
    if len(solution) <= model.points/9:
        print " <-X- (" + str(getSolutionCost(solution)) + "): " + moveStr[:-2]
        print " ", globalMetadata["numCheckedSolutions"]#, " ", len(globalMetadata["boundedPaths"])
        print " ", str(methodTimes), sum([methodTimes[key] for key in methodTimes.keys()])
    # if globalMetadata["numCheckedSolutions"] > 20000:
    #     print (time() - globalMetadata["startTime"]), "seconds"
    #     print " ", str(methodTimes), sum([methodTimes[key] for key in methodTimes.keys()])
    #     sys.exit(0)
    if isSolution:
        score = evaluateSolution(solution, model)
        if score >= globalMetadata["bestScore"]:
            globalMetadata["bestScore"] = score
            globalMetadata["bestSolutions"] += [(score, solution)]
            model.seedSolutions += [solution]
            model.saveSeedSolutions()
            print "New best: "
            printSolution(solution, model)
def startSearch(model, startingSolution=[]):
    """Top-level driver: rank constellations, seed bounds, then run doMove2.

    Deducts the cost of `startingSolution` from the model's point budget,
    computes the wanted list and the global affinity-value vector, primes the
    bounded-path table from saved seed solutions, optionally performs a
    depth-limited bounding run, then executes the full search and prints the
    best solutions found. Python 2 code.

    NOTE: `startingSolution=[]` is a shared mutable default; it is only read
    here, so currently harmless.
    """
    global globalMetadata
    model.points -= getSolutionCost(startingSolution)
    model.initialize(False)
    wanted = getWanted(model)
    # maxAffinities = elementwise max of the requirements of all wanted constellations.
    Solution.maxAffinities = Affinity()
    for c in wanted:
        Solution.maxAffinities = Solution.maxAffinities.maxAffinities(c.requires)
    print Solution.maxAffinities
    # Score-weighted, normalized affinity vector used to value `provides`.
    aVector = Affinity()
    for c in Constellation.constellations:
        # print c.evaluate(model)
        score = c.evaluate(model)
        aVector += c.requires*score
    aVector = aVector/aVector.magnitude()
    globalMetadata["providesValue"] = aVector
    out = "Affinity value: "
    for i in range(len(aVector.affinities)):
        out += "%0.2f"%aVector.affinities[i] + Affinity.sh[i] + " "
    print out
    # getNeededConstellations(current, points, wanted, affinities=Affinity(0), possibles=Constellation.constellations):
    needed = getNeededConstellations([], wanted, model)
    print solutionPath(needed)
    print "\nSearch Space: "+str(len(needed))
    # return
    wanted.sort(key=lambda c: c.evaluate(model), reverse=True)
    # Seed bestScore and the bounded-path table from previously saved solutions.
    globalMetadata["bestSolutions"] = [(evaluateSolution(solution, model), solution) for solution in model.seedSolutions]
    globalMetadata["bestSolutions"].sort(key=itemgetter(0), reverse=True)
    print "\nEvaluating seed solutions..."
    for constellations in globalMetadata["bestSolutions"]:
        solution = Solution(constellations[1], model)
        print "\t" + str(solution)
        if solution.score >= globalMetadata["bestScore"]:
            globalMetadata["bestScore"] = solution.score
        # Every prefix of a seed solution becomes a bounding path.
        for i in range(1, len(solution.constellations)):
            checkBoundedPath(Solution(solution.constellations[:i+1], model))
    globalMetadata["bestSolutions"] = []
    if globalMetadata["boundingRun"]:
        print "\nPerforming a bounding run to depth", globalMetadata["boundingRunDepth"]
        doMove2(model, wanted, model.points, Solution([], model), needed)
        globalMetadata["boundingRun"] = False
        # globalMetadata["deadSolutions"] = {}
        print " ", len(globalMetadata["boundedPaths"]), "bounding paths created."
    # if globalMetadata["globalMaxAffinities"].magnitude() == 0:
    #     globalMetadata["globalMaxAffinities"] = Affinity()
    #     for c in wanted:
    #         globalMetadata["globalMaxAffinities"] = globalMetadata["globalMaxAffinities"].maxAffinities(c.requires)
    print "\nExecuting search..."
    # needed = list(set(random.sample(needed, 5) + [xC, xA, xP, xO, xE]))
    # print needed
    doMove2(model, wanted, model.points, Solution([], model), needed)
    print "Evaluated " + str(globalMetadata["numCheckedSolutions"])
    print "\n\n\n\n\nBest solutions found:"
    globalMetadata["bestSolutions"].sort(key=itemgetter(0), reverse=True)
    for solution in globalMetadata["bestSolutions"]:
        printSolution(solution[1], model, " ")
    for solution in globalMetadata["bestSolutions"]:
        print solutionPath(solution[1], " ")
# Module-level search state shared by doMove/doMove2/startSearch.
globalMetadata = {
    "globalMaxAffinities": Affinity(),
    "bestScorePerStar": 0,
    "providesValue": Affinity(),
    "bestScore": 0,
    "bestSolutions": [],
    "deadSolutions": {},
    "boundedPaths": [],
    "boundedPathLengthMax": 7,
    "boundingRun": False,
    "boundingRunDepth": 5,
    "numCheckedSolutions": 0,
    "startTime": time(),
}
# startSearch(Model.loadModel("Armitage"))
# model = Model.loadModel("Armitage")
# model.initialize(False)
# wanted = getWanted(model)
# Solution.maxAffinities = Affinity()
# for c in wanted:
# Solution.maxAffinities = Solution.maxAffinities.maxAffinities(c.requires)
# print len(getLinks(wanted))
# I think the next step is to look at trying to branch and bound.
# I think this is pretty nonlinear so I don't have a real good way of doing that.
# an expensive way would be to look at each solution's best possible outcome by adding the best scoring constellations to the solution up to the remaining points and if it's not better than my current best don't continue.
# too expensive to evaluate
# I can kill a solution path if I have already seen a solution with fewer points, greater affinities and a greater score
# I don't need to evaluate needs every time. Adding a constellation can only remove needs so if I pass them in and trim the ones I no longer need that should save time.
# I can probably eliminate constellations from the initial search space by looking at the total needed affinity. With unneeded constellations I just need to satisfy the need; I don't need all possible ways of satisfying the need.
#if I need 5 and
# a provides 2
# b provides 4
# c provides 5
# if a + b is cheaper and higher scoring than c then c is useless.
# if c is cheaper and higher scoring than a + b then they are useless. |
993,061 | ea297a7d2982a1fa57558a1d4f1b5f5b85ec6174 | from envio.models import Centro, Estudio, Plan, Persona, Matricula, Entrega, Departamento
# Seed script: populate the reference tables of the `envio` Django app.
# Positional model args as used here: Centro(id, name, campus-letter),
# Estudio(id, name, years?), Departamento(id, name) -- TODO confirm field
# order against envio.models; only the usage below is visible here.

# Centres. NOTE(review): ids 101 and 109 share the same name ('Facultad de
# Economía y Empresa') -- confirm this duplication is intentional.
centros = [
    Centro(100, 'Facultad de Ciencias','Z'),
    Centro(101, 'Facultad de Economía y Empresa', 'Z'),
    Centro(102, 'Facultad de Derecho', 'Z'),
    Centro(109, 'Facultad de Economía y Empresa', 'Z'),
    Centro(110, 'Escuela de Ingeniería y Arquitectura', 'Z'),
    Centro(228, 'Facultad de Empresa y Gestión Pública', 'Z'),
    Centro(175, 'Escuela Universitaria Politécnica de la Almunia','A'),
    Centro(302, 'Facultad de Ciencias Sociales y Humanas', 'Z'),
    Centro(326, 'Escuela Universitaria Politécnica de Teruel', 'T'),
]
for c in centros:
    c.save()

# Degree programmes.
estudios = [
    Estudio(134, 'Graduado en Finanzas y Contabilidad', 5),
    Estudio(157, 'Graduado en Estudios en Arquitectura', 5),
    Estudio(148, 'Graduado en Ingeniería Informática', 5),
]
for e in estudios:
    e.save()

# Study plans linking a programme to a centre for academic year 2018.
# Must run after the centro/estudio saves above (objects.get lookups).
planes = [
    Plan(pid=449, curso='2018', estudio=Estudio.objects.get(eid=134), centro=Centro.objects.get(cid=109)),
    Plan(pid=470, curso='2018', estudio=Estudio.objects.get(eid=157), centro=Centro.objects.get(cid=110)),
    Plan(pid=439, curso='2018', estudio=Estudio.objects.get(eid=148), centro=Centro.objects.get(cid=110)),
    Plan(pid=443, curso='2018', estudio=Estudio.objects.get(eid=148), centro=Centro.objects.get(cid=326)),
]
for p in planes:
    p.save()

# University departments (full catalogue).
departamentos = [
    Departamento(1, 'Análisis Económico'),
    Departamento(2, 'Anatomía e Histología Humanas'),
    Departamento(3, 'Anatomía Patológica, Medicina Legal y Forense y Toxicología'),
    Departamento(4, 'Anatomía, Embriología y Genética Animal'),
    Departamento(5, 'Bioquímica y Biología Molecular y Celular'),
    Departamento(6, 'Ciencia y Tecnología de Materiales y Fluidos'),
    Departamento(7, 'Ciencias Agrarias y del Medio Natural'),
    Departamento(8, 'Ciencias de la Antigüedad'),
    Departamento(9, 'Ciencias de la Documentación e Historia de la Ciencia'),
    Departamento(10, 'Ciencias de la Educación'),
    Departamento(11, 'Ciencias de la Tierra'),
    Departamento(12, 'Cirugía, Ginecología y Obstetricia'),
    Departamento(13, 'Contabilidad y Finanzas'),
    Departamento(14, 'Derecho de la Empresa'),
    Departamento(15, 'Derecho Penal, Filosofía del Derecho e Historia del Derecho'),
    Departamento(16, 'Derecho Privado'),
    Departamento(17, 'Derecho Público'),
    Departamento(18, 'Didáctica de las Ciencias Experimentales'),
    Departamento(19, 'Didáctica de las Lenguas y de las Ciencias Humanas y Sociales'),
    Departamento(20, 'Dirección de Marketing e Investigación de Mercados'),
    Departamento(21, 'Dirección y Organización de Empresas'),
    Departamento(22, 'Estructura e Historia Económica y Economía Pública'),
    Departamento(23, 'Expresión Musical, Plástica y Corporal'),
    Departamento(24, 'Farmacología y Fisiología'),
    Departamento(25, 'Filología Española'),
    Departamento(26, 'Filología Francesa'),
    Departamento(27, 'Filología Inglesa y Alemana'),
    Departamento(28, 'Filosofía'),
    Departamento(29, 'Fisiatría y Enfermería'),
    Departamento(30, 'Fisica Aplicada'),
    Departamento(31, 'Física de la Materia Condensada'),
    Departamento(32, 'Física Teórica'),
    Departamento(33, 'Geografía y Ordenación del Territorio'),
    Departamento(34, 'Historia del Arte'),
    Departamento(35, 'Historia Medieval, Ciencias y Técnicas Historiográficas y Estudios Árabes e Islámicos'),
    Departamento(36, 'Historia Moderna y Contemporánea'),
    Departamento(37, 'Informática e Ingeniería de Sistemas'),
    Departamento(38, 'Ingeniería de Diseño y Fabricación'),
    Departamento(39, 'Ingeniería Eléctrica'),
    Departamento(40, 'Ingeniería Electrónica y Comunicaciones'),
    Departamento(41, 'Ingeniería Mecánica'),
    Departamento(42, 'Ingeniería Química y Tecnologías del Medio Ambiente'),
    Departamento(43, 'Lingüística General e Hispánica'),
    Departamento(44, 'Matemática Aplicada'),
    Departamento(45, 'Matemáticas'),
    Departamento(46, 'Medicina, Psiquiatría y Dermatología'),
    Departamento(47, 'Métodos Estadísticos'),
    Departamento(48, 'Microbiología, Medicina Preventiva y Salud Pública'),
    Departamento(49, 'Patología Animal'),
    Departamento(50, 'Pediatría, Radiología y Medicina Física'),
    Departamento(51, 'Producción Animal y Ciencia de los Alimentos'),
    Departamento(52, 'Psicología y Sociología'),
    Departamento(53, 'Química Analítica'),
    Departamento(54, 'Química Física'),
    Departamento(55, 'Química Inorgánica'),
    Departamento(56, 'Química Orgánica'),
    Departamento(57, 'Unidad Predepartamental de Arquitectura'),
]
for d in departamentos:
    d.save()
993,062 | 891c416311e9d801eb79bf17ce51f1fc1e2fe9a8 | import tw2.core as twc
import tw2.sqla as tws
import sqlalchemy as sa
import transaction
import tw2.sqla.utils as twsu
import testapi
from nose.tools import eq_
class BaseObject(object):
    """ Contains all tests to be run over Elixir and sqlalchemy-declarative.

    Subclasses must provide setUp() that binds self.session, self.DBTestCls1..6
    and self.admin_role, and seeds the fixture rows the tests below rely on.
    """

    def setUp(self):
        raise NotImplementedError("Must be subclassed.")

    def test_query_from_dict_simple(self):
        d = {
            'id': 1,
        }
        e = twsu.from_dict(self.DBTestCls1.query.filter_by(**d).first(), d)
        self.session.flush()
        assert(e.id == 1)
        assert(e.name == 'foo')
        assert(len(e.others) == 1)
        assert(e.others[0].id == 1)
        assert(e.others[0].nick == 'bob')
        assert(e.others[0].other == e)

    def test_query_from_dict_empty(self):
        d = {}
        x = self.DBTestCls1()
        self.session.add(x)
        e = twsu.from_dict(x, d)
        self.session.flush()
        assert(e.id == 2)
        assert(e.name == None)
        assert(e.others == [])

    def test_from_dict_new(self):
        d = {
            'id': None,
            'name': 'bazaar',
        }
        x = self.DBTestCls1()
        self.session.add(x)
        e = twsu.from_dict(x, d)
        if hasattr(self, 'session'):
            self.session.flush()
        assert(e.id == 2)
        assert(e.name == 'bazaar')
        assert(len(e.others) == 0)

    def test_from_dict_modify_to_none(self):
        # Do this to set up an object with name => 'bazaar'
        self.test_from_dict_new()
        # Now try to modify that object and set its name to None
        d = {
            'id': 2,
            'name': None,
        }
        x = self.DBTestCls1.query.filter_by(id=2).first()
        e = twsu.from_dict(x, d)
        self.session.flush()
        eq_(e.id, 2)
        eq_(e.name, None)
        eq_(len(e.others), 0)

    def test_from_dict_modify_many_to_many(self):
        d = {
            'id': 1,
            'surname': 'user1',
            'roles': [],
        }
        u = twsu.from_dict(self.session.query(self.DBTestCls3).one(), d)
        self.session.add(u)
        self.session.flush()
        # Emptying the relationship must not delete the role rows themselves.
        assert(self.session.query(self.DBTestCls3).count() == 1)
        assert(self.session.query(self.DBTestCls4).count() == 1)
        assert(u.id == 1)
        assert(u.surname == 'user1')
        assert(u.roles == [])

    def test_from_dict_modify_one_to_one(self):
        d = {
            'id': None,
            'name': 'user1',
            'account': {
                'account_name': 'account2',
            }
        }
        u = twsu.from_dict(self.session.query(self.DBTestCls6).one(), d)
        self.session.add(u)
        self.session.flush()
        assert(u.id == 1)
        assert(u.name == 'user1')
        assert(u.account.account_name == 'account2')
        assert(self.session.query(self.DBTestCls5).count() == 1)

    def test_from_dict_modify_one_to_one_to_none(self):
        d = {
            'id': None,
            'name': 'user1',
            'account': None
        }
        u = twsu.from_dict(self.session.query(self.DBTestCls6).one(), d)
        self.session.flush()
        # Detaching the one-to-one should remove the orphaned account row.
        assert(u.id == 1)
        assert(u.name == 'user1')
        assert(u.account == None)
        assert(self.session.query(self.DBTestCls5).count() == 0)

    def test_from_dict_new_many_to_one_by_id(self):
        d = {
            'id': None,
            'nick': 'bazaar',
            'other_id': 1,
        }
        e = twsu.from_dict(self.DBTestCls2(), d)
        self.session.add(e)
        self.session.flush()
        assert(e.id == 3)
        assert(e.nick == 'bazaar')
        assert(len(e.other.others) == 2)
        assert(e in e.other.others)

    def test_from_dict_new_many_to_many_by_id(self):
        d = {
            'id': None,
            'surname': 'user1',
            'roles': [self.admin_role],
        }
        u = twsu.from_dict(self.DBTestCls3(), d)
        self.session.add(u)
        self.session.flush()
        assert(u.id == 2)
        assert(u.surname == 'user1')
        assert(u.roles == [self.admin_role])

    def test_from_dict_new_one_to_one_by_id(self):
        d = {
            'id': None,
            'name': 'user1',
            'account': self.DBTestCls5(account_name='account2'),
        }
        u = twsu.from_dict(self.DBTestCls6(), d)
        self.session.add(u)
        self.session.flush()
        assert(u.id == 2)
        assert(u.name == 'user1')
        assert(u.account.account_name == 'account2')
        assert(self.session.query(self.DBTestCls5).count() == 2)

    def test_from_dict_new_one_to_one_by_id_no_account(self):
        d = {
            'id': None,
            'name': 'user1',
            'account': None,
        }
        u = twsu.from_dict(self.DBTestCls6(), d)
        self.session.add(u)
        self.session.flush()
        assert(u.id == 2)
        assert(u.name == 'user1')
        assert(u.account == None)
        assert(self.session.query(self.DBTestCls5).count() == 1)

    def test_from_dict_old_many_to_one_by_dict_recall(self):
        assert(self.DBTestCls2.query.first().nick == 'bob')
        d = {
            'nick': 'updated',
            'other': {
                'id': 1
            }
        }
        e = twsu.from_dict(self.DBTestCls2.query.first(), d)
        self.session.flush()
        assert(self.DBTestCls2.query.first().nick == 'updated')
        assert(self.DBTestCls1.query.first().others[0].nick == 'updated')

    def test_from_dict_old_many_to_one_by_dict(self):
        d = {
            'id': None,
            'nick': 'bazaar',
            'other': {
                'name': 'foo'
            }
        }
        x = self.DBTestCls2()
        self.session.add(x)
        e = twsu.from_dict(x, d)
        self.session.flush()
        assert(e.id == 3)
        assert(e.nick == 'bazaar')
        assert(e.other.name == 'foo')

    def test_from_dict_new_many_to_one_by_dict(self):
        d = {
            'id': None,
            'nick': 'bazaar',
            'other': {
                'name': 'blamaz'
            }
        }
        x = self.DBTestCls2()
        self.session.add(x)
        e = twsu.from_dict(x, d)
        self.session.flush()
        assert(e.id == 3)
        assert(e.nick == 'bazaar')
        assert(e in e.other.others)
        assert(e.other.name == 'blamaz')
        assert(e.other.id == 2)

    def test_from_dict_new_one_to_many_by_dict(self):
        d = {
            'id': None,
            'name': 'qatar',
            'others': [
                {'nick': 'blang'},
                {'nick': 'blong'},
            ]
        }
        x = self.DBTestCls1()
        self.session.add(x)
        e = twsu.from_dict(x, d)
        self.session.flush()
        assert(e.id == 2)
        assert(e.name == 'qatar')
        assert(e.others[0].nick == 'blang')
        assert(e.others[0].id == 3)
        assert(e.others[1].nick == 'blong')
        assert(e.others[1].id == 4)

    def test_from_dict_mixed_list(self):
        d = {
            'id': None,
            'name': 'qatar',
            'others': [
                {'nick': 'blang'},
                'foo',
            ]
        }
        try:
            e = twsu.from_dict(self.DBTestCls1(), d)
            assert(False)
        # FIX: modern `except ... as` syntax (works on py2.6+ and py3; the old
        # comma form is a SyntaxError on py3).
        except Exception as e:
            assert(str(e) == 'Cannot send mixed (dict/non dict) data ' +
                   'to list relationships in from_dict data.')

    def test_from_dict_prm_tamp_mto(self):
        # When updating a DBTestCls2 object,
        # it should only be possible to modify
        # a DBTestCls1 object that is related to that object.
        prev_name = self.DBTestCls1.query.get(1).name
        twsu.from_dict(self.DBTestCls2.query.get(2), {
            'other': {'id': 1, 'name': prev_name + '_fred'}})
        assert(self.DBTestCls1.query.get(1).name == prev_name)

    def test_from_dict_prm_tamp_otm(self):
        # When updating a DBTestCls1 object,
        # it should only be possible to modify
        # a DBTestCls2 object that is related to that object.
        prev_nick = self.DBTestCls2.query.get(1).nick
        prev_id = self.DBTestCls2.query.get(1).id
        prev_count = self.DBTestCls2.query.count()
        twsu.from_dict(self.DBTestCls1(), {'others': [
            {'id': prev_id, 'nick': prev_nick + '_fred'}]})
        obj = self.DBTestCls2.query.get(1)
        count = self.DBTestCls2.query.count()
        assert(prev_nick == obj.nick)
        assert(prev_id == obj.id)
        assert(count == prev_count + 1)

    def test_update_or_create(self):
        d = {'name': 'winboat'}
        e = twsu.update_or_create(self.DBTestCls1, d)
        self.session.flush()
        assert(e.id == 2)
        assert(e.name == 'winboat')
        d = {'id': 1, 'name': 'winboat'}
        e = twsu.update_or_create(self.DBTestCls1, d)
        self.session.flush()
        assert(e.id == 1)
        assert(e.name == 'winboat')

    def test_update_or_create_with_zero(self):
        """ Ensure that 0 doesn't get interpreted as None.
        For the following issue: http://bit.ly/OiFUfb
        """
        d = {'name': 'winboat', 'some_number': 0}
        e = twsu.update_or_create(self.DBTestCls1, d)
        self.session.flush()
        eq_(e.some_number, 0)

    def test_update_or_create_exception(self):
        d = {
            'id': 55,
            'name': 'failboat'
        }
        try:
            e = twsu.update_or_create(self.DBTestCls1, d)
            assert(False)
        except Exception as e:
            # BUG FIX: the original asserted a non-empty list comprehension,
            # which is always truthy; all() actually checks each substring.
            assert(all(s in str(e) for s in ['cannot create', 'with pk']))
#
# From a design standpoint, it would be nice to make the tw2.sqla.utils
# functions persistence-layer agnostic.
#
class TestElixir(BaseObject):
    """Runs the BaseObject suite against an Elixir model on in-memory SQLite."""

    def setUp(self):
        self.session = el.session = tws.transactional_session()
        el.metadata = sa.MetaData('sqlite:///:memory:')

        class DBTestCls1(el.Entity):
            name = el.Field(el.String)
            some_number = el.Field(el.Integer, default=2)

        class DBTestCls2(el.Entity):
            nick = el.Field(el.String)
            other_id = el.Field(el.Integer, colname='other')
            other = el.ManyToOne(DBTestCls1,
                                 field=other_id,
                                 backref='others')

        class DBTestCls3(el.Entity):
            surname = el.Field(el.String)
            roles = el.ManyToMany('DBTestCls4')

        class DBTestCls4(el.Entity):
            rolename = el.Field(el.String)
            users = el.ManyToMany('DBTestCls3')

        class DBTestCls5(el.Entity):
            account_name = el.Field(el.String, required=True)
            user = el.OneToOne('DBTestCls6', inverse='account')

        class DBTestCls6(el.Entity):
            name = el.Field(el.String)
            account_id = el.Field(el.Integer, required=False)
            account = el.ManyToOne(DBTestCls5, field=account_id, inverse='user', uselist=False)

        self.DBTestCls1 = DBTestCls1
        self.DBTestCls2 = DBTestCls2
        self.DBTestCls3 = DBTestCls3
        self.DBTestCls4 = DBTestCls4
        self.DBTestCls5 = DBTestCls5
        self.DBTestCls6 = DBTestCls6
        el.setup_all()
        el.metadata.create_all()
        # Fixture rows expected by the BaseObject tests.
        foo = self.DBTestCls1(id=1, name='foo')
        bob = self.DBTestCls2(id=1, nick='bob', other=foo)
        george = self.DBTestCls2(id=2, nick='george')
        fred = self.DBTestCls3(id=1, surname='fred')
        admin = self.DBTestCls4(id=1, rolename='admin')
        fred.roles.append(admin)
        # BUG FIX: the original referenced self.DbTestCls5/self.DbTestCls6
        # (wrong capitalisation) -> AttributeError at setUp time.
        account1 = self.DBTestCls5(id=1, account_name='account1')
        bob1 = self.DBTestCls6(id=1, name='bob1', account_id=1)
        testapi.setup()
        self.admin_role = admin

    #def tearDown(self):
    #    import elixir as el
    #    el.drop_all()
# Elixir is an optional dependency: when it cannot be imported, drop the
# TestElixir class so the test runner skips that suite entirely.
try:
    import elixir as el
except ImportError:
    el = None
if el is None:
    TestElixir = None
class TestSQLA(BaseObject):
    """Runs the BaseObject suite against sqlalchemy-declarative models
    on an in-memory SQLite database.
    """

    def setUp(self):
        from sqlalchemy.ext.declarative import declarative_base
        self.session = tws.transactional_session()
        Base = declarative_base(metadata=sa.MetaData('sqlite:///:memory:'))
        # Give every model the .query attribute the shared tests rely on.
        Base.query = self.session.query_property()

        class DBTestCls1(Base):
            __tablename__ = 'Test'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.String(50))
            some_number = sa.Column(sa.Integer, default=2)

        class DBTestCls2(Base):
            __tablename__ = 'Test2'
            id = sa.Column(sa.Integer, primary_key=True)
            nick = sa.Column(sa.String)
            other_id = sa.Column(sa.Integer, sa.ForeignKey('Test.id'))
            other = sa.orm.relation(DBTestCls1,
                                    backref=sa.orm.backref('others'))

        # Association table for the DBTestCls3 <-> DBTestCls4 many-to-many.
        join_table = sa.Table('Test3_Test4', Base.metadata,
            sa.Column('Test3', sa.Integer, sa.ForeignKey('Test3.id'), primary_key=True),
            sa.Column('Test4', sa.Integer, sa.ForeignKey('Test4.id'), primary_key=True)
        )

        class DBTestCls3(Base):
            __tablename__ = 'Test3'
            id = sa.Column(sa.Integer, primary_key=True)
            surname = sa.Column(sa.String(50))
            def __unicode__(self):
                return self.surname

        class DBTestCls4(Base):
            __tablename__ = 'Test4'
            id = sa.Column(sa.Integer, primary_key=True)
            rolename = sa.Column(sa.String(50))
            users = sa.orm.relationship('DBTestCls3', secondary=join_table, backref='roles')
            def __unicode__(self):
                return self.rolename

        class DBTestCls5(Base):
            __tablename__ = 'Test5'
            id = sa.Column(sa.Integer, primary_key=True)
            account_name = sa.Column(sa.String(50), nullable=False)

        class DBTestCls6(Base):
            __tablename__ = 'Test6'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.String(50))
            account_id = sa.Column(sa.Integer, sa.ForeignKey('Test5.id'), nullable=True)
            account = sa.orm.relation(DBTestCls5, backref=sa.orm.backref('user', uselist=False))

        Base.metadata.create_all()
        self.DBTestCls1 = DBTestCls1
        self.DBTestCls2 = DBTestCls2
        self.DBTestCls3 = DBTestCls3
        self.DBTestCls4 = DBTestCls4
        self.DBTestCls5 = DBTestCls5
        self.DBTestCls6 = DBTestCls6
        # Fixture rows expected by the BaseObject tests.
        foo = self.DBTestCls1(id=1, name='foo')
        self.session.add(foo)
        bob = self.DBTestCls2(id=1, nick='bob')
        bob.other = foo
        self.session.add(bob)
        george = self.DBTestCls2(id=2, nick='george')
        self.session.add(george)
        fred = self.DBTestCls3(id=1, surname='fred')
        admin = self.DBTestCls4(id=1, rolename='admin')
        fred.roles.append(admin)
        self.session.add(fred)
        self.admin_role = admin
        account1 = self.DBTestCls5(id=1, account_name='account1')
        self.session.add(account1)
        bob1 = self.DBTestCls6(id=1, name='bob1', account_id=1)
        self.session.add(bob1)
        transaction.commit()
        testapi.setup()

    #def tearDown(self):
    #    Base.metadata.drop_all()
|
993,063 | cb09049b4c36476077bad63da1cf78afc93cc401 | from ns.utils import stampedstore
class VirtualClockServer:
    """ Models a virtual clock server. For theory and implementation see:
    L. Zhang, Virtual clock: A new traffic control algorithm for packet switching networks,
    in ACM SIGCOMM Computer Communication Review, 1990, vol. 20, pp. 19.
    Parameters
    ----------
    env : simpy.Environment
        the simulation environment
    rate : float
        the bit rate of the port
    vticks : A list
        list of the vtick parameters (for each possible packet flow_id). We assume a simple assignment of
        flow id to vticks, i.e., flow_id = 0 corresponds to vticks[0], etc... We assume that the vticks are
        the inverse of the desired rates for the flows in bits per second.
    debug : bool
        unused here beyond being stored; presumably enables diagnostics elsewhere -- TODO confirm
    """
    def __init__(self, env, rate, vticks, debug=False):
        self.env = env
        self.rate = rate
        self.vticks = vticks
        self.auxVCs = [0.0 for i in range(len(vticks))
                       ]  # Initialize all the auxVC variables
        self.out = None  # downstream element; must be wired up before packets flow
        self.packets_received = 0
        self.packets_dropped = 0
        self.debug = debug
        # Priority store ordered by the auxVC timestamp attached in put().
        self.store = stampedstore.StampedStore(env)
        self.action = env.process(
            self.run())  # starts the run() method as a SimPy process

    def run(self):
        """Drain the store in virtual-clock order, modelling transmission time."""
        while True:
            packet = (yield self.store.get())
            # Send message. BUG FIX: the original referenced an undefined name
            # `msg` here; the dequeued item is `packet`.
            yield self.env.timeout(packet.size * 8.0 / self.rate)
            self.out.put(packet)

    def put(self, pkt):
        """ Sends the packet 'pkt' to this element. """
        self.packets_received += 1
        now = self.env.now
        flow_id = pkt.flow_id
        # Update of auxVC for the flow. We assume that vticks is the desired bit time
        # i.e., the inverse of the desired bits per second data rate.
        # Hence we then multiply this value by the size of the packet in bits.
        self.auxVCs[flow_id] = max(
            now, self.auxVCs[flow_id]) + self.vticks[flow_id] * pkt.size * 8.0
        # Lots of work to do here to implement the queueing discipline
        return self.store.put((self.auxVCs[flow_id], pkt))
993,064 | 6936dcc69833b4324b71e112a5e4d31e2bc0a822 | import numpy as np
import tqdm
import time
import os
import pandas as pd
import tensorflow as tf
from flearn.utils.model_utils import gen_batch
from .fedbase import BaseFedarated
class Adam:
    """Adam optimizer over raw numpy arrays, keyed by a caller-supplied id.

    Moment buffers are created lazily per id `i`; the caller is expected to
    advance the step counter via increase_n() before the first update (the
    bias-correction denominators are zero while n == 0).
    """

    def __init__(self, lr=0.01, betas=(0.9, 0.999), eps=1e-08):
        self.lr = lr
        self.beta1, self.beta2 = betas
        self.eps = eps
        self.m = dict()
        self.v = dict()
        self.n = 0
        self.creted_momtem_grad_index = set()

    def __call__(self, params, grads, i):
        # Lazily allocate first/second moment buffers for this parameter id.
        if i not in self.m:
            self.m[i] = np.zeros_like(params)
        if i not in self.v:
            self.v[i] = np.zeros_like(params)
        b1, b2 = self.beta1, self.beta2
        self.m[i] = b1 * self.m[i] + (1 - b1) * grads
        self.v[i] = b2 * self.v[i] + (1 - b2) * np.square(grads)
        # Bias-corrected step size, then the in-place parameter update.
        step = self.lr * np.sqrt(1 - np.power(b2, self.n)) / (1 - np.power(b1, self.n))
        params -= step * self.m[i] / (np.sqrt(self.v[i]) + self.eps)

    def increase_n(self):
        self.n += 1
class FedMetaBaseServer(BaseFedarated):
    def __init__(self, params, learner, dataset):
        """Federated meta-learning server.

        Reads meta_algo / num_inner_steps / lr / outer_lr from `params`,
        builds the inner-loop TF Adam optimizer, initializes the base server,
        splits clients into train/eval/test, and selects the meta-algorithm
        implementation (only 'maml' and 'meta_sgd' are supported).
        """
        print('Using Federated-Meta to Train')
        self.meta_algo = params["meta_algo"]
        self.num_fine_tune = params['num_inner_steps']
        # Inner-loop (client-side) optimizer.
        inner_opt = tf.train.AdamOptimizer(params['lr'])
        #
        append = f'meta_algo[{self.meta_algo}]_outer_lr{params["outer_lr"]}_finetune{self.num_fine_tune}'
        super(FedMetaBaseServer, self).__init__(params, learner, dataset, optimizer=inner_opt, append2metric=append)
        self.split_clients()
        # Mini-batch generators for every client (currently disabled):
        # self.train_support_batches, self.train_query_batches = self.generate_mini_batch_generator(self.train_clients)
        # self.test_support_batches, self.test_query_batches = self.generate_mini_batch_generator(self.test_clients)
        if self.meta_algo in ['maml', 'meta_sgd']:
            self.impl = self._impl_maml
        else:
            raise NotImplementedError
        print('Using ', params['meta_algo'], "as implement of FedMeta")
        self.outer_lr = params['outer_lr']
        # Server-side (outer-loop) optimizer: the numpy Adam defined above.
        self.optimizer = Adam(lr=self.outer_lr)
def split_clients(self):
"""
拆分客户端
:return:
"""
train_rate = int(0.8 * self.num_clients)
val_rate = int(0.1 * self.num_clients)
test_rate = self.num_clients - train_rate - val_rate
assert test_rate > 0 and val_rate > 0 and test_rate > 0, '不能为空'
ind = np.random.permutation(self.num_clients)
arryed_cls = np.asarray(self.clients)
self.train_clients = arryed_cls[ind[:train_rate]]
self.eval_clients = arryed_cls[ind[train_rate:train_rate + val_rate]]
self.test_clients = arryed_cls[ind[train_rate + val_rate:]]
#
print('用于测试的客户端数量{}, 用于验证:{}, 用于测试: {}'.format(len(self.train_clients), len(self.eval_clients), len(self.test_clients)))
def local_test_only_acc(self, round_i, on_train=False, sync_params=True):
"""
基于测试集的客户端
:param round_i:
:param on_train:
:param sync_params:
:return:
"""
num_samples = []
tot_correct = []
tot_losses = []
begin_time = time.time()
for c in self.test_clients:
if sync_params:
c.set_params(self.latest_model)
# 是不是需要新运行一遍 query
# for _ in range(self.num_fine_tune):
# support_batch = next(self.test_support_batches[c.id])
# c.solve_sgd(support_batch)
# 不需要结果
# c.model.solve_inner(data=c.train_data, client_id=c.id, round_i=round_i, num_epochs=1, batch_size=self.batch_size, hide_output=True)
# all_x, all_y = np.concatenate((c.train_data['x'], c.eval_data['x']), axis=0), np.concatenate((c.train_data['y'], c.eval_data['y']), axis=0)
# correct, loss = c.model.test((all_x, all_y))
# ds = len(all_y)
# 这里的参数已经更新
# correct, loss, ds = c.test(on_train=False)
# support_batch = next(self.test_support_batches[c.id])
# query_batch = next(self.test_query_batches[c.id])
# correct, loss = c.model.test_meta(support_batch, query_batch)
# ds = len(query_batch[1])
# support_batch = next(self.test_support_batches[c.id])
# query_batch = next(self.test_query_batches[c.id])
# client_wise_correct = []
# client_wise_size = []
# client_wise_loss = []
# for _ in range(self.num_fine_tune):
# correct, loss = c.model.test_meta(support_batch, query_batch)
# # 这里的 correct loss 是 query 上的
# ds = len(query_batch[1])
# client_wise_loss.append(loss)
# client_wise_size.append(ds)
# client_wise_correct.append(correct)
# tot_correct.extend(client_wise_correct)
# num_samples.extend(client_wise_size)
# tot_losses.extend(client_wise_loss)
correct, loss = c.model.test_meta(c.train_data, c.eval_data)
ds = c.num_test_samples + c.num_train_samples
tot_correct.append(correct)
num_samples.append(ds)
tot_losses.append(loss)
end_time = time.time()
# 计算平均的数据
# 平均的准确率
avg_correct = np.sum(tot_correct) * 1.0 / np.sum(num_samples)
# 注意, 所有的 loss 都是 reduce_mean 的
avg_loss = np.dot(tot_losses, num_samples) * 1.0 / np.sum(num_samples)
stats = {'loss': avg_loss, 'acc': avg_correct, 'time': end_time - begin_time}
# 始终不隐藏
print('>>> On {} dataset: round: {} / acc: {:.3%} / '
'loss: {:.4f} / Time: {:.2f}s'.format(
'Train' if on_train else 'Test',
round_i, stats['acc'],
stats['loss'], stats['time']))
if on_train:
self.metrics.update_train_stats_only_acc_loss(round_i=round_i, train_stats=stats)
else:
self.metrics.update_eval_stats(round_i=round_i, eval_stats=stats)
return stats
def select_clients(self, round_i, num_clients=20):
"""
使用随机采样
:param round_i:
:param num_clients:
:return:
"""
np.random.seed(round_i) # make sure for each comparison, we are selecting the same clients each round
indices = np.random.choice(range(len(self.train_clients)), num_clients, replace=False)
return indices, self.train_clients[indices]
def generate_mini_batch_generator(self, clients, num_fine_tune=1):
train_batches = {}
for c in clients:
train_batches[c.id] = gen_batch(c.train_data, self.batch_size, (self.num_rounds + 1) * num_fine_tune)
test_batches = {}
for c in clients:
test_batches[c.id] = gen_batch(c.eval_data, self.batch_size, (self.num_rounds + 1) * num_fine_tune)
return train_batches, test_batches
def aggregate_gd(self, weights_before, grads, train_size):
"""
这里的 sols 定义为梯度
:param wsolns:
:return:
"""
# m = len(grads)
# g = []
# for i in range(len(grads[0])):
# # i 表示的当前的梯度的 index
# # 总是 client 1 的梯度的形状
# grad_sum = np.zeros_like(grads[0][i])
# # num_sz = 0
# for ic in range(m):
# grad_sum += grads[ic][i] # * train_size[ic]
# # num_sz += train_size[ic]
# # grad_sum /= num_sz
# # 累加之后, 进行梯度下降
# g.append(grad_sum)
# return [u - (v * self.outer_lr / m) for u, v in zip(weights_before, g)]
####
# m = len(grads)
# g = []
# for i in range(len(grads[0])):
# # i 表示的当前的梯度的 index
# # 总是 client 1 的梯度的形状
# grad_sum = np.zeros_like(grads[0][i])
# num_sz = 0
# for ic in range(len(grads)):
# grad_sum += grads[ic][i] * train_size[ic]
# num_sz += train_size[ic]
# grad_sum /= num_sz
# # 累加之后, 进行梯度下降
# g.append(grad_sum)
# return [u - (v * self.outer_lr) for u, v in zip(weights_before, g)]
###########
m = len(grads)
g = []
for i in range(len(grads[0])):
# i 表示的当前的梯度的 index
# 总是 client 1 的梯度的形状
grad_sum = np.zeros_like(grads[0][i])
for ic in range(len(grads)):
grad_sum += grads[ic][i]
# 累加之后, 进行梯度下降
g.append(grad_sum)
# 普通的梯度下降 [u - (v * self.outer_lr / m) for u, v in zip(weights_before, g)]
self.optimizer.increase_n()
new_weights = weights_before # [w.copy() for w in weights_before]
for i in range(len(new_weights)):
self.optimizer(new_weights[i], g[i] / m, i=i)
return new_weights
########
# m = len(grads)
# g = []
# for i in range(len(grads[0])):
# # i 表示的当前的梯度的 index
# # 总是 client 1 的梯度的形状
# grad_sum = np.zeros_like(grads[0][i])
# num_sz = 0
# for ic in range(len(grads)):
# grad_sum += grads[ic][i] * train_size[ic]
# num_sz += train_size[ic]
# grad_sum /= num_sz
# # 累加之后, 进行梯度下降
# g.append(grad_sum)
# # 普通的梯度下降 [u - (v * self.outer_lr / m) for u, v in zip(weights_before, g)]
# self.optimizer.increase_n()
# new_weights = [w.copy() for w in weights_before]
# for i in range(len(new_weights)):
# self.optimizer(new_weights[i], g[i], i=i)
# return new_weights
def _impl_maml(self, clients, round_i):
"""
FedMetaMAML
:param clients:
:return:
"""
grads = [] # 记录客户端运行的数据
comps = []
weight_before = clients[0].get_params()
train_size = []
for c in clients: # simply drop the slow devices
# communicate the latest model
c.set_params(self.latest_model)
# support_batch = next(self.train_support_batches[c.id])
# query_batch = next(self.train_query_batches[c.id])
# 这里的梯度的需要根绝
# for _ in range(self.num_fine_tune):
# grads1, loss1, weights1, comp1 = c.model.solve_sgd_meta(support_batch)
# 基于 query, 这时候网络的参数为 theta'
# grads2, loss2, weights2, comp2 = c.model.solve_sgd_meta(support_batch, query_batch)
# comp += comp2
# _, comp1 = c.model.solve_inner(data=c.train_data, client_id=c.id, round_i=round_i, num_epochs=1, batch_size=self.batch_size, hide_output=self.hide_client_output)
# client_grads, comp2 = c.model.solve_inner_support_query(data=c.eval_data, client_id=c.id, round_i=round_i, num_epochs=1,
# batch_size=self.batch_size, hide_output=self.hide_client_output)
# grads2, loss2, comp = c.model.solve_sgd_meta(c.train_data, c.eval_data, self.batch_size)
grads2, loss2, comp, ds = c.model.solve_sgd_meta_full_data(c.train_data, c.eval_data)
grads.append(grads2)
comps.append(comp)
train_size.append(ds)
return weight_before, grads, comps, train_size
def eval_to_file(self, round_i, sync_params=True):
"""
测试模型在所有客户端上的准确率
:param round_i:
:param on_train: 是否是训练数据
:param sync_params: 同步参数(如果量此调用, 第二次可以设为 False)
:return: {'loss': loss, 'acc': acc, 'time': time_diff }
"""
# save_path = os.path.join(self.metric_prefix, 'eval_result_at_round_{}.csv'.format(round_i))
# df = pd.DataFrame(columns=['id', 'train_acc', 'train_loss', 'test_loss', 'test_acc'])
# if sync_params:
# self.client_model.set_params(self.latest_model)
# # begin_time = time.time()
# for i, c in enumerate(self.clients):
# train_correct, train_loss, train_ds = c.test(on_train=True)
# test_correct, test_loss, test_ds = c.test(on_train=False)
# # 添加数据信息
# df = df.append({'id': c.id, 'train_acc': train_correct / train_ds, 'test_acc': test_correct / test_ds,
# 'train_loss': train_loss, 'test_loss': test_loss}, ignore_index=True)
# print('Eval on client:', c.id)
# # end_time = time.time()
# # 保存为路径
# df.to_csv(save_path)
# print(f'>>> Saved eval result to "{save_path}"')
pass
def train(self):
'''Train using Federated Proximal'''
print('Training with {} workers ---'.format(self.clients_per_round))
for i in range(self.start_round, self.num_rounds):
# test model
if (i + 1) % self.eval_every_round == 0:
stats = self.local_test_only_acc(round_i=i, on_train=False, sync_params=True) # have set the latest model for all clients
# 接下来再运行必须重新设置网络的参数
# stats_train = self.local_test_only_acc(round_i=i, on_train=True, sync_params=False)
indices, selected_clients = self.select_clients(i, num_clients=self.clients_per_round) # uniform sampling
weight_before, grads, comps, train_size = self.impl(selected_clients, i)
# update models
self.latest_model = self.aggregate_gd(weight_before, grads, train_size)
if (i + 1) % self.save_every_round == 0:
self.save_model(i)
self.metrics.write()
# final test model
stats = self.local_test_only_acc(round_i=self.num_rounds, on_train=False,
sync_params=True) # have set the latest model for all clients
# stats_train = self.local_test_only_acc(round_i=self.num_rounds, on_train=True, sync_params=False)
self.eval_to_file(round_i=self.num_rounds, sync_params=True)
self.metrics.write()
self.save_model(self.num_rounds)
|
993,065 | 455d8ad2789235c8170a4325bee3c50e3df1da91 | # build url from ConductR base url and given path
def url(path, args):
    """Build the full ConductR URL for *path* from ``args.ip`` and ``args.port``."""
    return 'http://{}:{}/{}'.format(args.ip, args.port, path)
|
993,066 | d3c5775c3ad427e6fa76cc2430b37450f9e4a16b | from rest_framework import serializers
from .models import Power, Temp, Switches
class PowerSerializer(serializers.ModelSerializer):
    """Serializes Power readings (RMS volts/current, power figures, power factor)."""
    class Meta:
        model = Power
        # Field names mirror the Power model's (non-PEP8) attribute names.
        fields = ('id', 'VoltsRMS','CurrentRMS','ApparentPower','TruePower' ,'ReactivePower', 'Pf', 'date_created')
class TempSerializer(serializers.ModelSerializer):
    """Serializes temperature readings."""
    class Meta:
        model = Temp
        fields = ('id', 'temperature', 'date_created')
class SwitchesSerializer(serializers.ModelSerializer):
    """Serializes switch state (slider and button values)."""
    class Meta:
        model = Switches
        fields = ('id', 'Slider', 'Button')
993,067 | 22f4bd6c74184247b4ae2f78ab1c9a89875b1bc2 | from django.http import JsonResponse
from django.http import Http404
from django.views.decorators.http import require_http_methods
from django.core.cache import cache
from ..models import Redirect
@require_http_methods(["GET"])
def redirect(request, key):
    """Return the cached redirect entry for *key* as JSON, or raise 404.

    NOTE: this view's name shadows ``django.shortcuts.redirect``.
    """
    payload = get_from_cache(key)
    if not payload:
        raise Http404
    return JsonResponse(payload, content_type="application/json")
def get_from_cache(key):
    """Look *key* up in the cache; return ``{'key', 'url'}`` or None on miss."""
    url = cache.get(key)
    return {'key': key, 'url': url} if url else None
993,068 | b07737bf65ba4526915108e2aa674a0dfd6fb75f | import traceback
from datetime import datetime
from sqlalchemy import Column, Integer, String, Text, MetaData, ForeignKey, DateTime, Index, Boolean, func, Table, \
SmallInteger
from sqlalchemy import text
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from engine_factory import EngineFactory
from sqlalchemy_fulltext import FullText
Base = declarative_base()
class Log(Base):
    """A single application log record persisted to the ``logs`` table."""
    __tablename__ = 'logs'
    id = Column(Integer, primary_key=True)  # auto incrementing
    logger = Column(String(256))  # the name of the logger
    level = Column(String(50))  # info, debug, or error
    trace = Column(LONGTEXT())  # the full traceback printout
    msg = Column(LONGTEXT())  # any custom log message
    path = Column(String(64))  # request path that produced the log
    ip = Column(String(32))  # client IP address
    method = Column(String(64))  # HTTP method of the request
    created_at = Column(DateTime, default=func.now())  # the current timestamp
    def __init__(self, logger=None, level=None, trace=None, msg=None, path=None, ip=None, method=None):
        self.logger = logger
        self.level = level
        self.trace = trace
        self.msg = msg
        self.path = path
        self.ip = ip
        self.method = method
    def __unicode__(self):
        # Python 2 compatibility shim; delegates to __repr__.
        return self.__repr__()
    def __repr__(self):
        # NOTE(review): assumes created_at and msg are populated — would raise
        # on a freshly constructed, uncommitted instance. TODO confirm usage.
        return "<Log: %s - %s>" % (self.created_at.strftime('%m/%d/%Y-%H:%M:%S'), self.msg[:50])
class WikiDataProperty(Base):
    """A WikiData item property stored as raw JSON (table ``wikidata_property``)."""
    __tablename__ = 'wikidata_property'
    id = Column(Integer, primary_key=True, autoincrement=True)
    wd_item_id = Column(String(256), nullable=False, unique=True)
    property_name = Column(String(512), nullable=False)
    data_json = Column(LONGTEXT())
    __table_args__ = ({
        "mysql_charset": "utf8",
    })
    def __init__(self, wd_item_id, property_name, data_json):
        self.wd_item_id = wd_item_id
        self.property_name = property_name
        self.data_json = data_json
    def find_or_create(self, session, autocommit=True):
        """Return the persisted row matching this wd_item_id, inserting self if absent."""
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance
    def get_remote_object(self, session):
        """Return self if already persisted, else the DB row for wd_item_id, else None."""
        if self.id:
            return self
        else:
            try:
                return session.query(WikiDataProperty).filter(WikiDataProperty.wd_item_id == self.wd_item_id).first()
            except Exception:
                traceback.print_exc()
                return None
    @staticmethod
    def get_property_by_wd_item_id(session, wd_item_id):
        """Fetch a WikiDataProperty by wd_item_id; None on miss or query error."""
        try:
            property_object = session.query(WikiDataProperty).filter(WikiDataProperty.wd_item_id == wd_item_id).first()
            return property_object
        except Exception:
            traceback.print_exc()
            return None
class EntityHeat(Base):
    """Popularity ("heat") score attached to an API entity (table ``api_heat``)."""
    __tablename__ = 'api_heat'
    id = Column(Integer, primary_key=True, autoincrement=True)
    heat = Column(Integer, index=True)
    api_id = Column(Integer, nullable=False, index=True)
    def __init__(self, heat, api_id):
        self.heat = heat
        self.api_id = api_id
    def __unicode__(self):
        # Python 2 compatibility shim; delegates to __repr__.
        return self.__repr__()
    def __repr__(self):
        return '<APIHeat: %r: api_id=%s >' % (self.heat, self.api_id)
class APIRelation(Base):
    """Directed relation between two API entities (table ``java_api_relation``)."""
    # Relation type constants.
    RELATION_TYPE_BELONG_TO = 1
    RELATION_TYPE_EXTENDS = 2
    RELATION_TYPE_IMPLEMENTS = 3
    RELATION_TYPE_SEE_ALSO = 4
    RELATION_TYPE_THROW_EXCEPTION = 5
    RELATION_TYPE_RETURN_VALUE = 6
    __tablename__ = 'java_api_relation'
    id = Column(Integer, primary_key=True, autoincrement=True)
    start_api_id = Column(Integer, ForeignKey('java_all_api_entity.id'), nullable=False, index=True)
    end_api_id = Column(Integer, ForeignKey('java_all_api_entity.id'), nullable=False, index=True)
    relation_type = Column(Integer, index=True)
    __table_args__ = (Index('unique_index', start_api_id, end_api_id, relation_type),
                      Index('all_relation_index', start_api_id, end_api_id),
                      {
                          "mysql_charset": "utf8",
                      })
    def __init__(self, start_api_id, end_api_id, relation_type):
        self.start_api_id = start_api_id
        self.end_api_id = end_api_id
        self.relation_type = relation_type
    def exist_in_remote(self, session):
        """Return True if this exact (start, end, type) triple is already stored."""
        try:
            if session.query(APIRelation.id).filter_by(start_api_id=self.start_api_id,
                                                       end_api_id=self.end_api_id,
                                                       relation_type=self.relation_type).first():
                return True
            else:
                return False
        except Exception:
            traceback.print_exc()
            return False
    def get_remote_object(self, session):
        """Return self if persisted, else the stored row for this triple, else None."""
        if self.id:
            return self
        else:
            try:
                return session.query(APIRelation).filter_by(start_api_id=self.start_api_id,
                                                            end_api_id=self.end_api_id,
                                                            relation_type=self.relation_type).first()
            except Exception:
                # Errors are swallowed deliberately; caller treats None as "not found".
                return None
    def find_or_create(self, session, autocommit=True):
        """Return the persisted row matching this triple, inserting self if absent."""
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance
    def __repr__(self):
        return '<APIRelation: %r-%r: type=%r >' % (self.start_api_id, self.end_api_id, self.relation_type)
# Many-to-many association table linking API entities to their textual aliases.
has_alias_table = Table('has_alias', Base.metadata,
                        Column('api_id', Integer, ForeignKey('java_all_api_entity.id')),
                        Column('alias_id', Integer, ForeignKey('java_api_alias.id'))
                        )
class APIAlias(Base, FullText):
    """A textual alias for one or more API entities (fulltext-indexed on ``alias``)."""
    __tablename__ = 'java_api_alias'
    __fulltext_columns__ = ('alias',)
    id = Column(Integer, primary_key=True, autoincrement=True)
    alias = Column(String(1024), nullable=False, index=True)
    type = Column(Integer, nullable=True, index=True)
    all_apis = relationship(
        "APIEntity",
        secondary=has_alias_table,
        back_populates="all_aliases")
    # Alias kind constants (examples use android.view.Button).
    ALIAS_TYPE_QUALIFIER_NAME = 1
    # simple name only, e.g. android.view.Button -> Button
    ALIAS_TYPE_SIMPLE_NAME = 2
    # api type + simple name, e.g. android.view.Button -> class Button
    ALIAS_TYPE_SIMPLE_NAME_WITH_TYPE = 3
    # methods only, with qualified parameter types, e.g. append(java.lang.Object)
    ALIAS_TYPE_SIMPLE_METHOD_WITH_QUALIFIER_PARAMETER_TYPE = 4
    ALIAS_TYPE_SIMPLE_CLASS_NAME_METHOD_WITH_QUALIFIER_PARAMETER_TYPE = 5
    ALIAS_TYPE_SIMPLE_NAME_METHOD_WITH_SIMPLE_PARAMETER_TYPE = 6
    ALIAS_TYPE_SIMPLE_CLASS_NAME_METHOD_WITH_SIMPLE_PARAMETER_TYPE = 7
    # fields etc.: keep the last two segments,
    # e.g. javax.xml.transform.OutputKeys.DOCTYPE_SYSTEM -> OutputKeys.DOCTYPE_SYSTEM
    ALIAS_TYPE_SIMPLE_PARENT_API_NAME_WITH_SIMPLE_NAME = 8
    # annotation references, e.g. @nullable
    ALIAS_TYPE_ANNOTATION_REFERENCE = 9
    ALIAS_TYPE_CAMEL_CASE_TO_SPACE = 10
    ALIAS_TYPE_UNDERLINE_TO_SPACE = 11
    __table_args__ = (Index('alias_type_index', alias, type), {
        "mysql_charset": "utf8",
    })
    def __init__(self, alias, type):
        self.alias = alias
        self.type = type
    def find_or_create(self, session, autocommit=True):
        """Return the persisted row matching (alias, type), inserting self if absent."""
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance
    def get_remote_object(self, session):
        """Case-sensitive (BINARY) lookup by (alias, type); self if already persisted."""
        if self.id:
            return self
        else:
            try:
                return session.query(APIAlias).filter(APIAlias.alias == func.binary(self.alias),
                                                      APIAlias.type == self.type,
                                                      ).first()
            except Exception:
                traceback.print_exc()
                return None
    @staticmethod
    def aliases_to_apis(aliases):
        """Return the set union of all APIEntity objects referenced by *aliases*."""
        api_entity_set = set()
        for alias in aliases:
            for api in alias.all_apis:
                api_entity_set.add(api)
        return api_entity_set
    def __eq__(self, other):
        # Equality is by primary key only.
        if isinstance(other, APIAlias):
            return self.id == other.id
        else:
            return False
    def __hash__(self):
        return hash(self.id)
    def __repr__(self):
        return '<APIAlias: id=%r alias=%r type=%r >' % (self.id, self.alias, self.type)
class APIDocumentWebsite(Base):
    """Documentation URL for an API entity (table ``java_api_document_website``)."""
    __tablename__ = 'java_api_document_website'
    id = Column(Integer, primary_key=True, autoincrement=True)
    api_id = Column(Integer, ForeignKey('java_all_api_entity.id'), nullable=False)
    document_website = Column(String(512), nullable=False)
    __table_args__ = (Index('api_document_website_index', api_id, document_website), {
        "mysql_charset": "utf8",
    })
    def __init__(self, api_id, document_website):
        self.api_id = api_id
        self.document_website = document_website
    def find_or_create(self, session, autocommit=True):
        """Return the persisted (api_id, website) row, inserting self if absent."""
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance
    def get_remote_object(self, session):
        """Case-sensitive lookup by (api_id, document_website); self if persisted."""
        if self.id:
            return self
        else:
            try:
                return session.query(APIDocumentWebsite).filter(APIDocumentWebsite.api_id == self.api_id,
                                                                APIDocumentWebsite.document_website == func.binary(
                                                                    self.document_website)).first()
            except Exception:
                traceback.print_exc()
                return None
    @staticmethod
    def get_api_id_by_website(session, website):
        """Return the api_id registered for *website*, or None on miss/error."""
        try:
            api_id = session.query(APIDocumentWebsite.api_id).filter_by(document_website=website).scalar()
            return api_id
        except Exception:
            traceback.print_exc()
            return None
class APIEntity(Base):
    """A Java API element — package, class, method, field, etc. (table ``java_all_api_entity``)."""
    # API type constants.
    API_TYPE_ALL_API_ENTITY = -1
    API_TYPE_UNKNOWN = 0
    API_TYPE_PACKAGE = 1
    API_TYPE_CLASS = 2
    API_TYPE_INTERFACE = 3
    API_TYPE_EXCEPTION = 4
    API_TYPE_ERROR = 5
    API_TYPE_FIELD = 6
    API_TYPE_CONSTRUCTOR = 7
    API_TYPE_ENUM_CLASS = 8
    API_TYPE_ANNOTATION = 9
    API_TYPE_XML_ATTRIBUTE = 10
    API_TYPE_METHOD = 11
    API_TYPE_ENUM_CONSTANTS = 12
    API_TYPE_PRIMARY_TYPE = 13
    __tablename__ = 'java_all_api_entity'
    id = Column(Integer, primary_key=True, autoincrement=True)
    api_type = Column(Integer, default=API_TYPE_UNKNOWN, index=True)
    qualified_name = Column(String(1024), index=True)
    full_declaration = Column(String(1024), nullable=True, index=True)
    short_description = Column(Text(), nullable=True)
    added_in_version = Column(String(128), nullable=True)
    document_websites = relationship("APIDocumentWebsite", foreign_keys=[APIDocumentWebsite.api_id], backref="api")
    out_relation = relationship('APIRelation', foreign_keys=[APIRelation.start_api_id],
                                backref='start_api')
    in_relation = relationship('APIRelation', foreign_keys=[APIRelation.end_api_id],
                               backref='end_api')
    all_aliases = relationship(
        "APIAlias",
        secondary=has_alias_table,
        back_populates="all_apis")
    __table_args__ = {
        "mysql_charset": "utf8"
    }
    def __init__(self, qualified_name, api_type, full_declaration=None, short_description=None, added_in_version=None):
        self.api_type = api_type
        self.qualified_name = qualified_name
        self.full_declaration = full_declaration
        self.short_description = short_description
        self.added_in_version = added_in_version
    def find_or_create(self, session, autocommit=True):
        """Return the persisted entity with this qualified name, inserting self if absent."""
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance
    def get_remote_object(self, session):
        """Case-sensitive (BINARY) lookup by qualified_name; self if already persisted."""
        if self.id:
            return self
        else:
            try:
                return session.query(APIEntity).filter(
                    APIEntity.qualified_name == func.binary(self.qualified_name)).first()
            except Exception:
                traceback.print_exc()
                return None
    @staticmethod
    def exist(session, qualified_name):
        """Return True/False whether the qualified name exists; None on query error."""
        try:
            if session.query(APIEntity.id).filter(APIEntity.qualified_name == func.binary(qualified_name)).first():
                return True
            else:
                return False
        except Exception:
            traceback.print_exc()
            return None
    @staticmethod
    def find_by_id(session, api_entity_id):
        """Fetch an APIEntity by primary key; None on miss or error."""
        try:
            return session.query(APIEntity).filter(APIEntity.id == api_entity_id).first()
        except Exception:
            return None
    @staticmethod
    def find_by_qualifier(session, qualified_name):
        """Case-sensitive fetch by qualified name; None on miss or error."""
        try:
            return session.query(APIEntity).filter(APIEntity.qualified_name == func.binary(qualified_name)).first()
        except Exception:
            traceback.print_exc()
            return None
    @staticmethod
    def get_api_type_string(type):
        """Map an API type constant to human-readable type names; [] if unknown."""
        if type == APIEntity.API_TYPE_UNKNOWN:
            return []
        if type == APIEntity.API_TYPE_PACKAGE:
            return ["package"]
        if type == APIEntity.API_TYPE_CLASS:
            return ["class"]
        if type == APIEntity.API_TYPE_INTERFACE:
            return ["interface", "class"]
        if type == APIEntity.API_TYPE_EXCEPTION:
            return ["exception", "class"]
        if type == APIEntity.API_TYPE_ERROR:
            return ["error", "class"]
        if type == APIEntity.API_TYPE_FIELD:
            return ["field", "constant"]
        if type == APIEntity.API_TYPE_CONSTRUCTOR:
            return ["constructor", "constructor method"]
        if type == APIEntity.API_TYPE_ENUM_CLASS:
            return ["enum", "constant", "enum class"]
        if type == APIEntity.API_TYPE_ANNOTATION:
            return ["annotation"]
        if type == APIEntity.API_TYPE_XML_ATTRIBUTE:
            return ["XML attribute", "attribute"]
        if type == APIEntity.API_TYPE_METHOD:
            return ["API", "method"]
        if type == APIEntity.API_TYPE_ENUM_CONSTANTS:
            return ["constant", "enum constant"]
        return []
    def __repr__(self):
        return '<APIEntity: id=%r name=%r>' % (self.id, self.qualified_name)
    def __eq__(self, other):
        # Equality is by primary key only.
        if isinstance(other, APIEntity):
            return self.id == other.id
        else:
            return False
    def __hash__(self):
        return hash(self.id)
    @staticmethod
    def type_string_to_api_type_constant(api_type_string):
        """Parse a type name string (case-insensitive) into an API_TYPE_* constant."""
        if not api_type_string:
            return APIEntity.API_TYPE_UNKNOWN
        api_type_string = api_type_string.strip()
        if not api_type_string:
            return APIEntity.API_TYPE_UNKNOWN
        api_type_string = api_type_string.lower()
        if api_type_string == "package":
            return APIEntity.API_TYPE_PACKAGE
        if api_type_string == "class":
            return APIEntity.API_TYPE_CLASS
        if api_type_string == "interface":
            return APIEntity.API_TYPE_INTERFACE
        if api_type_string == "error":
            return APIEntity.API_TYPE_ERROR
        if api_type_string == "enum":
            return APIEntity.API_TYPE_ENUM_CLASS
        if api_type_string == "exception":
            return APIEntity.API_TYPE_EXCEPTION
        if api_type_string == "annotation type" or api_type_string == "annotation":
            return APIEntity.API_TYPE_ANNOTATION
        if api_type_string == "method":
            return APIEntity.API_TYPE_METHOD
        if api_type_string == "constructor":
            return APIEntity.API_TYPE_CONSTRUCTOR
        if api_type_string == "nested" or api_type_string == "nested class":
            return APIEntity.API_TYPE_CLASS
        if api_type_string == "required":
            return APIEntity.API_TYPE_FIELD
        if api_type_string == "optional":
            return APIEntity.API_TYPE_FIELD
        if api_type_string == "field":
            return APIEntity.API_TYPE_FIELD
        if api_type_string == "enum constant":
            return APIEntity.API_TYPE_ENUM_CONSTANTS
        return APIEntity.API_TYPE_UNKNOWN
    @staticmethod
    def api_type_belong_to_relation(api_type, subject_api_type):
        """True when *api_type* is the same as, or a subtype of, *subject_api_type*.

        E.g. constructors count as methods; interfaces/errors/enums/exceptions
        count as classes; enum constants count as fields.
        """
        if api_type == subject_api_type:
            return True
        if subject_api_type == APIEntity.API_TYPE_METHOD:
            if api_type == APIEntity.API_TYPE_CONSTRUCTOR:
                return True
        if subject_api_type == APIEntity.API_TYPE_CLASS:
            if api_type == APIEntity.API_TYPE_INTERFACE:
                return True
            if api_type == APIEntity.API_TYPE_ERROR:
                return True
            if api_type == APIEntity.API_TYPE_ENUM_CLASS:
                return True
            if api_type == APIEntity.API_TYPE_EXCEPTION:
                return True
        if subject_api_type == APIEntity.API_TYPE_FIELD:
            if api_type == APIEntity.API_TYPE_ENUM_CONSTANTS:
                return True
        return False
class APIEntityProperty(Base):
    """Arbitrary (name, value) property attached to an API entity (table ``java_api_property``)."""
    __tablename__ = 'java_api_property'
    id = Column(Integer, primary_key=True, autoincrement=True)
    api_id = Column(Integer, ForeignKey('java_all_api_entity.id'), nullable=False, index=True)
    property_name = Column(String(512), nullable=False, index=True)
    property_value = Column(LONGTEXT(), nullable=True)  # plain text, no html tags
    __table_args__ = (Index('api_id_property_name_index', api_id, property_name, unique=True), {
        "mysql_charset": "utf8",
    })
    def __init__(self, api_id, property_name, property_value):
        self.api_id = api_id
        self.property_name = property_name
        self.property_value = property_value
    def create(self, session, autocommit=True):
        """Insert this row; commit unless *autocommit* is False."""
        session.add(self)
        if autocommit:
            session.commit()
        return self
class APIHTMLText(Base):
    """Raw HTML documentation fragment for an API entity (table ``java_api_html_text``)."""
    __tablename__ = 'java_api_html_text'
    id = Column(Integer, primary_key=True, autoincrement=True)
    api_id = Column(Integer, ForeignKey('java_all_api_entity.id'), nullable=False)
    html = Column(LONGTEXT(), nullable=False)
    clean_text = Column(LONGTEXT(), nullable=True)  # text with no html tags
    reserve_part_tag_text = Column(LONGTEXT(), nullable=True)  # text keeping only code-tag content
    html_type = Column(Integer, nullable=True)
    __table_args__ = (Index('api_id_text_type_index', api_id, html_type), {
        "mysql_charset": "utf8",
    })
    # HTML fragment kind constants.
    HTML_TYPE_UNKNOWN = 0
    HTML_TYPE_API_DECLARATION = 1
    HTML_TYPE_API_SHORT_DESCRIPTION = 2
    HTML_TYPE_API_DETAIL_DESCRIPTION = 3
    HTML_TYPE_METHOD_RETURN_VALUE_DESCRIPTION = 4
    def __init__(self, api_id, html, html_type=HTML_TYPE_UNKNOWN):
        self.api_id = api_id
        self.html = html
        self.html_type = html_type
    def create(self, session, autocommit=True):
        """Insert this row; commit unless *autocommit* is False."""
        session.add(self)
        if autocommit:
            session.commit()
        return self
class DocumentSourceRecord(Base):
    """Provenance record: which KG table/row a document text was imported from."""
    __tablename__ = 'java_document_source_record'
    id = Column(Integer, primary_key=True, autoincrement=True)
    doc_id = Column(Integer, ForeignKey('java_api_document_text.id'), unique=True, nullable=False)
    kg_table_id = Column(Integer, nullable=False)
    kg_table_primary_key = Column(Integer, nullable=False)
    __table_args__ = (Index('source_table_index', kg_table_id, kg_table_primary_key), {
        "mysql_charset": "utf8",
    })
    def __init__(self, doc_id, kg_table_id, kg_table_primary_key):
        self.doc_id = doc_id
        self.kg_table_id = kg_table_id
        self.kg_table_primary_key = kg_table_primary_key
    def create(self, session, autocommit=True):
        """Insert this row; commit unless *autocommit* is False."""
        session.add(self)
        if autocommit:
            session.commit()
        return self
    @staticmethod
    def exist_import_record(session, kg_table_id, kg_table_primary_key):
        """Return True if the given KG table row has already been imported."""
        try:
            team = session.query(DocumentSourceRecord).filter_by(kg_table_id=kg_table_id,
                                                                 kg_table_primary_key=kg_table_primary_key).first()
            if team:
                return True
            else:
                return False
        except Exception:
            traceback.print_exc()
            return False
class DocumentText(Base):
    """Tag-free plain-text rendering of one HTML fragment (table ``java_api_document_text``)."""
    __tablename__ = 'java_api_document_text'
    id = Column(Integer, primary_key=True, autoincrement=True)
    html_text_id = Column(Integer, ForeignKey('java_api_html_text.id'), unique=True, nullable=False)
    text = Column(LONGTEXT(), nullable=True)  # text with no html tags
    # NOTE(review): index name duplicates APIHTMLText's 'api_id_text_type_index';
    # index names are per-table in MySQL so this works, but it is confusing.
    __table_args__ = (Index('api_id_text_type_index', html_text_id), {
        "mysql_charset": "utf8",
    })
    def __init__(self, html_text_id, text):
        self.html_text_id = html_text_id
        self.text = text
    def create(self, session, autocommit=True):
        """Insert this row; commit unless *autocommit* is False."""
        session.add(self)
        if autocommit:
            session.commit()
        return self
    @staticmethod
    def get_doc_id_list(session):
        """Return all document ids, or None on query error."""
        try:
            doc_id_list = session.query(DocumentText.id).all()
            return doc_id_list
        except Exception:
            traceback.print_exc()
            return None
class DocumentSentenceText(Base):
    """One sentence of a document text (table ``java_api_document_sentence_text``)."""
    __tablename__ = 'java_api_document_sentence_text'
    id = Column(Integer, primary_key=True, autoincrement=True)
    doc_id = Column(Integer, ForeignKey('java_api_document_text.id'), nullable=False, index=True)
    sentence_index = Column(Integer, nullable=True)  # position of the sentence within the document
    text = Column(Text(), nullable=True)  # sentence text, no html tags
    __table_args__ = (Index('doc_id_sentence_index', doc_id, sentence_index), {
        "mysql_charset": "utf8",
    })
    def __init__(self, doc_id, sentence_index, text):
        self.doc_id = doc_id
        self.sentence_index = sentence_index
        self.text = text
    def create(self, session, autocommit=True):
        """Insert this row; commit unless *autocommit* is False."""
        session.add(self)
        if autocommit:
            session.commit()
        return self
    @staticmethod
    def exist_import_record(session, doc_id, sentence_index):
        """Return True if the sentence (doc_id, sentence_index) is already stored."""
        try:
            team = session.query(DocumentSentenceText).filter_by(doc_id=doc_id,
                                                                 sentence_index=sentence_index).first()
            if team:
                return True
            else:
                return False
        except Exception:
            traceback.print_exc()
            return False
    @staticmethod
    def get_sentence_list_by_doc_id(session, doc_id):
        """Return all sentence rows for *doc_id*, or None on query error."""
        try:
            sentence_list = session.query(DocumentSentenceText).filter_by(doc_id=doc_id).all()
            return sentence_list
        except Exception:
            traceback.print_exc()
            return None
class DocumentAnnotationStatus(Base):
    """Annotation progress of a document (table ``java_api_document_annotation_status``)."""
    __tablename__ = "java_api_document_annotation_status"
    id = Column(Integer, primary_key=True, autoincrement=True)
    doc_id = Column(Integer, ForeignKey('java_api_document_text.id'), nullable=False, index=True)
    status = Column(Integer, nullable=False, index=True, default=0)
    # Status constants.
    STATUS_TO_ANNOTATE = 0  # not started yet
    STATUS_UNFINISHED = 1  # annotation begun but not completed
    STATUS_ANNOTATED = 2  # fully annotated
    STATUS_SB_DOING = 3  # somebody is annotating right now
    def __init__(self, doc_id, status):
        self.doc_id = doc_id
        self.status = status
    def create(self, session, autocommit=True):
        """Insert this row; commit unless *autocommit* is False."""
        session.add(self)
        if autocommit:
            session.commit()
        return self
    @staticmethod
    def exist_import_record(session, doc_id):
        """Return True if a status row already exists for *doc_id*.

        BUGFIX: the original queried ``DocumentSentenceText`` (copy-paste
        from the sibling class), which answers "does the document have
        sentences" rather than "was a status row imported".
        """
        try:
            team = session.query(DocumentAnnotationStatus).filter_by(doc_id=doc_id).first()
            if team:
                return True
            else:
                return False
        except Exception:
            traceback.print_exc()
            return False
    @staticmethod
    def get_unfinished_doc_list(session):
        """Return doc_ids whose status is TO_ANNOTATE or UNFINISHED; None on error."""
        try:
            # Use the named constants instead of magic numbers [0, 1].
            unfinished_doc_list = session.query(DocumentAnnotationStatus.doc_id).filter(
                DocumentAnnotationStatus.status.in_([DocumentAnnotationStatus.STATUS_TO_ANNOTATE,
                                                     DocumentAnnotationStatus.STATUS_UNFINISHED])).all()
            return unfinished_doc_list
        except Exception:
            traceback.print_exc()
            return None
class DocumentSentenceTextAnnotation(Base):
    """Manual label (functionality / directive / other) for one document sentence."""
    __tablename__ = 'java_api_document_sentence_text_annotation'
    id = Column(Integer, primary_key=True, autoincrement=True)
    doc_id = Column(Integer, ForeignKey('java_api_document_text.id'), nullable=False, index=True)
    sentence_index = Column(Integer, nullable=True)  # position of the sentence within the document
    text = Column(Text(), nullable=True)  # sentence text, no html tags
    type = Column(Integer, nullable=True, index=True)  # annotated type (see constants below)
    __table_args__ = (Index('doc_id_sentence_index', doc_id, sentence_index), {
        "mysql_charset": "utf8",
    })
    # Annotation type constants.
    ANNOTATED_TYPE_FUNCTIONALITY = 1
    ANNOTATED_TYPE_DIRECTIVE = 2
    ANNOTATED_TYPE_OTHERS = 0
    def __init__(self, doc_id, sentence_index, text, type):
        self.doc_id = doc_id
        self.sentence_index = sentence_index
        self.text = text
        self.type = type
    def create(self, session, autocommit=True):
        """Insert this row; commit unless *autocommit* is False."""
        session.add(self)
        if autocommit:
            session.commit()
        return self
    @staticmethod
    def exist_import_record(session, doc_id, sentence_index):
        """Return True if this sentence has already been annotated.

        BUGFIX: the original queried ``DocumentSentenceText`` (copy-paste),
        which answers "does the sentence exist", not "was it annotated".
        """
        try:
            team = session.query(DocumentSentenceTextAnnotation).filter_by(doc_id=doc_id,
                                                                           sentence_index=sentence_index).first()
            if team:
                return True
            else:
                return False
        except Exception:
            traceback.print_exc()
            return False
    @staticmethod
    def get_annotation_by_index(session, doc_id, sentence_index):
        """Return the annotation type for (doc_id, sentence_index), or -1 if absent/error.

        BUGFIX: compare against None explicitly — ANNOTATED_TYPE_OTHERS is 0
        (falsy), so the original truthiness test mis-reported it as missing.
        """
        try:
            annotation_type = session.query(DocumentSentenceTextAnnotation.type).filter_by(
                doc_id=doc_id, sentence_index=sentence_index).scalar()
            if annotation_type is not None:
                return annotation_type
            else:
                return -1
        except Exception:
            traceback.print_exc()
            return -1
class APIInstanceEntityRelation(Base):
    """Directed relation between two API value-instance entities."""
    __tablename__ = 'java_api_value_instance_entity_relation'
    id = Column(Integer, primary_key=True, autoincrement=True)
    start_instance_id = Column(Integer, ForeignKey('java_api_value_instance_entity.id'), nullable=False, index=True)
    end_instance_id = Column(Integer, ForeignKey('java_api_value_instance_entity.id'), nullable=False, index=True)
    relation_type = Column(Integer, index=True)
    __table_args__ = (Index('unique_index', start_instance_id, end_instance_id, relation_type),
                      Index('all_relation_index', start_instance_id, end_instance_id),
                      {
                          "mysql_charset": "utf8",
                      })
    def __init__(self, start_instance_id, end_instance_id, relation_type):
        self.start_instance_id = start_instance_id
        self.end_instance_id = end_instance_id
        self.relation_type = relation_type
    def exist_in_remote(self, session):
        """Return True if this exact (start, end, type) triple is already stored."""
        try:
            if session.query(APIInstanceEntityRelation.id).filter_by(start_instance_id=self.start_instance_id,
                                                                     end_instance_id=self.end_instance_id,
                                                                     relation_type=self.relation_type).first():
                return True
            else:
                return False
        except Exception:
            traceback.print_exc()
            return False
    def get_remote_object(self, session):
        """Return self if persisted, else the stored row for this triple, else None."""
        if self.id:
            return self
        else:
            try:
                return session.query(APIInstanceEntityRelation).filter_by(start_instance_id=self.start_instance_id,
                                                                          end_instance_id=self.end_instance_id,
                                                                          relation_type=self.relation_type).first()
            except Exception:
                # Errors are swallowed deliberately; caller treats None as "not found".
                return None
    def find_or_create(self, session, autocommit=True):
        """Return the persisted row matching this triple, inserting self if absent."""
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance
    def __repr__(self):
        return '<APIInstanceEntityRelation: %r-%r: type=%r >' % (
            self.start_instance_id, self.end_instance_id, self.relation_type)
class APIInstanceEntity(Base):
    """A concrete value instance (return value, parameter, ...) extracted from
    API documentation, identified by its qualified_full_name."""
    # Role of the instance relative to its API.
    TYPE_UNKNOWN = 0
    TYPE_RETURN_VALUE = 1
    TYPE_PARAMETER = 2
    __tablename__ = 'java_api_value_instance_entity'
    id = Column(Integer, primary_key=True, autoincrement=True)
    instance_type = Column(Integer, default=TYPE_UNKNOWN, index=True)
    simple_type = Column(String(1024), index=True)
    qualified_type = Column(String(1024), index=True)
    formal_parameter_name = Column(String(1024), nullable=True, index=True)
    qualified_full_name = Column(String(1024), index=True)
    simple_full_name = Column(String(1024), index=True)
    # NOTE(review): indexing a TEXT column requires a prefix length on MySQL --
    # confirm this column/index combination actually builds.
    short_description = Column(Text(1024), nullable=True, index=True)
    # Relations where this instance is the start / end node, respectively.
    out_relation = relationship('APIInstanceEntityRelation', foreign_keys=[APIInstanceEntityRelation.start_instance_id],
                                backref='start_api_instance')
    in_relation = relationship('APIInstanceEntityRelation', foreign_keys=[APIInstanceEntityRelation.end_instance_id],
                               backref='end_api_instance')
    __table_args__ = {
        "mysql_charset": "utf8"
    }

    def __init__(self, simple_type, qualified_type, formal_parameter_name, qualified_full_name, simple_full_name):
        self.simple_type = simple_type
        self.qualified_type = qualified_type
        self.formal_parameter_name = formal_parameter_name
        self.qualified_full_name = qualified_full_name
        self.simple_full_name = simple_full_name

    def find_or_create(self, session, autocommit=True):
        # Insert self unless a row with the same qualified_full_name exists.
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance

    def get_remote_object(self, session):
        # func.binary forces a byte-wise (case-sensitive) match on MySQL.
        if self.id:
            return self
        else:
            try:
                return session.query(APIInstanceEntity).filter(
                    APIInstanceEntity.qualified_full_name == func.binary(self.qualified_full_name)).first()
            except Exception:
                traceback.print_exc()
                return None

    def __repr__(self):
        return '<APIInstanceEntity: id=%r qualified_full_name=%r>' % (self.id, self.qualified_full_name)

    def __hash__(self):
        # Hash by primary key. NOTE(review): __eq__ is not defined, so equality
        # stays identity-based while equal ids share a hash -- confirm intent.
        return hash(self.id)
class APIInstanceToAPIEntityRelation(Base):
    """Relation row linking a value-instance entity to an API entity.

    Bug fixes versus the original (which was copy-pasted from
    APIInstanceEntityRelation):
      * __init__ assigned plain attributes start_instance_id/end_instance_id
        that do not exist as columns, so instance_entity_id and api_id were
        never populated;
      * the query helpers filtered APIInstanceEntityRelation (the wrong table)
        using those nonexistent column names;
      * __repr__ reported the wrong class name.
    The constructor keeps its original parameter names for caller
    compatibility: start_instance_id maps to instance_entity_id and
    end_instance_id maps to api_id.
    """
    DIRECTION_INSTANCE_TO_API = 0
    DIRECTION_API_TO_INSTANCE = 1
    RELATION_TYPE_TYPE_OF = 1
    RELATION_TYPE_HAS_PARAMETER = 2
    RELATION_TYPE_RETURN = 3
    __tablename__ = 'instance_entity_to_api_relation'
    id = Column(Integer, primary_key=True, autoincrement=True)
    instance_entity_id = Column(Integer, ForeignKey('java_api_value_instance_entity.id'), nullable=False, index=True)
    api_id = Column(Integer, ForeignKey('java_all_api_entity.id'), nullable=False, index=True)
    relation_type = Column(Integer, index=True)
    relation_direction = Column(Integer, default=DIRECTION_INSTANCE_TO_API, index=True)
    __table_args__ = (Index('unique_index', instance_entity_id, api_id, relation_type, relation_direction),
                      Index('all_relation_index', instance_entity_id, api_id),
                      {
                          "mysql_charset": "utf8",
                      })

    def __init__(self, start_instance_id, end_instance_id, relation_type):
        self.instance_entity_id = start_instance_id
        self.api_id = end_instance_id
        self.relation_type = relation_type

    def exist_in_remote(self, session):
        """Return True when an identical relation row already exists."""
        try:
            if session.query(APIInstanceToAPIEntityRelation.id).filter_by(
                    instance_entity_id=self.instance_entity_id,
                    api_id=self.api_id,
                    relation_type=self.relation_type).first():
                return True
            else:
                return False
        except Exception:
            traceback.print_exc()
            return False

    def get_remote_object(self, session):
        """Return the persisted row matching this relation, or None."""
        if self.id:
            return self
        else:
            try:
                return session.query(APIInstanceToAPIEntityRelation).filter_by(
                    instance_entity_id=self.instance_entity_id,
                    api_id=self.api_id,
                    relation_type=self.relation_type).first()
            except Exception:
                return None

    def find_or_create(self, session, autocommit=True):
        """Insert this relation unless an equal row exists; return the canonical row."""
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance

    def __repr__(self):
        return '<APIInstanceToAPIEntityRelation: %r-%r: type=%r >' % (
            self.instance_entity_id, self.api_id, self.relation_type)
class LibraryEntity(Base):
    """A software library that APIs can belong to; (name, url) acts as the
    logical identity used by find_or_create."""
    __tablename__ = 'library_entity'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(256), index=True)
    version = Column(String(128), nullable=True)
    short_description = Column(Text(), nullable=True)
    url = Column(String(512), nullable=True, index=True)
    __table_args__ = (Index('name_url_index', "name", "url"), {
        "mysql_charset": "utf8",
    })

    def __init__(self, name, version, short_description, url):
        self.name = name
        self.version = version
        self.short_description = short_description
        self.url = url

    def find_or_create(self, session, autocommit=True):
        # Insert self unless a row with the same (name, url) exists.
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance

    def get_remote_object(self, session):
        # Persisted -> self; otherwise first (name, url) match, or None on
        # miss / query error.
        if self.id:
            return self
        else:
            try:
                return session.query(LibraryEntity).filter_by(name=self.name, url=self.url).first()
            except Exception:
                # traceback.print_exc()
                return None
class APIBelongToLibraryRelation(Base):
    """Join row stating that an API entity belongs to a library."""
    __tablename__ = 'api_belong_to_library_relation'
    id = Column(Integer, primary_key=True, autoincrement=True)
    api_id = Column(Integer, ForeignKey('java_all_api_entity.id'), nullable=False)
    library_id = Column(Integer, ForeignKey('library_entity.id'), nullable=False)
    __table_args__ = (Index('belong_to_index', api_id, library_id), {
        "mysql_charset": "utf8",
    })

    def __init__(self, api_id, library_id):
        self.api_id = api_id
        self.library_id = library_id

    def find_or_create(self, session, autocommit=True):
        # Insert self unless the same (api_id, library_id) pair already exists.
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance

    def get_remote_object(self, session):
        # Persisted -> self; otherwise first matching pair, None on miss/error.
        if self.id:
            return self
        else:
            try:
                return session.query(APIBelongToLibraryRelation).filter_by(api_id=self.api_id,
                                                                           library_id=self.library_id).first()
            except Exception:
                traceback.print_exc()
                return None
class KnowledgeTable(Base):
    """Registry entry describing a knowledge table located at
    (ip, schema, table_name); create_time is stamped at construction."""
    __tablename__ = 'knowledge_table'
    id = Column(Integer, primary_key=True, autoincrement=True)
    ip = Column(String(30), nullable=False)
    schema = Column(String(128), nullable=False)
    table_name = Column(String(128), nullable=False, index=True)
    description = Column(Text(), nullable=True)
    create_time = Column(DateTime(), nullable=True)
    __table_args__ = {
        "mysql_charset": "utf8"
    }

    def __init__(self, ip, schema, table_name, description):
        self.ip = ip
        self.schema = schema
        self.table_name = table_name
        self.description = description
        self.create_time = datetime.now()

    def find_or_create(self, session, autocommit=True):
        # Insert self unless an (ip, schema, table_name) match exists.
        remote_instance = self.get_remote_object(session)
        if not remote_instance:
            session.add(self)
            if autocommit:
                session.commit()
            return self
        else:
            return remote_instance

    def get_remote_object(self, session):
        # Uses .one(): raises on zero OR multiple matches; both cases are
        # swallowed here and reported as None.
        if self.id:
            return self
        else:
            try:
                return session.query(KnowledgeTable).filter_by(ip=self.ip, schema=self.schema,
                                                               table_name=self.table_name).one()
            except Exception:
                # traceback.print_exc()
                return None
class KnowledgeTableRowMapRecord(Base):
    """Maps a row id in one KnowledgeTable to the corresponding row id in
    another, recording when the mapping was imported."""
    __tablename__ = 'knowledge_table_row_map'
    id = Column(Integer, primary_key=True, autoincrement=True)
    start_table_id = Column(Integer, ForeignKey('knowledge_table.id'), nullable=False)
    end_table_id = Column(Integer, ForeignKey('knowledge_table.id'), nullable=False)
    start_row_id = Column(Integer, nullable=False, index=True)
    end_row_id = Column(Integer, nullable=False, index=True)
    valid = Column(Boolean, nullable=False, index=True, default=True)
    create_time = Column(DateTime(), nullable=False, index=True)
    __table_args__ = (Index('start_id_index', "start_table_id", "end_table_id", start_row_id),
                      Index('end_id_index', "start_table_id", "end_table_id", end_row_id), {
                          "mysql_charset": "utf8",
                      })

    def __init__(self, start_knowledge_table, end_knowledge_table, start_row_id, end_row_id):
        # Takes persisted KnowledgeTable objects -- their .id is read here.
        self.start_table_id = start_knowledge_table.id
        self.end_table_id = end_knowledge_table.id
        self.start_row_id = start_row_id
        self.end_row_id = end_row_id
        self.create_time = datetime.now()

    def create(self, session, autocommit=True):
        # Plain insert; duplicate checking is the caller's job
        # (see exist_import_record).
        session.add(self)
        if autocommit:
            session.commit()
        return self

    @staticmethod
    def exist_import_record(session, start_knowledge_table, end_knowledge_table, start_row_id):
        """
        Check whether start_row_id already has a mapping into end_knowledge_table.
        :param session:
        :param start_knowledge_table:
        :param end_knowledge_table:
        :param start_row_id:
        :return: True when a mapping row exists, False otherwise (including on query error)
        """
        try:
            team = session.query(KnowledgeTableRowMapRecord).filter_by(start_table_id=start_knowledge_table.id,
                                                                       end_table_id=end_knowledge_table.id,
                                                                       start_row_id=start_row_id).first()
            if team:
                return True
            else:
                return False
        except Exception:
            # traceback.print_exc()
            return False

    @staticmethod
    def get_end_row_id(session, start_knowledge_table, end_knowledge_table, start_row_id):
        """
        Return the mapped end_row_id for start_row_id, or None when absent or on error.
        :param session:
        :param start_knowledge_table:
        :param end_knowledge_table:
        :param start_row_id:
        :return:
        """
        try:
            end_row_id = session.query(KnowledgeTableRowMapRecord.end_row_id).filter_by(
                start_table_id=start_knowledge_table.id,
                end_table_id=end_knowledge_table.id,
                start_row_id=start_row_id).scalar()
            return end_row_id
        except Exception:
            traceback.print_exc()
            return None

    @staticmethod
    def get_transformed_table_data(session, start_knowledge_table, end_knowledge_table):
        # All mapping rows between the two tables, or None on query error.
        try:
            data_list = session.query(KnowledgeTableRowMapRecord).filter_by(
                start_table_id=start_knowledge_table.id,
                end_table_id=end_knowledge_table.id).all()
            return data_list
        except Exception:
            traceback.print_exc()
            return None
class PostsRecord(Base, FullText):
    """StackOverflow-style `posts` table (column names follow the SO data-dump
    schema) with a fulltext index on Title via the FullText mixin.

    NOTE(review): unlike the other models in this module, __init__ replaces the
    declarative constructor and stores a live session on the instance, so rows
    cannot be built with column kwargs and each instance is bound to a session
    -- confirm this is intentional before reusing this class.
    """
    __tablename__ = 'posts'
    __fulltext_columns__ = ('title',)
    id = Column(Integer, primary_key=True, autoincrement=True, name="Id")
    post_type_id = Column(SmallInteger, name="PostTypeId")
    accepted_answer_id = Column(Integer, name="AcceptedAnswerId")
    parent_id = Column(Integer, name="ParentId")
    score = Column(Integer, name="Score")
    view_count = Column(Integer, name="ViewCount")
    body = Column(Text(), name="Body")
    owner_user_id = Column(Integer, name="OwnerUserId")
    owner_display_name = Column(String(256), name="OwnerDisplayName")
    last_editor_user_id = Column(Integer, name="LastEditorUserId")
    last_edit_date = Column(DateTime(), name="LastEditDate")
    last_activity_date = Column(DateTime(), name="LastActivityDate")
    title = Column(String(256), name="Title")
    tags = Column(String(256), name="Tags")
    answer_count = Column(Integer, name="AnswerCount")
    comment_count = Column(Integer, name="CommentCount")
    favorite_count = Column(Integer, name="FavoriteCount")
    creation_date = Column(DateTime(), name="CreationDate")
    __table_args__ = ({
        "mysql_charset": "utf8",
    })

    def __init__(self, session=None):
        # Bind to the given session, or lazily create the StackOverflow one.
        self.session = session
        if self.session is None:
            self.session = self.get_so_session()

    def get_so_session(self):
        if not self.session:
            self.session = EngineFactory.create_so_session()
        return self.session

    def get_post_by_id(self, id_num):
        # Fetch one post by primary key and flatten it into a plain dict.
        # NOTE(review): raises AttributeError when the id is unknown
        # (query.get returns None) -- confirm callers guard against that.
        post_id_node = self.session.query(PostsRecord).get(id_num)
        post = {
            "id": post_id_node.id,
            "post_type_id": post_id_node.post_type_id,
            "accepted_answer_id": post_id_node.accepted_answer_id,
            "parent_id": post_id_node.parent_id,
            "score": post_id_node.score,
            "body": post_id_node.body,
            "owner_user_id": post_id_node.owner_user_id,
            "owner_display_name": post_id_node.owner_display_name,
            "last_editor_user_id": post_id_node.last_editor_user_id,
            "last_activity_date": post_id_node.last_activity_date,
            "title": post_id_node.title,
            "tags": post_id_node.tags,
            "answer_count": post_id_node.answer_count,
            "comment_count": post_id_node.comment_count,
            "favorite_count": post_id_node.favorite_count,
            "creation_date": post_id_node.creation_date,
        }
        return post

    def query_related_posts_by_string(self, str, top_number=10):
        # Exact-title match via a bound parameter (safe against SQL injection).
        # NOTE(review): the parameter shadows the builtin `str`; renaming would
        # change the keyword interface, so it is only flagged here. top_number
        # is currently unused -- the result set is not limited.
        # 'What is unit testing?'
        # todo ? limit the number of result
        post_nodes = self.session.query(PostsRecord).from_statement(
            text("SELECT * FROM posts where Title=:Title")).params(
            Title=str)
        post_results_id = []
        for p in post_nodes:
            post_results_id.append({"id": p.id})
        return post_results_id

    def __repr__(self):
        return '<POSTS: id=%r score=%r title=%r tags=%r>' % (self.id, self.score, self.title, self.tags)
class KnowledgeTableColumnMapRecord(Base):
    """Like KnowledgeTableRowMapRecord, but keyed additionally by
    start_row_name, so one source row can map per-column."""
    __tablename__ = 'knowledge_table_row_column_map'
    id = Column(Integer, primary_key=True, autoincrement=True)
    start_table_id = Column(Integer, ForeignKey('knowledge_table.id'), nullable=False)
    end_table_id = Column(Integer, ForeignKey('knowledge_table.id'), nullable=False)
    start_row_name = Column(String(128), nullable=False, index=True)
    start_row_id = Column(Integer, nullable=False, index=True)
    end_row_id = Column(Integer, nullable=False, index=True)
    valid = Column(Boolean, nullable=False, index=True, default=True)
    create_time = Column(DateTime(), nullable=False, index=True)
    __table_args__ = (Index('start_id_index', start_table_id, end_table_id, start_row_id, start_row_name),
                      Index('end_id_index', start_table_id, end_table_id, end_row_id, start_row_name),
                      {
                          "mysql_charset": "utf8",
                      })

    def __init__(self, start_knowledge_table, end_knowledge_table, start_row_id, end_row_id, start_row_name):
        # Takes persisted KnowledgeTable objects -- their .id is read here.
        self.start_table_id = start_knowledge_table.id
        self.end_table_id = end_knowledge_table.id
        self.start_row_id = start_row_id
        self.end_row_id = end_row_id
        self.start_row_name = start_row_name
        self.create_time = datetime.now()

    def create(self, session, autocommit=True):
        # Plain insert; duplicate checking is the caller's job.
        session.add(self)
        if autocommit:
            session.commit()
        return self

    @staticmethod
    def exist_import_record(session, start_knowledge_table, end_knowledge_table, start_row_id, start_row_name):
        """
        Check whether (start_row_id, start_row_name) already maps into end_knowledge_table.
        :param session:
        :param start_knowledge_table:
        :param end_knowledge_table:
        :param start_row_id:
        :return: True when a mapping row exists, False otherwise (including on query error)
        """
        try:
            team = session.query(KnowledgeTableColumnMapRecord).filter_by(start_table_id=start_knowledge_table.id,
                                                                          end_table_id=end_knowledge_table.id,
                                                                          start_row_id=start_row_id,
                                                                          start_row_name=start_row_name).first()
            if team:
                return True
            else:
                return False
        except Exception:
            traceback.print_exc()
            return False

    @staticmethod
    def get_end_row_id(session, start_knowledge_table, end_knowledge_table, start_row_id, start_row_name):
        """
        Return the mapped end_row_id for (start_row_id, start_row_name), or None.
        :param session:
        :param start_knowledge_table:
        :param end_knowledge_table:
        :param start_row_id:
        :return:
        """
        try:
            end_row_id = session.query(KnowledgeTableColumnMapRecord.end_row_id).filter_by(
                start_table_id=start_knowledge_table.id,
                end_table_id=end_knowledge_table.id,
                start_row_id=start_row_id,
                start_row_name=start_row_name).scalar()
            return end_row_id
        except Exception:
            # traceback.print_exc()
            return None
def parse_api_type_string_to_api_type_constant(api_type_string):
    """Map a documentation-derived type label to the matching
    APIEntity.API_TYPE_* constant.

    Unrecognized labels fall back to APIEntity.API_TYPE_UNKNOWN.
    """
    label_to_constant = {
        "Method": APIEntity.API_TYPE_METHOD,
        "Constructor": APIEntity.API_TYPE_CONSTRUCTOR,
        "Nested": APIEntity.API_TYPE_CLASS,
        "Required": APIEntity.API_TYPE_FIELD,
        "Optional": APIEntity.API_TYPE_FIELD,
        "Field": APIEntity.API_TYPE_FIELD,
        "Enum": APIEntity.API_TYPE_ENUM_CONSTANTS,
    }
    return label_to_constant.get(api_type_string, APIEntity.API_TYPE_UNKNOWN)
# if __name__ == "__main__":
# # create table in 75
# engine = EngineFactory.create_graphdata_engine_to_center()
# metadata = MetaData(bind=engine)
# Base.metadata.create_all(bind=engine)
if __name__ == "__main__":
    # Connect to the central MySQL server and create any missing tables
    # declared on Base's metadata (drop_all kept commented out on purpose).
    engine = EngineFactory.create_engine_to_center()
    metadata = MetaData(bind=engine)
    # delete all table
    # Base.metadata.drop_all(bind=engine)
    # create the table
    Base.metadata.create_all(bind=engine)
|
993,069 | f8055748b03b18039e413447183be145268c915c | import cv2
import matplotlib.pyplot as plt
import time
import numpy as np
def getss(list):
    """Return the population variance of `list` (per-row average intensities).

    Improvements: the original divided by len(list) on every loop iteration;
    the divisor is now computed once. An empty input -- which getdiff returns
    when an image fails to load -- previously raised ZeroDivisionError and now
    yields 0.0. (The parameter name `list` shadows the builtin but is kept for
    interface compatibility.)
    """
    if not list:
        return 0.0
    n = len(list)
    avg = sum(list) / n
    return sum((value - avg) ** 2 for value in list) / n
def getdiff(img):
    """Return the per-row mean gray intensity of `img` resized to 30x30.

    On any failure (e.g. a corrupt or unreadable image) 'error' is printed and
    the values collected so far -- usually an empty list -- are returned,
    preserving the original best-effort contract.

    Fix: the bare `except:` has been narrowed to `except Exception:` so
    KeyboardInterrupt/SystemExit propagate instead of printing 'error'.
    """
    avglist = []
    try:
        side_length = 30
        img = cv2.resize(img, (side_length, side_length), interpolation=cv2.INTER_CUBIC)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for row in range(side_length):
            avglist.append(sum(gray[row]) / len(gray[row]))
    except Exception:
        print('error')
    return avglist
def getSim(img1, img2):
    """Similarity score between two images: |var(profile2) - var(profile1)|,
    where a profile is the per-row mean intensity from getdiff.

    A sentinel of 10000 is returned when either image is missing.
    """
    if img1 is None or img2 is None:
        return 10000
    first_profile = getdiff(img1)
    second_profile = getdiff(img2)
    return np.abs(getss(second_profile) - getss(first_profile))
|
993,070 | 92c2458c30efc626c1b94a4dd76452d8e6db19ff | """
- Enter 'a' to add a movie, 'l' to see your movies, 'f' to find a movie, and 'q' to quit:
- Add movies
- See movies
- Find a movie
- Stop running the program
Tasks:
[X]: Decide where to store movies
[X]: What is the format of a movie?
[X]: Show the user the main interface and get their input
[X]: Allow users to add movies
[X]: Show all their movies
[X]: Find a movie
[X]: Stop running the program when they type 'q'
"""
"""
{
'name': 'The Matrix',
'director': 'Wachowskis',
'year': '1994'
}
"""
# In-memory movie store: each movie is a dict with string-valued
# 'name', 'director', and 'year' keys. Seeded with two examples;
# add_movie() appends to this list at runtime (nothing is persisted).
movies = [
    {
        'name': 'The Matrix',
        'director': 'Wachowskis',
        'year': '1994'
    },
    {
        'name': 'Moneyball',
        'director': 'Bennett',
        'year': '2011'
    }
]
def menu():
    """Run the interactive command loop until the user enters 'q'.

    Improvement: the long prompt string and input() call were duplicated
    before and inside the loop; a single prompt constant removes the
    repetition while keeping the dialogue byte-identical.
    """
    prompt = ("Enter 'a' to add a movie, 'l' to see your movies, "
              "'f' to find a movie, and 'q' to quit ")
    user_input = input(prompt)
    while user_input != 'q':
        if user_input == 'a':
            add_movie()
        elif user_input == 'l':
            show_movies()
        elif user_input == 'f':
            find_movie()
        else:
            print('Unknown command-please try again.')
        user_input = input(prompt)
def add_movie():
    """Prompt for a movie's details and append it to the global `movies` list."""
    # Dict-literal values evaluate in order, so the three prompts appear in
    # the same sequence as before.
    movies.append({
        'name': input("Enter the movie name: "),
        'director': input("Enter the movie director: "),
        'year': input("Enter the movie release year: "),
    })
def show_movies():
    """Print the details of every stored movie, in insertion order."""
    for entry in movies:
        show_movie_details(entry)
def show_movie_details(movie):
    """Print one movie dict as a bordered block of name/director/year lines."""
    details = [
        "------",
        f"Name: {movie['name']}",
        f"Director: {movie['director']}",
        f"Release year: {movie['year']}",
        "------",
    ]
    print("\n".join(details))
def find_movie():
    """Prompt for a filter field (1=name, 2=director, 3=year) and a value,
    then print every movie whose field equals the value exactly.

    Bug fix: a non-numeric filter choice previously crashed with ValueError at
    int(); it now falls through to the same "unknown command" message as an
    out-of-range number.
    """
    print('Find movie by')
    print('1. Name')
    print('2. Director')
    print('3. Year')
    options = ['name', 'director', 'year']
    filter_option = input("Select a filter: ")
    user_input = input("Enter a value to search: ")
    try:
        field_index = int(filter_option) - 1
    except ValueError:
        field_index = -1  # sentinel: handled by the range check below
    if 0 <= field_index <= 2:
        matches = [
            movie for movie in movies if movie[options[field_index]] == user_input]
        for movie in matches:
            show_movie_details(movie)
    else:
        print('Unknown command-please try again.')
# Entry point: start the interactive menu loop.
menu()
|
993,071 | b2c78d538588511e14bd1bf05c0601923fc00916 | from urllib.parse import urljoin
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from gitenberg.metadata.pandata import Pandata
from gitensite.apps.bookinfo.models import Author, Book, Cover
import requests
def addBookFromYaml(yaml):
    """Create or update a Book (plus covers and author metadata) from Pandata
    YAML. `yaml` may be a Pandata instance or a raw YAML string. Returns True.

    Fixes versus the original:
      * the "cover" branch counted existing covers via
        ``Cover(book=book).objects.all()`` -- Django managers are not
        accessible from instances, so this raised AttributeError; it now uses
        ``Cover.objects.filter(book=book).count()`` and also saves the
        modified cover row, which was never persisted;
      * ``book.yaml`` was assigned the bound ``__unicode__`` method instead of
        its string value;
      * the author is saved before being assigned to ``book.author`` so the
        foreign key sees a primary key.
    """
    if isinstance(yaml, Pandata):
        obj = yaml.metadata
    else:
        pandata = Pandata()
        pandata.load(yaml)
        obj = pandata.metadata
    (book, created) = Book.objects.get_or_create(book_id=int(obj['identifiers']['gutenberg']))
    if "_repo" in obj:
        book.repo_name = obj["_repo"]
    if "covers" in obj:
        # Delete any existing covers for this book
        Cover.objects.filter(book=book).delete()
        defaultCover = True
        for cover in obj["covers"]:
            # Upload cover to S3
            url = urljoin("https://raw.githubusercontent.com/GITenberg/" + obj["_repo"] + "/master/", cover["image_path"])
            r = requests.get(url)
            if r.status_code == 200:
                contentfile = ContentFile(r.content)
                # Get image file extension from original filename
                if "." in cover["image_path"]:
                    extension = cover["image_path"].split(".")[-1]
                else:
                    extension = "png"
                # str() guards against the identifier having been parsed as an int.
                uploadpath = str(obj['identifiers']['gutenberg']) + "." + extension
                # Add cover to database
                coverobject = Cover.objects.create(
                    book=book,
                    default_cover=defaultCover
                )
                coverobject.file.save(uploadpath, contentfile)
                coverobject.file.close()
                # The first cover added will be the default cover
                defaultCover = False
    creator = None
    if "creator" in obj:
        creator = obj["creator"]
    elif "metadata" in obj and "creator" in obj.metadata:
        creator = obj.metadata["creator"]
    if creator is not None and "author" in creator:
        (author, created) = Author.objects.get_or_create(name=creator["author"]["agent_name"])
        if "birthdate" in creator["author"]:
            author.birth_year = creator["author"]["birthdate"]
        if "deathdate" in creator["author"]:
            author.death_year = creator["author"]["deathdate"]
        author.save()  # save first so the FK assignment below sees a primary key
        book.author = author
    if "cover" in obj:
        num_existing_covers = Cover.objects.filter(book=book).count()
        (cover, created) = Cover.objects.get_or_create(link=obj["cover"])
        cover.book = book
        cover.default_cover = (num_existing_covers == 0)
        cover.save()  # previously the modified cover was never persisted
    book.title = obj["title"]
    book.language = obj["language"] if isinstance(obj["language"], str) else 'mul'
    if "description" in obj:
        book.description = obj["description"]
    if "gutenberg_type" in obj:
        book.gutenberg_type = obj["gutenberg_type"]
    elif "metadata" in obj and "gutenberg_type" in obj.metadata:
        book.gutenberg_type = obj.metadata["gutenberg_type"]
    bookshelf = None
    if "gutenberg_bookshelf" in obj:
        bookshelf = obj["gutenberg_bookshelf"]
    elif "metadata" in obj and "gutenberg_bookshelf" in obj.metadata:
        bookshelf = obj.metadata["gutenberg_bookshelf"]
    if bookshelf is not None:
        if isinstance(bookshelf, str):
            book.gutenberg_bookshelf = bookshelf
        else:
            book.gutenberg_bookshelf = ";".join(bookshelf)
    subjects = None
    if "subjects" in obj:
        subjects = obj["subjects"]
    elif "metadata" in obj and "subjects" in obj.metadata:
        subjects = obj.metadata["subjects"]
    if subjects is not None:
        if isinstance(subjects, str):
            book.subjects = subjects
        elif len(subjects) > 0:
            if isinstance(subjects[0], str):
                book.subjects = ";".join(subjects)
            else:
                # Each entry is a (scheme, value) pair; keep the value.
                book.subjects = ";".join(x[1] for x in subjects)
    # yaml can either be a Pandata object or a YAML string.
    if isinstance(yaml, Pandata):
        book.yaml = yaml.__unicode__()
    else:
        book.yaml = yaml
    book.save()
    return True
993,072 | 7ab222d5ee57e05fad85e4d8f971aace249266d7 | from django.contrib import admin
from django.contrib.auth.models import Permission
from django.utils.safestring import mark_safe
from Index.models import Product,Product_type,Review,Order_products,Address,Promotion
from Profile.models import My_User,Financial_detail,Payment,Order
# Register your models here.
class OrderAdmin(admin.ModelAdmin):
    """Admin list view for orders: show owner, ordered flag and status,
    paginated 10 rows per page."""
    list_display = ['user',
                    'ordered', 'status']
    list_per_page = 10
# Expose the shop/profile models in the Django admin. Order uses the
# customized OrderAdmin defined above; the rest use the default ModelAdmin.
admin.site.register(Product)
admin.site.register(Product_type)
admin.site.register(My_User)
admin.site.register(Financial_detail)
admin.site.register(Payment)
admin.site.register(Promotion)
admin.site.register(Order, OrderAdmin)
admin.site.register(Review)
admin.site.register(Address)
admin.site.register(Order_products)
|
993,073 | 6d8d4e4cb252124d91f0bbd570f84c003f2736ce | import numpy as np
np.random.seed(1664)
from os import listdir
import sys
import pandas as pd
from keras.preprocessing.image import img_to_array, load_img
# Root of the MTFL (Multi-Task Facial Landmark) dataset on disk.
DATASET_PATH = 'dataset/MTFL/'
# OpenCV Haar cascade location -- machine-specific; TODO make configurable.
HAAR_FOLDER = '/home/username/opencv/opencv-master/data/haarcascades/'
HAAR_FILE = 'haarcascade_frontalface_default.xml'
def loadTrain(filename):
    """Parse an MTFL annotation file.

    Each line is: image path, 10 landmark coordinates, 4 attribute labels,
    space-separated. Returns (file paths as a pandas Series, landmark array
    of shape (n, 10), label array of shape (n, 4)).
    """
    table = pd.read_csv(filename, sep=' ', header=None, low_memory=False)
    files = table[0]  # first column: image path / id
    raw = np.genfromtxt(filename, delimiter=' ')  # path column becomes NaN
    n_cols = raw.shape[1]
    appearances = raw[:, n_cols - 4:n_cols]       # trailing 4 attribute labels
    feat = raw[:, 1:n_cols - 4]                   # 10 landmark coordinates
    return files, np.asarray(feat), np.asarray(appearances)
def resize_img(img, max_dim=40):
    """Resize a PIL-style image to a square of side `max_dim` pixels."""
    side = int(max_dim)
    return img.resize((side, side))
def loadImages(path, files, max_dim=40):
    """Load `files` (paths relative to `path`) as normalized grayscale arrays.

    Returns an array of shape (len(files), max_dim, max_dim, 1) with values
    in [0, 1].

    Fixes: `np.float` was deprecated and removed in NumPy 1.24 -- the builtin
    `float` is the documented replacement -- and an unused in-function
    matplotlib import has been dropped.
    """
    print('loading ' + str(len(files)) + ' images ...')
    X = np.empty((len(files), max_dim, max_dim, 1))
    for i in range(len(files)):
        filename = path + files[i]
        # Grayscale load; resize_img expects a PIL image.
        x = resize_img(load_img(filename, grayscale=True), max_dim=max_dim)
        X[i] = img_to_array(x)
    return np.asarray(X, dtype=float) / 255.0
def isInside(px, py, w, h, xp, yp):
    """Return True when every point (xp[i], yp[i]) lies inside the box
    spanning (px, py) to (px+w, py+h), boundaries inclusive.

    Improvement: the original kept scanning all points after finding one
    outside; all() short-circuits on the first miss.
    """
    return all(
        px <= xp[i] <= px + w and py <= yp[i] <= py + h
        for i in range(len(xp))
    )
import os
import cv2
def face_detect(path, files, feat, labels, feat_file, sz=40):
    """Detect faces, crop them to sz x sz, and rewrite the annotations.

    For every image, each Haar-cascade detection whose box contains all five
    landmarks is cropped, resized to sz x sz, written under path + '/faces/',
    and a '<relpath> <10 coords> <4 labels>' line is appended to the new
    annotation file `feat_file` in that directory. Returns `feat`, which is
    modified in place with crop-relative, rescaled coordinates.

    Fix: the output annotation file is now closed deterministically via a
    `with` block (it was previously opened and never closed).
    """
    face_cascade = cv2.CascadeClassifier(HAAR_FOLDER + HAAR_FILE)
    save_loc = '/faces/'
    save_dir = path + save_loc
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    with open(save_dir + feat_file, 'w') as file:
        # detecting faces
        for i in range(len(files)):
            print("%s %i/%i" % (files[i], i, len(files)))
            filename = path + files[i]
            img = cv2.imread(filename)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (px, py, w, h) in faces:
                roi_color = img[py:py + h, px:px + w]
                # Keep only detections whose box contains all 5 landmarks.
                if isInside(px, py, w, h, feat[i][0:5], feat[i][5:10]):
                    # Translate landmarks into crop-local coordinates.
                    for j in range(5):
                        feat[i][j] -= px
                        feat[i][j + 5] -= py
                    roi_color = cv2.resize(roi_color, (sz, sz))
                    # Rescale coordinates to the sz x sz crop (box assumed square).
                    feat[i] = feat[i] * sz / w
                    cv2.imwrite(save_dir + '/' + files[i], roi_color)
                    file.write(save_loc + files[i])
                    for j in range(10):
                        file.write(" %f" % feat[i][j])
                    for j in range(4):
                        file.write(" %i" % labels[i][j])
                    file.write("\n")
    return feat
import shutil
def pre_process(filename, save_folder, path=DATASET_PATH, max_dim=40):
    """Recreate `save_folder` under `path`, then crop faces for the split
    described by annotation file `filename` (via face_detect).

    Improvement: the original duplicated os.makedirs in both branches of the
    exists check; removing the stale directory first (when present) and then
    creating it once is equivalent and shorter. Returns 0 as before.
    """
    target = path + save_folder
    if os.path.exists(target):
        shutil.rmtree(target)  # start from a clean output directory
    os.makedirs(target)
    file_testIdx = path + filename
    print(file_testIdx)
    files, feat, labels = loadTrain(file_testIdx)
    feat = face_detect(path, files, feat, labels, filename, max_dim)
    return 0
def load(path=DATASET_PATH, max_dim=40):
    """Run the full MTFL pipeline: crop faces for both training splits and the
    test split, merge the two training annotation files, then load the cropped
    images and landmark features into memory.

    Returns (train_imgs, train_feat, train_files,
             test_imgs, test_feat, test_files).
    """
    if not os.path.exists(path + 'faces'):
        os.makedirs(path + 'faces')
    # Crop/normalize each split (writes images + annotation files under faces/).
    pre_process('training_lfw.txt', 'faces/lfw_5590', max_dim=max_dim)
    pre_process('training_net.txt', 'faces/net_7876', max_dim=max_dim)
    pre_process('testing.txt', 'faces/AFLW', max_dim=max_dim)
    # Concatenate the two training annotation files into one.
    filenames = [path+'/faces/training_lfw.txt', path+'/faces/training_net.txt']
    with open(path +'/faces/' +'training.txt', 'w') as outfile:
        for fname in filenames:
            with open(fname) as infile:
                for line in infile:
                    outfile.write(line)
    file_trainIdx = path +'/faces/' +'training.txt'
    file_testIdx = path +'/faces/' +'testing.txt'
    # file_trainIdx = path + 'training_lfw.txt'
    # file_testIdx = path + 'testing.txt'
    files, feat, label = loadTrain(file_trainIdx)
    imgs = loadImages(path, files, max_dim)
    files_test, feat_test, label_test = loadTrain(file_testIdx)
    imgs_test = loadImages(path, files_test, max_dim)
    # import matplotlib.pyplot as plt
    # plt.imshow(np.uint8(imgs[0].reshape(max_dim,max_dim)*255.0),cmap='gray')
    # plt.show()
    return imgs, feat, files , imgs_test, feat_test, files_test
993,074 | bd5b1b0dc772f637e26d02816aa20c0dbf3ea507 | class NltkSentenceSplitter(object):
def __init__(self):
from nltk import sent_tokenize
self._sent_tokenize = sent_tokenize
def __call__(self, text):
return self._sent_tokenize(text)
class BlingfireSentenceSplitter(object):
    """Callable sentence splitter backed by blingfire's text_to_sentences.

    blingfire returns one newline-separated string, so the result is split
    on '\n' to yield a list of sentences. The import is deferred to __init__.
    """

    def __init__(self):
        from blingfire import text_to_sentences
        self._to_sentences = text_to_sentences

    def __call__(self, text):
        joined = self._to_sentences(text)
        return joined.split('\n')
class SpacySentenceSplitter(object):
    """Callable sentence splitter using a spaCy pipeline's sentence boundaries.

    Loading the model in __init__ is expensive; reuse one instance.
    """

    def __init__(self, model_name='en_core_web_sm'):
        import spacy
        self.nlp = spacy.load(model_name)

    def __call__(self, text):
        parsed = self.nlp(text)
        return [sentence.text for sentence in parsed.sents]
|
993,075 | bf068802a4fabbf5b67c5b0dfa13c514f5f9538d | import string
import random
def gen_url(length=4):
    """Return a random slug of `length` ASCII letters.

    The default of 4 preserves the original hard-coded size, so existing
    callers are unaffected. Uses random.choices + str.join instead of
    quadratic string concatenation in a loop.

    NOTE(review): `random` is not cryptographically secure; if these slugs
    must be unguessable, switch to the `secrets` module.
    """
    return ''.join(random.choices(string.ascii_letters, k=length))
993,076 | 9b4d492458102b5a80d20bdb5b722143678188ef | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from blog.models import Post, Comment
from django.utils import timezone
from blog.forms import PostForm, CommentForm
from django.contrib.auth.models import User
from django.contrib import auth
from django.contrib.auth import authenticate,login,logout
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views.generic import (TemplateView,ListView,
DetailView,CreateView,
UpdateView,DeleteView)
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
class AboutView(TemplateView):
    """Static about page."""
    template_name = 'about.html'


class PostListView(ListView):
    """All published posts, newest first."""
    model = Post

    def get_queryset(self):
        # Exclude drafts: published_date must be set and not in the future.
        return Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')


class PostDetailView(DetailView):
    """Single post page."""
    model = Post


class CreatePostView(LoginRequiredMixin,CreateView):
    """Create a new post; requires login."""
    login_url = '/login/'
    redirect_field_name = 'blog/post_detail.html'
    form_class = PostForm
    model = Post


class PostUpdateView(LoginRequiredMixin,UpdateView):
    """Edit an existing post; requires login."""
    login_url = '/login/'
    redirect_field_name = 'blog/post_detail.html'
    form_class = PostForm
    model = Post


class DraftListView(LoginRequiredMixin,ListView):
    """Unpublished drafts, oldest first; requires login."""
    login_url = '/login/'
    redirect_field_name = 'blog/post_list.html'
    model = Post

    def get_queryset(self):
        # Drafts are posts whose published_date has never been set.
        return Post.objects.filter(published_date__isnull=True).order_by('created_date')


class PostDeleteView(LoginRequiredMixin,DeleteView):
    """Delete a post, then return to the post list."""
    model = Post
    success_url = reverse_lazy('post_list')
def signup(request):
    """Register a new user from POSTed username/password1/password2.

    Re-renders the signup form with an error when the username already exists
    or the passwords differ; on success the user is created and logged in
    immediately.

    NOTE(review): the success path renders 'post_list.html' directly, with no
    redirect and no post context -- a redirect to the post-list view is likely
    intended; confirm before changing.
    """
    if request.method == "POST":
        if request.POST['password1'] == request.POST['password2']:
            user = User.objects.filter(username = request.POST['username'])
            if user:
                return render(request,'signup.html',{'error' : '*Username already exist'})
            else:
                user = User.objects.create_user(username = request.POST['username'],password = request.POST['password1'])
                auth.login(request,user,backend='django.contrib.auth.backends.ModelBackend')
                return render(request,'post_list.html')
        else:
            return render(request,'signup.html',{'error':'*Passwords must match'})
    else:
        return render(request,'signup.html')
def passwordreset(request):
    """Set a new password for the currently logged-in user from
    POSTed password1/password2.

    NOTE(review): User.set_password invalidates the current session hash, so
    the user is effectively logged out right after this succeeds; Django's
    update_session_auth_hash would keep them logged in -- confirm the desired
    behavior. The success path also renders 'post_list.html' without context.
    """
    if request.method == "POST":
        if request.POST['password1'] == request.POST['password2']:
            u = User.objects.get(username__exact=request.user.username)
            u.set_password(request.POST['password1'])
            u.save()
            return render(request,'post_list.html')
        else:
            return render(request,'passwordreset.html',{'error':'*Passwords must match'})
    else:
        return render(request,'passwordreset.html')
#######################################
## Functions that require a pk match ##
#######################################
@login_required
def post_publish(request, pk):
post = get_object_or_404(Post, pk=pk)
post.publish()
return redirect('post_detail', pk=pk)
@login_required
def add_comment_to_post(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.save()
return redirect('post_detail', pk=post.pk)
else:
form = CommentForm()
return render(request, 'blog/comment_form.html', {'form': form})
@login_required
def comment_approve(request, pk):
    """Approve comment ``pk`` and return to its post's detail page."""
    comment = get_object_or_404(Comment, pk=pk)
    comment.approve()
    return redirect('post_detail', pk=comment.post.pk)
@login_required
def comment_remove(request, pk):
    """Delete comment ``pk`` and return to the post it belonged to."""
    comment = get_object_or_404(Comment, pk=pk)
    parent_pk = comment.post.pk  # remember the post before the comment is gone
    comment.delete()
    return redirect('post_detail', pk=parent_pk)
def user_login(request):
    """Authenticate a user from the login form.

    On success the user is logged in, the username is stored in the
    session, and the browser is redirected to the dashboard.  On any
    failure (bad credentials or inactive account) the login form is
    shown again -- note that no error message is passed to the template.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username,password=password)
        if user:
            if user.is_active:
                login(request,user)
                request.session['member_name'] = username
                print("Hello World Done")  # NOTE(review): leftover debug print
                return HttpResponseRedirect(reverse('dashboard'))
    return render(request,'registration/user_login.html')
def dashboard(request):
    """Render the post-login dashboard page."""
    return render(request,'registration/dashboard.html')
@login_required
def userdetials(request,pk):
    """Show a user's details (GET) or update their profile photo (POST)."""
    if request.method == "POST":
        user = User.objects.get(pk=pk)
        # NOTE(review): raises MultiValueDictKeyError if no 'photo' file
        # was uploaded -- consider request.FILES.get('photo').
        user.profile.photo = request.FILES['photo']
        # NOTE(review): saving the User does not obviously persist the
        # related profile; user.profile.save() may be needed -- confirm.
        user.save()
        return render(request,'blog/post_list.html')
    else:
        user = get_object_or_404(User,id=pk)
        return render(request,'blog/user_details.html',{'user':user})
|
993,077 | 6dfc8e4edf5a3169205953a9a2c45ce379880a1c | A = int(input())
B = int(input())
C = [i for i in range(1, 4) if i != A and i != B]
print(*C) |
993,078 | 771737853fa5cade35531cb7d2824359343b62d8 | # License: BSD
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import math
import torch.nn as nn
class Crack(nn.Module):
    """Small VGG-style CNN for binary crack classification.

    ``Crack_cfg`` describes the feature extractor: an int adds a
    Conv-BN-ReLU block with that many output channels, 'M' adds a 2x2
    max-pool.  The classifier head expects the feature map to flatten
    to 32*6*6 values (e.g. 3x24x24 input with two pools).
    """

    def __init__(self, Crack_cfg):
        super(Crack, self).__init__()
        self.features = self._make_layers(Crack_cfg)
        self.classifier = self.make_classifier()

    def make_classifier(self):
        """Build the fully connected head: 32*6*6 -> 64 -> 64 -> 2."""
        return nn.Sequential(
            nn.Linear(32 * 6 * 6, 64),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(64, 64),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(64, 2),
        )

    def forward(self, x):
        """Extract features, flatten per sample, classify."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Translate ``cfg`` into a Sequential feature extractor.

        'M' -> MaxPool2d(2, 2); an int n -> Conv2d(prev, n, 3, padding=1)
        followed by BatchNorm2d(n) and in-place ReLU.
        """
        layers = []
        channels = 3
        for item in cfg:
            if item == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(channels, item, kernel_size=3, padding=1),
                    nn.BatchNorm2d(item),
                    nn.ReLU(inplace=True),
                ])
                channels = item
        return nn.Sequential(*layers)
# Layer recipe for the 'Crack11' network: conv channel counts, 'M' = max-pool.
Crack_cfg = {
    'Crack11': [16, 16, 'M', 32, 32, 'M'],
}
model_t = Crack(Crack_cfg['Crack11'])
plt.ion()  # interactive mode

# Identical pipeline for training and validation: to-tensor plus
# normalization to roughly [-1, 1] (no augmentation).
data_transforms = {
    'train': transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]),
    "val": transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]),
}

data_dir = 'I:\\1裂缝检测\\CrackForest-dataset\\trian\\train2\\'
image_datasets = {
    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
    for x in ['train', 'val']
}
dataloaders = {
    x: torch.utils.data.DataLoader(image_datasets[x], batch_size=256, shuffle=True)
    for x in ['train', 'val']
}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

# Prefer the first GPU when available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
def imshow(inp, title=None):
    """Imshow for Tensor.

    Un-normalizes a CHW tensor and displays it with matplotlib.
    NOTE(review): the dataloaders above normalize with mean/std 0.5,
    but these are the ImageNet constants -- they do not match; confirm
    which is intended.
    """
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
# Visual preview disabled:
# imshow(out, title=[class_names[x] for x in classes])
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train ``model`` and return it with the best validation weights loaded.

    Uses the module-level ``dataloaders``, ``dataset_sizes`` and
    ``device``.  Predictions are the argmax over the two logits;
    precision and recall are computed for class 0 ("crack"), and the
    best weights are kept whenever either metric improves during the
    validation phase.
    """
    since = time.time()
    # Possibly this uses a model from the model zoo?!
    best_model_wts = copy.deepcopy(model.state_dict())
#    best_acc = 0.0
    best_pr = 0.0
    best_re = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                # NOTE(review): since PyTorch 1.1, scheduler.step() should be
                # called after optimizer.step() (end of epoch) -- confirm the
                # installed version expects this pre-1.1 ordering.
                scheduler.step()
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            L0 = 0
            P0 = 0
            P_neq = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                # L0 = TP + FN: samples whose true label is 0 (crack)
                L0 += torch.sum(labels == 0)
                # P0 = TP + FP: samples predicted as class 0 (crack)
                P0 += torch.sum(preds == 0)
                # P_neq = FP + FN: samples where prediction != label
                P_neq += torch.sum(preds != labels)
            # Derived: TP = (L0 + P0 - P_neq) / 2
            t = L0+P0-P_neq
            print(t)
            print(2*P0)
            print(2*L0)
            # precision = TP / (TP + FP)
            PRECISE = t.float()/(2*P0)
            # recall = TP / (TP + FN): fraction of true cracks detected
            RECALL = t.float()/(2*L0)
            epoch_loss = running_loss / dataset_sizes[phase]
            # epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} PRE: {:.4f} REC: {:.4f}'.format(
                phase, epoch_loss, PRECISE,RECALL))
            # deep copy the model whenever either metric improves on val
            if phase == 'val' and (PRECISE >= best_pr or RECALL >= best_re):
                best_re = RECALL
                best_pr = PRECISE
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_re))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# Best validation scores, later embedded in the checkpoint file name at
# the bottom of the script.  NOTE(review): train_model keeps its own
# local copies and train_model_25 assigns without a ``global`` statement,
# so these module-level values never change from 0.0.
best_pr = 0.0
best_re = 0.0
def train_model_25(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train ``model`` and return it with the best validation weights loaded.

    Variant of ``train_model``: instead of argmax over the logits, it
    sums the raw outputs of each sample, min-max normalises the sums
    over the batch and thresholds at 0.5 to get a binary prediction.
    Precision/recall are computed for class 0 ("crack").

    Parameters match ``train_model``; uses the module-level
    ``dataloaders``, ``dataset_sizes`` and ``device``.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    # Best scores must be initialised locally: the original read the
    # module-level globals while also assigning to them inside the
    # function, which raises UnboundLocalError on the first val phase.
    best_pr = 0.0
    best_re = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                scheduler.step()
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            L0 = 0      # TP + FN: samples whose true label is 0 (crack)
            P0 = 0      # TP + FP: samples predicted as 0 (crack)
            P_neq = 0   # FP + FN: samples where prediction != label
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # Track gradient history only in the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    # Per-sample sum of raw outputs, min-max normalised and
                    # thresholded at 0.5.  .cpu() makes this work on GPU
                    # too (the original called .numpy() on device tensors).
                    temp = outputs.detach().cpu().numpy()
                    preds = np.array([np.sum(x) for x in temp])
                    # NOTE(review): divides by zero when all batch sums are
                    # equal -- confirm that cannot happen here.
                    preds = (preds - np.min(preds)) / (np.max(preds) - np.min(preds))
                    preds[preds >= 0.5] = 1
                    preds[preds < 0.5] = 0
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                labels = labels.cpu().numpy()
                L0 += np.sum(labels == 0)
                P0 += np.sum(preds == 0)
                P_neq += np.sum(preds != labels)
            # Derived: TP = (L0 + P0 - P_neq) / 2
            t = L0 + P0 - P_neq
            print(t)
            print(2 * P0)
            print(2 * L0)
            # float(t) instead of t.float(): t is a NumPy integer here and
            # has no .float() method (the original raised AttributeError).
            PRECISE = float(t) / (2 * P0)   # precision = TP / (TP + FP)
            RECALL = float(t) / (2 * L0)    # recall = TP / (TP + FN)
            epoch_loss = running_loss / dataset_sizes[phase]
            print('{} Loss: {:.4f} PRE: {:.4f} REC: {:.4f}'.format(
                phase, epoch_loss, PRECISE, RECALL))
            # deep copy the model whenever either metric improves on val
            if phase == 'val' and (PRECISE >= best_pr or RECALL >= best_re):
                best_re = RECALL
                best_pr = PRECISE
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_re))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# Build the network described by 'Crack11' and train it with Adam +
# cross-entropy, then save the best weights.
Crack_cfg = {
    'Crack11':[16,16,'M',32,32,'M']
}
model_ft = Crack(Crack_cfg['Crack11']);
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.001)
# Decay LR by a factor of 0.1 (annealing).  NOTE(review): comment used
# to say "every 7 epochs" but step_size is 10.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=10, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                       num_epochs=25)
model_ft = model_ft.to('cpu')
# The file name embeds the module-level best_re/best_pr values.
torch.save(model_ft.cpu().state_dict(),"./crackre{:.4f}pr{:.4f}.pt".format(best_re,best_pr))
# torch.save(model.cpu().state_dict(),"./crackre{:.4f}pr{:.4f}.pt".format(best_re,best_pr))
993,079 | 624fbbfe2c95746be39011acb9fe2ea4b71d8165 | import seed
from utils import *
from models import *
from preprocessing import Preprocessing
# --- Training -----------------------------------------------------------
X, y, files = load_data(patches=True)
X = X - 0.5  # center pixel values around zero
pre = Preprocessing(standardize=False, samplewise=False)
X_train, X_valid, y_train, y_valid = pre.split_data(X, y, test_size=0.1, shuffle=True)

conf = Config(epochs=1000, patience=50, use_class_weights=True, batch_size=2000)
basic_fcn = BasicFCN(config=conf)
basic_fcn.train(X_train, y_train, X_valid, y_valid)

# --- Prediction ---------------------------------------------------------
X_test, test_files = load_tests()
# X_test2 = pre.transform(X_test)
X_test = X_test - 0.5
X_pred = basic_fcn.predict(X_test, test_files)
vis_pred(X_test, X_pred, last_n=False, img_sz=608)

# --- Submission ---------------------------------------------------------
# dir_path = "runs/BasicFCN_20190626-095011/predictions"
dir_path = os.path.join(basic_fcn.model_dir, basic_fcn.config.pred_dir)
pattern = os.path.join(dir_path, "test_*.png")
submission_filename = os.path.join(dir_path, "submission.csv")
image_filenames = glob.glob(pattern)
for image_filename in image_filenames:
    print(image_filename)
masks_to_submission(submission_filename, *image_filenames)
|
993,080 | 33f7a793f5b2c3b585cd9d64cb6d3694522949b8 |
"""lab05_elif.py
This program reads a temperature in fahrenheit from
the keyboard, converts it to centigrade and prints the results.
Then it prints a description of the conditions outside. Note,
in class we discussed why this code is considered somewhat sloppy.
"""
temp = input('Enter a temperature: ')
ftemp = float(temp)
ctemp = 5.0 / 9.0 * (ftemp - 32)
print('{0:.1f} degrees Fahrenheit is {1:.1f} degrees Centigrade'.format(
ftemp, ctemp))
if ftemp > 95:
print("It's very hot!")
elif ftemp > 80:
print("It's hot.")
elif ftemp > 60:
print("It's nice out.")
elif ftemp > 40:
print("It's chilly,")
else:
print("It's cold!")
|
class Employee():
    """A minimal employee record with a raisable salary."""

    def __init__(self, first, last, salary):
        """Store the employee's first/last name and annual salary."""
        self.first = first
        self.last = last
        self.salary = salary

    def give_raise(self, incre=5000):
        """Increase the salary by ``incre`` (default 5000)."""
        self.salary += incre
import unittest
class TestEmployee(unittest.TestCase):
    """Unit tests for Employee.give_raise (default and custom amounts)."""
    def setUp(self):
        # Fresh employee plus the expected salaries after a default
        # (+5000) and a custom (+10000) raise.
        self.my_emp=Employee("jak","lee",100000)
        self.fi="jak"
        self.la="lee"
        self.sa=105000
        self.sal=110000
    def test_give_default_raise(self):
        # The default increment is 5000.
        self.my_emp.give_raise()
        self.assertEqual(self.my_emp.salary,self.sa)
    def test_give_custom_raise(self):
        self.my_emp.give_raise(10000)
        self.assertEqual(self.my_emp.salary,self.sal)
print("..........................6-4.......................")
dic_table={"for":"loop_1","while":"loop_2","in":"loop_in ","with":"open file use","as":"to be"}
dic_table["if"]="assume"
dic_table["else"]="another aspect"
dic_table["class"]="define class"
dic_table["def"]="define"
dic_table["and"]="2 condition"
for key,value in dic_table.items():
print(key + ": " + value)
print(".........................9-13.............................")
from collections import OrderedDict
dic_table=OrderedDict()
dic_table["for"]="loop_1"
dic_table["while"]="loop_2"
dic_table["in"]="loop_in"
dic_table["with"]="open file use"
dic_table["as"]="to be"
dic_table["if"]="assume"
dic_table["else"]="another aspect"
dic_table["class"]="define class"
dic_table["def"]="define"
dic_table["and"]="loop_1"
for key,value in dic_table.items():
print(key + ": " + value)
print(".........................9-14.............................")
from random import randint
class Die():
    """A die with a configurable number of sides (default 6)."""
    def __init__(self,sides=6):
        # Number of faces; rolls are uniform over 1..sides.
        self.sides=sides
    def roll_die(self):
        """Print and return a uniform random roll in 1..self.sides.

        Fixes the original NameError: it referenced the bare name
        ``sides`` instead of ``self.sides``.  The roll is now also
        returned (previously implicitly None) so callers can use it.
        """
        value = randint(1, self.sides)
        print(value)
        return value
# Demo: create a default six-sided die and print one roll.
the_die = Die()
the_die.roll_die()
|
993,082 | 5c191e0c87a43d09cbcd7f26e19a94e63013da1f | import link
link_name = "GitHub Syllabus Repository"
user_text = "GitHub Syllabus Repository"
url = "https://github.com/MarkHoeber/tools-tech-syllabus"
link.xref_links.update({link_name: (user_text, url)}) |
993,083 | 83cc83ade56aab8f51e06d16fe47161a107c3fd0 | #!/usr/bin/python
### BRAND NEW VERSION. CONTAINS MATERIAL FROM LEONARDO SALA AND MARCO-ANDREA BUCHMANN
import sys; sys.path.append('/usr/lib/root')
from sys import argv,exit
from os import popen
import re
import ROOT
from math import sqrt,ceil
from array import array
class bcolors:
    """ANSI escape sequences for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'

    def disable(self):
        """Blank every code on this instance (class attributes untouched)."""
        for attr in ('HEADER', 'OKBLUE', 'OKGREEN', 'WARNING', 'FAIL', 'ENDC'):
            setattr(self, attr, '')
### needed to keep objects in memory
# Drawn multigraphs are parked in these module-level dicts so a Python
# reference survives and the objects stay displayable.
myMultiGraphs = {}
myMultiGraphsMultiJob = {}
def makeabstracthisto1d2d3d(command,samplename,tree,c1,whichjob) :
    """Draw ``command`` from ``tree`` on canvas ``c1`` and return the object.

    whichjob == -1 with a 1-D command: plain histogram, the TH1 is
    returned.  whichjob != -1: per-job worker-node quantity -- only
    entry ``whichjob`` is drawn (against WN_net_Seconds when the command
    is 1-D) and a clone of the resulting TGraph is returned.  Otherwise
    (multi-dimensional, no job selection) a clone of the TGraph is
    returned as well.
    """
    c1.cd()
    repcommand=command.replace(":","____")
    addendum=""
    drawoption=""
    if(command.find(":")==-1 and whichjob==-1) :
        # one dimensional draw command (standard)
        tree.Draw(command+">>h_"+samplename+"_"+repcommand)
        tempobject = ROOT.gPad.GetPrimitive(("h_"+samplename+"_"+repcommand+addendum));
    else :
        # print "dealing with a wn quantity for sample"+samplename
        if (whichjob!=-1) :
            #dealing with a WN quantity: select only this job's entry
            drawoption="Entry$=="+str(whichjob)
            addendum="__"+str(whichjob)
            if (command.find(":")==-1):
                #it's possible that the user already specified to draw the WN quantity against WN_net_Seconds so we don't want to do this once more
                command=command+":WN_net_Seconds"
            tree.Draw(command+">>h_"+samplename+"_"+repcommand+addendum,drawoption,"l")
            tempobjecta = c1.cd().GetPrimitive("Graph");
            tempobject = tempobjecta.Clone("g_"+samplename+"_"+repcommand+addendum)
            #in the case of WN diagrams we really want the GRAPH and not the TH2F which just gives the area.
        else :
            #not dealing with a WN quantity but with a multidimensional canvas ...
            #print bcolors.WARNING+" drawing something multidimensional - is this working?"+bcolors.ENDC
            tree.Draw(command+">>h_"+samplename+"_"+repcommand+addendum,drawoption)
            tempobjecta = c1.cd().GetPrimitive("Graph");
            #tempobject = c1.cd().GetPrimitive(("h_"+samplename+"_"+repcommand+addendum));
            tempobject = tempobjecta.Clone("g_"+samplename+"_"+repcommand+addendum)
    return tempobject
#def REMOVE_makeabstracthisto(command,samplename,tree,c1,whichjob) :
# ### REMOVE THIS IF NOT NEEDED ANYMORE!
# c1.cd()
# repcommand=command.replace(":","____")
# addendum=""
# if(whichjob==-1) :
# tree.Draw(command+">>h_"+samplename+"_"+repcommand)
# else :
## print "dealing with a wn quantity for sample"+samplename
# drawoption="Entry$=="+str(whichjob)
# addendum="__"+str(whichjob)
# command=command+":WN_net_Seconds"
# tree.Draw(command+">>h_"+samplename+"_"+repcommand+addendum,drawoption,"l")
# print "just drawn command"+command+" with drawoption "+drawoption
# c1.SaveAs("exp/"+samplename+"_"+repcommand+"__"+str(whichjob)+".png")
#
# tempobject = ROOT.gPad.GetPrimitive(("h_"+samplename+"_"+repcommand+addendum));
# print tempobject.GetEntries()
# return tempobject
def makehisto(branch,quantity,samplename,tree,c1) :
    """Draw ``branch`` of ``tree`` into a named histogram on ``c1`` and return it."""
    c1.cd()
    tree.Draw((branch.GetName()+">>h_"+samplename+"_"+quantity))
    tempobject = ROOT.gPad.GetPrimitive(("h_"+samplename+"_"+quantity));
    return tempobject
def retrieve_info_from_tree(configobjectname,infotree) :
    """Read a metadata value stored as the first bin label of ``configobjectname``.

    'nevents'/'njobs' are returned as ints (their 'ne'/'nj' prefix
    stripped); everything else is returned as the raw label string.
    """
    temphisto = ROOT.TH1F("histo","histo",100,0,100)
    drawstring=configobjectname+">>histo"
    infotree.Draw(drawstring)
    returnname=temphisto.GetXaxis().GetBinLabel(1)
    print configobjectname, returnname
    if(configobjectname=="nevents"): returnname=int(returnname.replace("ne",""))
    if(configobjectname=="njobs"): returnname=int(returnname.replace("nj",""))
    return returnname
def distribute_nPads(nPads,nPadsxy) :
    """Choose a near-square (columns, rows) split for ``nPads`` pads.

    Writes the result into ``nPadsxy``: index 0 gets the larger
    dimension, index 1 the smaller, with their product >= nPads.  The
    loop effectively rounds sqrt(nPads) towards whichever integer its
    fractional part is closer to (probed in 0.05 steps), then the other
    dimension is ceil(nPads / a).
    """
    #we are trying to create a quadratic canvas
    a=sqrt(nPads)
    b=sqrt(nPads)
    nPadsxy[0]=0;
    nPadsxy[1]=0;
    difference=0.00;
    # Grow 'difference' until a-difference falls to int(a) or
    # a+difference reaches int(a)+1 -- whichever happens first decides
    # the rounding direction below.
    while((a-difference)>(int(a)) and (a+difference)<(int(a)+1)) :
        difference+=0.05
#        print "difference is atm : "+str(difference) + "so that a-difference is "+str(a-difference)+" and a+diff="+str(a+difference)
    if((a-difference)<(int(a))) :
        a=int(a)+1
    else:
        a=int(a)
    b=int(ceil(float(nPads)/a))
    #return str(a)+" : "+str(ceil(float(nPads)/a))
    if a>=b :
        nPadsxy[0]=a
        nPadsxy[1]=b
    if b>a :
        nPadsxy[0]=b
        nPadsxy[1]=a
def getinfoabout(filename,configuration) :
    """Fill ``configuration`` with the metadata stored in ``filename``'s info_tree."""
    infocanvas = ROOT.TCanvas("infocanvas","infocanvas")
    file = ROOT.TFile(filename)
    info_tree = file.Get("info_tree")
    configuration["config"]=retrieve_info_from_tree("config",info_tree);
    configuration["eventsJob"]=retrieve_info_from_tree("eventsJob",info_tree);
    configuration["njobs"]=retrieve_info_from_tree("njobs",info_tree);
    configuration["sw"]=retrieve_info_from_tree("sw",info_tree);
    configuration["site"]=retrieve_info_from_tree("site",info_tree);
    configuration["timestamp"]=retrieve_info_from_tree("timestamp",info_tree);
def print_config(fileList,show=True) :
filecounter=0
for file in fileList :
configuration={}
filecounter+=1
getinfoabout(file,configuration)
print ""
print bcolors.OKGREEN+"File "+str(filecounter)+" : "+file+bcolors.ENDC
print "Configuration (key=config): "+configuration["config"]
print "Number of Events (key=eventsJob): "+str(configuration["eventsJob"])
print "Number of Jobs (key=njobs): "+str(configuration["njobs"])
print "Sw (key=sw): "+configuration["sw"]
print "Site (key=site): "+configuration["site"]
print "Time Stamp:"+configuration["timestamp"]
print ""
def get_nPads(fileList,nPadarray) :
    """Record each file's job count in ``nPadarray`` and return the maximum."""
    maxnpad=0
    for file in fileList :
        configuration={}
        getinfoabout(file,configuration)
        nPadarray[file]=configuration["njobs"]
        if nPadarray[file] > maxnpad :
            maxnpad=nPadarray[file]
    return maxnpad
def givenicecolor(index,setthis) :
    """Pick a line colour and style for curve number ``index``.

    Cycles through six predefined colours (index % 6); each completed
    cycle bumps the line style (index / 6) so more than six curves stay
    distinguishable.  Writes 'color' and 'linestyle' into ``setthis``.
    """
    rindex=index%6
    mindex=int(index/6)
    # index%6 selects the colour, index/6 the line style (usually zero)
    color=""
    if(rindex==0) : color = ROOT.TColor.GetColor("#2E2EFE") #BLUE
    if(rindex==1) : color = ROOT.TColor.GetColor("#81F781") #GREEN
    if(rindex==2) : color = ROOT.TColor.GetColor("#BE81F7") #PURPLE
    if(rindex==3) : color = ROOT.TColor.GetColor("#FA5858") #RED
    if(rindex==4) : color = ROOT.TColor.GetColor("#D7DF01") #YELLOWISH
    if(rindex==5) : color = ROOT.TColor.GetColor("#848484") #GRAY
    setthis["linestyle"]=mindex #this is the line style to be used. usually zero.
    setthis["color"]=color
def searchfor(keyword,firstfile,quiet=True) :
    """Return all branches of ``firstfile``'s Performance tree containing ``keyword``.

    With quiet=False the matches (and a no-match warning) are printed
    with the keyword highlighted in the branch name.
    """
    if quiet!=True :
        print "Searching for keyword",keyword,"in",firstfile
    searchhere = ROOT.TFile(firstfile)
    newtree=searchhere.Get("Performance")
    BranchList=newtree.GetListOfBranches()
    totalhits=0
    collection = []
    for branch in BranchList:
        branchname=branch.GetName()
        if(branchname.find(keyword)!=-1):
            totalhits+=1
            if quiet != True :
                print "Have found a match (#"+str(totalhits)+") :",branchname[:branchname.find(keyword)]+bcolors.OKGREEN+branchname[branchname.find(keyword):branchname.find(keyword)+len(keyword)]+bcolors.ENDC+branchname[branchname.find(keyword)+len(keyword):]
            collection.append(branchname)
    if totalhits == 0 :
        if quiet != True:
            print bcolors.FAIL+"There were no results for your keyword ("+keyword+"). Please try again with a different keyword or try to be less specific"+bcolors.ENDC
    return collection
def compute_dimension(variable):
    """Count the ':'-separated dimensions of a draw expression (1, 2 or 3).

    Returns 4 (after printing a warning) for anything with more than
    three dimensions, which callers treat as invalid.
    """
    if variable.find(":")==-1:
        return 1 #one dimensional variable
    else :
        if (variable[variable.find(":")+1:].find(":"))==-1:
            #the second part (after the :) doesn't contain any : i.e. the expression is two dimensional!
            return 2 #two dimensional variable
        else:
            redvar=variable[variable.find(":")+1:] #for 3 D this still contains a ":"
            if (redvar[redvar.find(":")+1:].find(":"))!=-1:
                print bcolors.WARNING+"Warning: One of the requested draw commands,"+bcolors.FAIL+variable+bcolors.WARNING+", seems to contain more than 3 dimensions. Given our current computer standards, that's too much to expect. Variable rejected."+bcolors.ENDC
                return 4
            else:
                return 3
### This function will return all "candidates", that is, all variables that correspond to a given search keyword (and their combinations in case of 2D/3D)
def compute_all_candidates(currentvar,filename) :
    """Return every branch-name combination matching ``currentvar``.

    Each ':'-separated component is used as a search keyword in the
    first file; the cartesian product of the matches is returned.  An
    empty list is returned when nothing matches, when 10 or more
    combinations match, or when the expression has >3 dimensions.
    """
    variabledimension=compute_dimension(currentvar)
    if variabledimension==1:
        candidates=searchfor(currentvar,filename[0],True)
    if variabledimension==2:
        candidates=[]
        vara=currentvar[:currentvar.find(":")]
        varb=currentvar[currentvar.find(":")+1:]
        candidatesa=searchfor(vara,filename[0],True);
        candidatesb=searchfor(varb,filename[0],True);
        for candidatea in candidatesa:
            for candidateb in candidatesb:
                candidate=candidatea+":"+candidateb
                candidates.append(candidate)
    if variabledimension==3:
        candidates=[]
        vara=currentvar[:currentvar.find(":")]
        varb=currentvar[currentvar.find(":")+1:]
        varc=varb[varb.find(":")+1:] ## this is not a typo!
        varb=varb[:varb.find(":")] ## this is not a typo!
        candidatesa=searchfor(vara,filename[0],True);
        candidatesb=searchfor(varb,filename[0],True);
        candidatesc=searchfor(varc,filename[0],True);
        for candidatea in candidatesa:
            for candidateb in candidatesb:
                for candidatec in candidatesc:
                    candidate=candidatea+":"+candidateb+":"+candidatec
                    candidates.append(candidate)
    if variabledimension==4: ## there are no candidates (obviously)
        return []
    if len(candidates) == 0 and variabledimension<4:
        print bcolors.WARNING+"Warning: One of the requested draw commands,"+bcolors.FAIL+currentvar+bcolors.WARNING+", contains a quantity that cannot be found in the files. Please modify it and try again (or you can use --search to look for it)."+bcolors.ENDC
        return candidates
    if len(candidates) >= 10:
        # NOTE(review): this message was copy-pasted from the 0-match case;
        # this branch actually means the keyword matched 10+ quantities.
        print bcolors.WARNING+"Warning: One of the requested draw commands,"+bcolors.FAIL+currentvar+bcolors.WARNING+", contains a quantity that cannot be found in the files. Please modify it and try again (or you can use --search to look for it)."+bcolors.ENDC
        emptycandidates = []
        return emptycandidates
    if len(candidates) < 10 and len(candidates)>0 :
#        print bcolors.OKGREEN+"The draw command,"+bcolors.OKBLUE+currentvar+bcolors.OKGREEN+", was added succesfully to the list of plots to be created."+bcolors.ENDC
        return candidates
def new_compute_all_candidates(currentvar,filename,oldcandidates) :
variabledimension=compute_dimension(currentvar)
if variabledimension==1:
candidates=searchfor(currentvar,filename[0],True)
if variabledimension==2:
candidates=[]
vara=currentvar[:currentvar.find(":")]
varb=currentvar[currentvar.find(":")+1:]
candidatesa=searchfor(vara,filename[0],True);
candidatesb=searchfor(varb,filename[0],True);
for candidatea in candidatesa:
for candidateb in candidatesb:
candidate=candidatea+":"+candidateb
candidates.append(candidate)
if variabledimension==3:
candidates=[]
vara=currentvar[:currentvar.find(":")]
varb=currentvar[currentvar.find(":")+1:]
varc=varb[varb.find(":")+1:] ## this is not a typo!
varb=varb[:varb.find(":")] ## this is not a typo!
candidatesa=searchfor(vara,filename[0],True);
candidatesb=searchfor(varb,filename[0],True);
candidatesc=searchfor(varc,filename[0],True);
for candidatea in candidatesa:
for candidateb in candidatesb:
for candidatec in candidatesc:
candidate=candidatea+":"+candidateb+":"+candidatec
candidates.append(candidate)
if variabledimension>3: ## there are no candidates (obviously)
print bcolors.WARNING+"Warning: You seem to have requested a "+str(variabledimension)+"-dimensional draw command,"+bcolors.FAIL+currentvar+bcolors.WARNING+"; this is outside the scope of this script, sorry."+bcolors.ENDC
return []
if len(candidates) == 0 and variabledimension<4:
print bcolors.WARNING+"Warning: One of the requested draw commands,"+bcolors.FAIL+currentvar+bcolors.WARNING+", contains a quantity that cannot be found in the files. Please modify it and try again (or you can use --search to look for it)."+bcolors.ENDC
return candidates
if len(candidates) >= 10:
print bcolors.WARNING+"Warning: One of the requested draw commands,"+bcolors.FAIL+currentvar+bcolors.WARNING+", contains a quantity that cannot be found in the files. Please modify it and try again (or you can use --search to look for it)."+bcolors.ENDC
emptycandidates = []
return emptycandidates
if len(candidates) < 10 and len(candidates)>0 :
print bcolors.OKGREEN+"The draw command,"+bcolors.OKBLUE+currentvar+bcolors.OKGREEN+", was added succesfully to the list of plots to be created."+bcolors.ENDC
for candidate in candidates:
oldcandidates.append(candidate)
# return candidates
def create_legend(QUANT,histos,fileList,fileinformation,i) :
    """Build a TLegend for quantity ``QUANT``.

    The per-file label is the 'site' (or, failing that, 'config')
    metadata when it differs between the first two files; otherwise the
    first file name.  i == -1 selects the plain histograms, any other
    value the per-job entry ``i``.
    """
    leg = ROOT.TLegend(0.7,0.7,0.85,0.85)
    leg.SetFillColor(10)
    leg.SetLineColor(10)
    leg.SetLineWidth(10)
    ### decide which label to use in the legend
    labelKey = ""
    nFiles = len(fileList)
    if nFiles>1:
        if fileinformation[ fileList[0] ].has_key("site"):
            if fileinformation[ fileList[0] ]["site"] != fileinformation[ fileList[1] ]["site"]: labelKey="site"
        if fileinformation[ fileList[0] ].has_key("config") and labelKey=="":
            if fileinformation[ fileList[0] ]["config"] != fileinformation[ fileList[1] ]["config"]: labelKey="config"
    for filen in fileList:
        if labelKey == "": myLabel = fileList[0]
        else: myLabel = fileinformation[filen][labelKey]
        SAMPLE = filen[filen.find("SAMPLE")+len("SAMPLE")+1:]
        if (i==-1) :
            #case: not WN
            leg.AddEntry(histos[QUANT][SAMPLE], myLabel,"l")
        else :
            #case: WN (per-job object)
            leg.AddEntry(histos[QUANT][SAMPLE][i], myLabel,"l")
    if labelKey=="":
        leg.SetHeader("file name")
    else: leg.SetHeader( labelKey )
    return leg
def draw_multiple(fileList,canvases,candidate,nPads,histos,i) :
    """Draw, on pad ``i+1`` of ``candidate``'s canvas, the per-job TGraphs
    of every file as one TMultiGraph over a common axis range.

    The multigraphs are stored in the module-level
    ``myMultiGraphsMultiJob`` so they are kept alive.
    NOTE(review): ``printError`` is not defined in this part of the
    file -- confirm it exists elsewhere.
    """
    xmax=-1;
    xmin=-1;
    ymax=-1;
    ymin=-1;
    canvases[candidate].cd(i+1)
    filecounter=0
    if not myMultiGraphsMultiJob.has_key(candidate): myMultiGraphsMultiJob[candidate] = []
    myMultiGraphsMultiJob[candidate].append( ROOT.TMultiGraph() )
    # First pass: assign colours/styles and find the global axis range.
    for file in fileList :
        SAMPLE = file[file.find("SAMPLE")+len("SAMPLE")+1:]
        QUANT=candidate
        if not isinstance( histos[QUANT][SAMPLE][i] , ROOT.TGraph):
            printError(candidate+" is not a TGraph, I don't know how to handle it")
        colorconfiguration={}
        givenicecolor(filecounter,colorconfiguration)
        filecounter+=1
        histos[QUANT][SAMPLE][i].SetLineStyle(colorconfiguration["linestyle"])
        histos[QUANT][SAMPLE][i].SetLineColor(colorconfiguration["color"])
        histos[QUANT][SAMPLE][i].SetMarkerColor(colorconfiguration["color"])
        if file==fileList[0]:
            ymin=histos[QUANT][SAMPLE][i].GetYaxis().GetXmin()
            ymax=histos[QUANT][SAMPLE][i].GetYaxis().GetXmax()
            xmax=histos[QUANT][SAMPLE][i].GetXaxis().GetXmax()
            xmin=histos[QUANT][SAMPLE][i].GetXaxis().GetXmin()
        else :
            if histos[QUANT][SAMPLE][i].GetYaxis().GetXmin()<ymin:
                ymin=histos[QUANT][SAMPLE][i].GetYaxis().GetXmin()
            if histos[QUANT][SAMPLE][i].GetYaxis().GetXmax()>ymax:
                ymax=histos[QUANT][SAMPLE][i].GetYaxis().GetXmax()
            if histos[QUANT][SAMPLE][i].GetXaxis().GetXmin()<xmin:
                xmin=histos[QUANT][SAMPLE][i].GetXaxis().GetXmin()
            if histos[QUANT][SAMPLE][i].GetXaxis().GetXmax()>xmax:
                xmax=histos[QUANT][SAMPLE][i].GetXaxis().GetXmax()
    # Second pass: collect the graphs and draw a range-setting frame.
    for file in fileList :
        SAMPLE = file[file.find("SAMPLE")+len("SAMPLE")+1:]
        QUANT=candidate
        myMultiGraphsMultiJob[candidate][i].Add(histos[QUANT][SAMPLE][i])
        if file==fileList[0]:
            pad2d=ROOT.TH2F("pad2d_"+QUANT+"_"+SAMPLE+"_pad"+str(i),"pad2d_"+QUANT+"_"+SAMPLE+"_pad"+str(i),100,xmin,xmax,100,ymin,ymax)
            pad2d.Draw()
    # Axis titles come from the "y:x" draw expression when 2-D.
    if len(candidate.split(":")) ==2:
        xLabel = candidate.split(":")[1]
        yLabel = candidate.split(":")[0]
    else:
        xLabel = ""
        yLabel = candidate
    myMultiGraphsMultiJob[candidate][i].Draw("al")
    myMultiGraphsMultiJob[candidate][i].GetXaxis().SetTitle(xLabel)
    myMultiGraphsMultiJob[candidate][i].GetYaxis().SetTitle(yLabel)
    #myMultiGraphsMultiJob[candidate][i].Draw("al")
#    canvases[candidate].SaveAs("exp/pads/test_pad_"+str(i+1)+".png")
def draw_one(fileList,canvases,candidate,nPads,histos ) :
    """Overlay quantity ``candidate`` from every file on one canvas.

    TH1Fs are padded to a common x-range and drawn with "same"; TGraphs
    are collected into the module-level ``myMultiGraphs[candidate]``
    (keeps them alive) and drawn as a TMultiGraph.
    NOTE(review): printDevel/printWarning are not defined in this part
    of the file -- confirm they exist elsewhere.
    """
    xmax=-1;
    xmin=-1;
    ymax=-1;
    ymin=-1;
    #rangeHisto = ROOT.TH1F("rangeHisto","",100,0,1000)
    fileNumber = 0
    canvases[candidate].cd()
    filecounter=0
    isTH1F = False
    isTGraph = False
    QUANT=candidate
    # Pass 1: detect object type, set colours/styles, find x-range.
    for file in fileList :
        SAMPLE = file[file.find("SAMPLE")+len("SAMPLE")+1:]
        fIsTH1F = False
        fIsTGraph = False
        if isinstance( histos[QUANT][SAMPLE] , ROOT.TH1F): isTH1F=True
        elif isinstance( histos[QUANT][SAMPLE] , ROOT.TGraph): isTGraph=True
        colorconfiguration={}
        givenicecolor(filecounter,colorconfiguration)
        filecounter+=1
        histos[QUANT][SAMPLE].SetLineStyle(colorconfiguration["linestyle"])
        histos[QUANT][SAMPLE].SetLineColor(colorconfiguration["color"])
        histos[QUANT][SAMPLE].SetMarkerColor(colorconfiguration["color"])
        if not isTH1F: continue
        if file==fileList[0]:
            xmax=histos[QUANT][SAMPLE].GetXaxis().GetXmax()
            xmin=histos[QUANT][SAMPLE].GetXaxis().GetXmin()
        else:
            if histos[QUANT][SAMPLE].GetXaxis().GetXmax()>xmax:
                xmax=histos[QUANT][SAMPLE].GetXaxis().GetXmax()
            if histos[QUANT][SAMPLE].GetXaxis().GetXmin()<xmin:
                xmin=histos[QUANT][SAMPLE].GetXaxis().GetXmin()
    ROOT.gStyle.SetOptStat(0)
    # Pass 2 (TH1F only): widen each histogram to the common x-range
    # (kCanRebin + dummy fills) and find the common y-range.
    for file in fileList :
        if not isTH1F: break
        SAMPLE = file[file.find("SAMPLE")+len("SAMPLE")+1:]
        QUANT=candidate
        histos[QUANT][SAMPLE].SetBit(ROOT.TH1.kCanRebin);
        histos[QUANT][SAMPLE].Fill(0.8*xmin,0)
        histos[QUANT][SAMPLE].Fill(1.2*xmax,0)
        if file==fileList[0]:
            ymin=histos[QUANT][SAMPLE].GetMinimum()
            ymax=histos[QUANT][SAMPLE].GetMaximum()
        else:
            if histos[QUANT][SAMPLE].GetMinimum()<ymin:
                ymin=histos[QUANT][SAMPLE].GetMinimum()
            if histos[QUANT][SAMPLE].GetMaximum()>ymax:
                ymax=histos[QUANT][SAMPLE].GetMaximum()
    if isTGraph:
        myMultiGraphs[candidate] = ROOT.TMultiGraph()
    # Pass 3: draw (histograms directly, graphs via the multigraph).
    for file in fileList :
        SAMPLE = file[file.find("SAMPLE")+len("SAMPLE")+1:]
        QUANT=candidate
        if isTH1F:
            histos[QUANT][SAMPLE].GetXaxis().SetRangeUser(0.8*xmin,1.2*xmax)
            histos[QUANT][SAMPLE].GetYaxis().SetRangeUser(0.8*ymin,1.2*ymax)
            if file==fileList[0]:
                histos[QUANT][SAMPLE].Draw("")
                printDevel( SAMPLE+" "+str(histos[QUANT][SAMPLE].GetNbinsX()) )
            else:
                ### FIXME: update for multiple files, as above
                histos[QUANT][SAMPLE].Draw("same")
                printDevel( SAMPLE+" "+str(histos[QUANT][SAMPLE].GetNbinsX()) )
        elif isTGraph:
            myMultiGraphs[candidate].Add( histos[QUANT][SAMPLE] )
        else:
            printWarning(QUANT+" is not a TH1F nor a TGraph, doing nothing...")
    if isTGraph:
        #myMultiGraphs[candidate].GetXaxis().SetTitle("TEST")
        xLabel = QUANT.split(":")[1]
        yLabel = QUANT.split(":")[0]
        myMultiGraphs[candidate].Draw("A*")
        myMultiGraphs[candidate].GetXaxis().SetTitle(xLabel)
        myMultiGraphs[candidate].GetYaxis().SetTitle(yLabel)
    canvases[candidate].Update()
###########################################
#### The main routine
def processallvariables(variables,variablecollection,filename,performancetree,histos,canvases,fileinformation,legend,stats,
#savepng,saveroot,dosummary,
options,outfile) :
    """Main routine: collect every plottable variable matching the requested
    substrings, book a canvas and histogram(s) per candidate and per sample,
    accumulate summary statistics, and draw everything.

    Parameters (all containers are filled in place):
        variables:          substrings selecting which tree branches to plot
        variablecollection: output list of accepted candidate names
        filename:           list of input file names (sample name is parsed
                            from the text following "SAMPLE" in each name)
        performancetree:    dict file -> TTree with the performance data
        histos:             dict quantity -> sample -> histogram (filled here)
        canvases:           dict candidate -> TCanvas (created here)
        fileinformation:    per-file metadata forwarded to the legend builder
        legend:             dict candidate -> TLegend (filled by draw_everything)
        stats:              dict sample -> summary numbers (mean/RMS, error rates)
        options:            parsed command-line options (DoSummary, savePng, saveRoot)
        outfile:            ROOT output file used when saving canvases
    """
    ####adds all variables containing the substring to our plotting collection
    ROOT.gROOT.SetStyle("Plain")
    #c1 = ROOT.TCanvas("c1","c1")
    candidates=[]
    plotcanvas=ROOT.TCanvas("plot_canvas","plot_canvas")
    for currentvar in variables:
        print "Called for currentvar="+str(currentvar)+"; canvas currently contains "+str(len(canvases))
        new_compute_all_candidates(currentvar,filename,candidates)
    candcounter=0
    ##at this point we've "collected" all the variables that we'd like to draw and are ready for showtime !
    if not len(candidates) == 0:
        for candidate in candidates:
            if candidate == "Error_weight" :
                continue ## error weight is just basically the number of times a specific error code was encountered -> only relevant for success rate (handled in "Error" case)
            #this has actually been replaced in the latest version.
            if not canvases.has_key(candidate):
                # print "starting round for candidate",candidate
                candcounter+=1
                variablecollection.append(candidate)
                #variablecollectiondimension[candidate]=currentdimension
                canvases[candidate]=ROOT.TCanvas(("canvas_"+str(candcounter)),("canvas for "+str(candidate)))
                # print "opened canvases for "+candidate
            nPadarray={}
            isWnquantity=False;
            nPads=1;
            # "WN_" quantities are per-worker-node: one pad per job on the canvas
            if(candidate.find("WN_")!=-1) :
                #if candidate.find(":") != -1:
                isWnquantity=True;
                nPads=get_nPads(filename,nPadarray)
                nPadsxy={}
                distribute_nPads(nPads,nPadsxy)
                canvases[candidate].Divide(nPadsxy[0],nPadsxy[1]);
            for file in filename:
                # Sample name is whatever follows "SAMPLE" (plus one separator char)
                SAMPLE = file[file.find("SAMPLE")+len("SAMPLE")+1:]
                QUANT=candidate
                if not stats.has_key(SAMPLE): stats[SAMPLE]={'Error':{}}
                mySTATS = stats[SAMPLE]
                if not histos.has_key(QUANT):
                    histos[QUANT] = {}
                if isWnquantity==False:
                    histos[QUANT][SAMPLE] = makeabstracthisto1d2d3d(candidate,SAMPLE,performancetree[file],plotcanvas,-1)
                    stats[SAMPLE][QUANT]=(histos[QUANT][SAMPLE].GetMean(),histos[QUANT][SAMPLE].GetRMS())
                else:
                    #dealing with a "WN" quantity => 1 plot for each job
                    histos[QUANT][SAMPLE] = {}
                    ###FIXME: this seems not to work
                    # NOTE(review): `i` is not defined yet at this point (the loop
                    # below introduces it) and stats[SAMPLE][QUANT] does not exist
                    # either -- presumably this meant stats[SAMPLE][QUANT] = {}.
                    if options.DoSummary==True: stats[SAMPLE][QUANT][i] = {}
                    for i in range (0,nPads) :
                        histos[QUANT][SAMPLE][i] = makeabstracthisto1d2d3d(candidate,SAMPLE,performancetree[file],plotcanvas,i)
                        if options.DoSummary==True: stats[SAMPLE][QUANT][i]=(histos[QUANT][SAMPLE][i].GetMean(),histos[QUANT][SAMPLE][i].GetRMS())
                    ### overall distribution
                    #histos[QUANT][SAMPLE][i+1] = makeabstracthisto1d2d3d(candidate,SAMPLE,performancetree[file],plotcanvas2,-1)
                if candidate == "Error" :
                    # Failures = integral of the Error histogram; Success = integral
                    # of the CpuPercentage histogram (one entry per finished job)
                    stats[SAMPLE]["Failures"] = failurehisto=(makeabstracthisto1d2d3d("Error",SAMPLE,performancetree[file],plotcanvas,-1)).Integral()
                    if not histos.has_key("TimeJob_CpuPercentage"):
                        stats[SAMPLE]["Success"] = 0
                    elif not histos["TimeJob_CpuPercentage"].has_key(SAMPLE):
                        stats[SAMPLE]["Success"] = 0
                    else:
                        stats[SAMPLE]["Success"] = histos["TimeJob_CpuPercentage"][SAMPLE].Integral()
                    Total = float(stats[SAMPLE]["Success"])+ float(stats[SAMPLE]["Failures"])
                    print "I've found a total of " + str(Total)
                    for i in range(histos[QUANT][SAMPLE].GetNbinsX()):
                        errLabel = histos[QUANT][SAMPLE].GetXaxis().GetBinLabel(i+1)
                        if errLabel!="":
                            if not stats[SAMPLE]["Error"].has_key(errLabel):
                                stats[SAMPLE]["Error"][errLabel] = 0
                            # NOTE(review): `myH` is never defined in this function --
                            # this raises NameError as soon as a labelled error bin is
                            # found; presumably histos[QUANT][SAMPLE] was intended.
                            print errLabel, myH.GetBinContent(i+1)
                            stats[SAMPLE]["Error"][errLabel] = 100*round(myH.GetBinContent(i+1)/Total,3)
            # at this point we are done preparing the histogram(s)
            filecounter=0
            draw_everything(filename,canvases,candidate,nPads,histos,legend,fileinformation,options.savePng, options.saveRoot,outfile) ## draw the variable!
def SaveCanvasName(candidate):
    '''Return the PNG file path under img/ used when saving this candidate's canvas.'''
    return "img/{0}.png".format(candidate)
def draw_everything(fileList, canvases, candidate, nPads, histos, legend, fileinformation, savepng, saveroot, outfile):
    '''Draw one candidate quantity on its canvas, attach its legend, and
    optionally save the canvas as a PNG and/or into the ROOT output file.'''
    canvas = canvases[candidate]
    if nPads == 1:
        # Single-pad quantity: one overlay plot with one legend
        draw_one(fileList, canvases, candidate, nPads, histos)
        legend[candidate] = create_legend(candidate, histos, fileList, fileinformation, -1)
        legend[candidate].Draw()
    else:
        # Per-worker-node quantity: one legend shared across all pads
        legend[candidate] = create_legend(candidate, histos, fileList, fileinformation, 0)
        for pad in range(nPads):
            draw_multiple(fileList, canvases, candidate, nPads, histos, pad)
            legend[candidate].Draw()
    canvas.Update()
    if savepng:
        canvas.SaveAs(SaveCanvasName(candidate))
    if saveroot:
        outfile.cd()
        canvas.SetName(candidate)
        canvas.Write()
def findPlotTogetherCandidates(plotFilter, plotTogether, histos, toBePlotAlone, toBePlotTogether, drawvar=False):
    '''Partition the quantities in `histos` into those drawn alone and those
    drawn overlapped.

    Parameters:
        plotFilter:       list of regex strings; a quantity must match at least
                          one of them to be considered (unless drawvar is True)
        plotTogether:     list of substrings; a quantity containing one of them
                          is grouped under that substring (tstorage quantities
                          are deliberately excluded from grouping)
        histos:           dict keyed by quantity name
        toBePlotAlone:    output list, appended in place (no duplicates)
        toBePlotTogether: output dict substring -> list of quantities, filled
                          in place (no duplicates)
        drawvar:          True when the quantities were explicitly requested by
                          the user, in which case no filtering is applied
    '''
    compiledFilters = [re.compile(f) for f in plotFilter]
    for quant in histos:
        selected = any(f.search(quant) is not None for f in compiledFilters)
        ## if the variables were defined by the user we don't want any filters to interfere
        if not selected and not drawvar:
            continue
        together = False
        for sel in plotTogether:
            # dict.has_key() was removed in Python 3; `in` works in both 2 and 3
            if sel not in toBePlotTogether:
                toBePlotTogether[sel] = []
            if quant.find(sel) != -1 and quant.find("tstorage") == -1:
                if quant not in toBePlotTogether[sel]:
                    toBePlotTogether[sel].append(quant)
                together = True
                break
        if not together:
            if quant not in toBePlotAlone:
                toBePlotAlone.append(quant)
def plot_everything(histos, keys, savePng):
    '''Walk the requested quantities and their per-sample histograms.
    The actual plotting/debug statements are commented out, so this is
    currently a no-op traversal.'''
    for quant in keys:
        for sample in list(histos[quant].keys()):
            myH = histos[quant][sample]
            # print type(myH),quant
            # print quant,sample,myH.Integral()
def do_cowbell():
    '''Print an ASCII-art cowbell (easter egg).'''
    art = (
        " ",
        "          (__)",
        "          (oo)",
        "   /-\/-\ ",
        "  / \ ",
        " | |",
        " | |",
        " / \ ",
        " /__________\ ",
        " \\\\ ",
        " (_)",
        " ",
        " more cowbell? ",
        " ",
        " (source: http://en.wikiversity.org/wiki/ASCII_art)",
    )
    for line in art:
        print(line)
##################################################################################### gray list
def fillLists(branchlist, modevariables, branches, histos, perftree, sitePalette, filename, posFilter, negFilter=""):
    """Register the tree branches of one input file that pass the regex filters.

    For every branch in `branchlist` whose name matches a pattern in
    `posFilter` (and matches none in `negFilter`), record the branch under
    branches[quantity][sample] and book an empty slot in `histos` and
    `modevariables`. Also assigns a stable, non-white ROOT color to each new
    sample in `sitePalette`.

    Parameters (containers are filled in place):
        branchlist:    iterable of ROOT TBranch objects
        modevariables: output list of accepted quantity names
        branches:      output dict quantity -> sample -> TBranch
        histos:        output dict quantity -> {} (histograms made elsewhere)
        perftree:      the performance TTree (currently unused here; the
                       makehisto call that used it is commented out)
        sitePalette:   dict sample -> ROOT color index, extended in place
        filename:      input file name; sample = text after "SAMPLE" + 1 char
        posFilter:     list of regex strings a branch name must match
        negFilter:     list of regex strings that veto a branch
                       (NOTE(review): default is "" and the code only iterates
                       it when non-empty; a non-empty *string* would iterate
                       per character -- callers presumably pass a list)

    Returns:
        The sample name parsed from `filename`.
    """
    # NOTE(review): c1 is created but never used below (the makehisto call that
    # consumed it is commented out).
    c1 = ROOT.TCanvas("c1","c1")
    SAMPLE = filename[filename.find("SAMPLE")+len("SAMPLE")+1:]
    for branch in branchlist:
        histoName = branch.GetName()
        QUANT = histoName
        # First time we see this sample: give it the next free color index
        if not sitePalette.has_key(SAMPLE):
            if len(sitePalette)>0: myColor = sorted(sitePalette.values())[-1] +1
            else: myColor=1
            if myColor==10: myColor+=1 ###don't want white
            sitePalette[SAMPLE] = myColor
        toPlot = False
        for f in posFilter:
            myFilter = re.compile(f)
            if not myFilter.search(QUANT) == None:
                toPlot = True
                break
        if negFilter!='':
            for f in negFilter:
                myFilter = re.compile(f)
                if not myFilter.search(QUANT) == None:
                    toPlot = False
                    break
        if not toPlot: continue
        if not branches.has_key(QUANT):
            branches[QUANT] = {}
            histos[QUANT] = {}
            modevariables.append(QUANT);
        branches[QUANT][SAMPLE] = branch
        # histos[QUANT][SAMPLE] = makehisto(branch,QUANT,SAMPLE,perftree,c1)
    return SAMPLE
def setCPTMode(mode):
    '''Return the plotting configuration for the given CPT mode.

    Returns the tuple (PNG_NAME_FORMAT, legendComposition, sampleTitles,
    varFilter, negFilter, plotFilter, plotTogether, summaryPlots, doSummary).

    Known modes: "Default", and any mode containing "SiteMon", "SiteCfr" or
    "CfgCfr". The "*Ext" variants additionally extend the filters with
    per-event timing, byte counts, network and CPU/disk statistics.

    Raises:
        ValueError: if `mode` matches none of the known families. (The
        original printed a message and then crashed on the return statement
        with UnboundLocalError; this makes the failure explicit.)
    '''
    # NOTE: the original also assigned a `strippedText` local in each branch,
    # but it was never returned nor used, so it has been removed.
    if mode == "Default":
        PNG_NAME_FORMAT = ['Site', "Cfg", "Sw"]
        legendComposition = ['Site', 'Cfg']
        sampleTitles = [""]
        ### filter quantities to be considered
        # (named varFilter rather than `filter` to avoid shadowing the builtin)
        varFilter = [
            ".*read.*sec.*",
            "Time",
            "Percentage",
            "Error"
        ]
        negFilter = [
            "Time_Delay",
            "TimeModule",
            "TimeEvent",
            "local",
            ".*read.*m(in|ax).*",
            ".*open.*m(in|ax).*"
        ]
        ### filter quantities to be plotted
        plotFilter = [
            "read-total-msecs",
            "CMSSW_CpuPercentage",
            "UserTime",
            "Error"
        ]
        ### plot these quantities overlapped (excluded)
        plotTogether = [
            "readv-total-megabytes",
            "read-total-megabytes",
            "readv-total-msecs",
            "read-total-msecs"
        ]
        ### they can not be in plotFilter?
        summaryPlots = [
            "CMSSW_CpuPercentage",
            "TimeJob_User",
            "TimeJob_Exe",
            "tstoragefile-read-total-msecs"  # ,"Error"
        ]
        doSummary = True
    elif mode.find("SiteMon") != -1:
        # Start from the defaults and override the presentation settings
        PNG_NAME_FORMAT, legendComposition, sampleTitles, varFilter, negFilter, plotFilter, plotTogether, summaryPlots, doSummary = setCPTMode("Default")
        PNG_NAME_FORMAT = ['Site', "Cfg"]
        legendComposition = ["Sw", 'Date']
        sampleTitles = ["Site", "Cfg"]
    elif mode.find("SiteCfr") != -1:
        PNG_NAME_FORMAT, legendComposition, sampleTitles, varFilter, negFilter, plotFilter, plotTogether, summaryPlots, doSummary = setCPTMode("Default")
        PNG_NAME_FORMAT = ['Site', "Cfg"]
        legendComposition = ["Site", "Sw", 'Date']
        sampleTitles = ["Site", "Cfg"]
        # summaryPlots.append("Time_Delay")
    elif mode.find("CfgCfr") != -1:
        PNG_NAME_FORMAT, legendComposition, sampleTitles, varFilter, negFilter, plotFilter, plotTogether, summaryPlots, doSummary = setCPTMode("Default")
        PNG_NAME_FORMAT = ['Site', "Cfg"]
        legendComposition = ["Cfg", "Sw", "Label"]
        sampleTitles = ["Site"]
        varFilter.append(".*read.*num.*")
        # summaryPlots.append("Time_Delay")
    else:
        raise ValueError("Mode " + mode + " does not exist")
    ### extending modes: add per-event and host-monitoring quantities
    if mode == "SiteMonExt" or mode == "SiteCfrExt" or mode == "CfgCfrExt":
        varFilter.append("TimeEvent")
        varFilter.append(".*read.*byte.*")
        plotFilter.append("TimeEvent")
        negFilter.remove("TimeEvent")
        varFilter.append("net-.*RX")
        plotFilter.append("net-.*RX")
        varFilter.append("stat-CPU")
        plotFilter.append("stat-CPU")
        varFilter.append("stat-DISK_Read")
        plotFilter.append("stat-DISK_Read")
        # filter.append("stat-MEM") / plotFilter.append("stat-MEM") were disabled
    return PNG_NAME_FORMAT, legendComposition, sampleTitles, varFilter, negFilter, plotFilter, plotTogether, summaryPlots, doSummary
def splitDirName(dirName, strippedText=""):
    '''Decode a result-directory name into its components.

    Two layouts are supported:
      * key-label: dash-separated "Key.Value" fields, e.g. "Site.CERN-Cfg.reco"
        ("Date" fields additionally yield an "Hour" entry from their tail)
      * positional: Site-Cfg-Dataset-EventsJob-Label-DateHour (>= 6 fields)

    Returns a dict of components, or the (stripped) dirName unchanged when it
    matches neither layout.
    '''
    if strippedText != "":
        # Remove every requested substring before parsing
        for piece in strippedText:
            if dirName.find(piece) != -1:
                dirName = dirName.replace(piece, "")
    fields = dirName.split("-")
    parsed = {}
    isKeyLabel = True
    for field in fields:
        parts = field.split(".")
        if len(parts) < 2:
            # A field without a dot means this is not the key-label layout
            isKeyLabel = False
            parsed = {}
            break
        label = ""
        for chunk in parts[1:]:
            label += chunk + "_"
        if parts[0] == "Date":
            # The last four characters of a Date value carry the hour
            parsed["Date"] = label[:-5]
            parsed["Hour"] = label[-5:-1]
        else:
            parsed[parts[0]] = label[:-1]
    if not isKeyLabel:
        ### returns dirName if not in the right format
        if len(fields) < 6:
            return dirName
        parsed["Site"] = fields[0]
        parsed["Cfg"] = fields[1]
        parsed["Dataset"] = fields[2]
        parsed["EventsJob"] = fields[3]
        if parsed["EventsJob"][-3:] == "000":
            parsed["EventsJob"] = parsed["EventsJob"][:-3] + "k"
        parsed["Label"] = fields[4]
        parsed["Date"] = fields[5][0:8]
        parsed["Hour"] = fields[5][8:]
    return parsed
### from hh:mm:ss to secs
def translateTime(str):
    '''Convert a clock string ("ss", "mm:ss" or "hh:mm:ss") into seconds.'''
    total = 0.0
    factor = 1
    # Walk from the rightmost (seconds) field leftwards, scaling by 60 per step
    for piece in reversed(str.split(':')):
        total += float(piece) * factor
        factor *= 60
    return total
##### Print functions
def printError(str):
    '''Print a fatal error message and terminate the program with exit status 1.'''
    print("[ERROR]: " + str)
    exit(1)
def printWarning(str):
    '''Print a non-fatal warning message to stdout.'''
    print("[WARNING]: " + str)
def printDevel(message):
    '''Print a development/debug message prefixed with the caller's source
    file, function name and line number (taken from the caller's frame).'''
    caller = sys._getframe(1)
    print("[DEVEL]: file=" + caller.f_code.co_filename + " func=" + caller.f_code.co_name + " L" + str(caller.f_lineno) + " Message: " + message)
######################### THIS IS THE GONER AREA -- OLD FUNCTIONS THAT NEED REIMPLEMENTING!
def PrintSummary(DIR_SUMMARY, posFilter, negFilter, legLabels, histoTitle=""):
    """Print a TWiki-style summary table of the per-task statistics to stdout.

    Emits (in order): a header row of task labels, a success-rate row, one row
    per observed error code, rows for the "simple" quantities, the TimeModule
    producer timings, and finally the grouped I/O measurement quantities with
    tstoragefile entries listed first in each group.

    Parameters:
        DIR_SUMMARY: dict task-dir -> {label: (mean, rms) | counters};
                     must contain "Success", "Failures" and an "Error" dict
        posFilter:   single regex string a label must match to be listed
        negFilter:   single regex string that vetoes a label
        legLabels:   dict task-dir -> column header text
        histoTitle:  optional table title printed first

    NOTE(review): part of the "GONER AREA" marked for reimplementation;
    only comments were added here.
    """
    print "This is the new Wiki Stats Summary function ... will take some time to get this right but oh well :-)"
    perc=0
    LABELS = []
    ###Creating header
    header = ""
    if histoTitle!="":
        print "|* "+ histoTitle+" *|"
    # NOTE(review): .keys() followed by .sort() requires Python 2 (a list);
    # in Python 3 this would need tasks = sorted(DIR_SUMMARY).
    tasks = DIR_SUMMARY.keys()
    tasks.sort()
    for dir in tasks:
        header += " *"+legLabels[dir]+"* |"
        # Collect the union of all labels across tasks, in sorted-per-task order
        sortedKeys = sorted(DIR_SUMMARY[dir].keys())
        for l in sortedKeys:
            if not l in LABELS: LABELS.append(l)
    print "| |",header
    ###Success rate
    line = "| *Success*|"
    for dir in tasks:
        total = DIR_SUMMARY[dir]["Success"] + DIR_SUMMARY[dir]["Failures"]
        if total==0:
            perc = 0
            line += "%.1f%% (%.0f / %.0f) |" %(perc,DIR_SUMMARY[dir]["Success"], total)
        else:
            perc = 100*DIR_SUMMARY[dir]["Success"]/total
            line += "%.1f%% (%.0f / %.0f) |" %(perc,DIR_SUMMARY[dir]["Success"], total)
    print line
    ###Error Rate
    ### [dir][label] not useful here...
    # Re-key the per-task error rates by numeric error code
    # NOTE(review): int(err) assumes every error label is numeric -- labels
    # coming from histogram bin labels may not be; confirm against the filler.
    pError = {}
    line=""
    for dir in tasks:
        for err in DIR_SUMMARY[dir]["Error"]:
            if not pError.has_key(int(err)) :
                pError[int(err)] = {}
            pError[int(err)][dir] = DIR_SUMMARY[dir]["Error"][int(err)]
    for err in pError.keys():
        line = "| *Error "+str(err)+"* |"
        for dir in tasks:
            if not pError[err].has_key(dir): line += " // | "
            else:
                line += "%s%% |" %( pError[err][dir])
        print line
    #### Actual quantities
    orderedLabels = {}
    orderedProducers = []
    myPosFilter = re.compile(posFilter)
    myNegFilter = re.compile(negFilter)
    for label in LABELS:
        if myPosFilter.search(label) == None or not myNegFilter.search(label) == None : continue
        #orderedProducers = []
        lwork = label.split("-")
        if lwork[0]=="TimeModule":
            # Per-producer timings are printed in their own section below
            quant = lwork[-1]
            orderedProducers.append(quant)
        elif len(lwork)>2:
            # Multi-part I/O labels: group as [measurement][quantity][sub-label]
            tech = lwork[0]
            meas = lwork[1]
            quant = lwork[-1]
            quant2 = label[ label.find(meas):]
            char = ""
            for x in lwork[2:-1]:
                char = x+"-"
            # NOTE(review): str.strip returns a new string; this result is
            # discarded and `char` itself is never used afterwards (dead code).
            char.strip("-")
            if not orderedLabels.has_key(meas):
                orderedLabels[meas] = {}
            if not orderedLabels[meas].has_key(quant): orderedLabels[meas][quant] = {}
            if not orderedLabels[meas][quant].has_key(quant2): orderedLabels[meas][quant][quant2] = []
            orderedLabels[meas][quant][quant2].append(label)
        else:
            # Simple scalar quantities: print immediately (skip the counters)
            if label != "ExeExitCode" and label!="Success" and label!="Failures" and label!="Error":
                line = ""
                line += "| *"+label+"*|"
                for dir in tasks:
                    if DIR_SUMMARY[dir].has_key(label):
                        if label.find("Module")!=-1:
                            line += " %.2e +- %.2e |" %(DIR_SUMMARY[dir][label][0], DIR_SUMMARY[dir][label][1])
                        else:
                            line += " %.2f +- %.2f |" %(DIR_SUMMARY[dir][label][0], DIR_SUMMARY[dir][label][1])
                    else:
                        line += " // |"
                print line
    #TimeModule printing
    if len(orderedProducers)>0:
        line = ""
        print "| *TimeProducers*||||||"
        for producer in sorted(orderedProducers):
            line = ""
            line += "| *"+producer+"*|"
            for dir in tasks:
                if DIR_SUMMARY[dir].has_key("TimeModule-"+producer):
                    line += " %.2e +- %.2e |" %(DIR_SUMMARY[dir]["TimeModule-"+producer][0], DIR_SUMMARY[dir]["TimeModule-"+producer][1])
                else:
                    line += " // |"
            print line
    # putting tstorage entries at the first place
    for meas in sorted(orderedLabels.keys()):
        for quant in sorted(orderedLabels[meas].keys()):
            for quant2 in sorted(orderedLabels[meas][quant].keys()):
                orderedLabels[meas][quant][quant2].sort()
                for label in orderedLabels[meas][quant][quant2]:
                    if label.find('tstoragefile')!=-1:
                        # Move the first tstoragefile entry to the front
                        orderedLabels[meas][quant][quant2].remove(label)
                        orderedLabels[meas][quant][quant2].insert(0, label)
                        break
    line =""
    for meas in sorted(orderedLabels.keys()):
        #if not meas in ["read","readv","seek","open"]: continue
        print "| *"+meas.upper()+"*|", header #|||||"
        for quant in sorted(orderedLabels[meas].keys()):
            #for char in orderedLabels[meas][quant].keys():
            for quant2 in sorted(orderedLabels[meas][quant].keys()):
                for label in orderedLabels[meas][quant][quant2]:
                    if label != "ExeExitCode" and label!="Success" and label!="Failures":
                        line = ""
                        if label.find("tstorage")!=-1: line += "|*"+label+"* |"
                        else: line += "| * _"+label+"_ *|"
                        for dir in tasks:
                            if DIR_SUMMARY[dir].has_key(label):
                                # Scientific notation for small values, fixed otherwise
                                if DIR_SUMMARY[dir][label][0] <0.1:
                                    line += " %.2e +- %.2e |" %(DIR_SUMMARY[dir][label][0], DIR_SUMMARY[dir][label][1])
                                else:
                                    line += " %.2f +- %.2f |" %(DIR_SUMMARY[dir][label][0], DIR_SUMMARY[dir][label][1])
                            else:
                                line += " // |"
                        print line
from constants import *
from input import Input
from layout import Layout
from paddle import Paddle
from ball import Ball
from brick import *
from colorama import Back, Style
import os
import time
class Main:
    '''
    Explanation: Driver class that wires the layout, paddle, ball and bricks
    together and runs the terminal-rendering game loop.
    '''

    def __init__(self):
        '''
        Explanation: Clears the screen, reads the terminal size and creates
        the objects of the other game classes.
        Class Variables:
            H, W: Height and Width of the terminal
            T: Stores the size of the upper wall
            P_T: Stores the thickness of the paddle
            pattern: Stores the terminal as 2D array, storing each pixel as a character
            tiles: Stores the required relation between HP of bricks and character associated
            release: Stores information regarding whether ball has been released or not from the paddle
            time: Stores the time passed since the game began
            score: Stores the score since the game began
            life: Stores the number of lives a player has
        '''
        os.system('clear')
        # stty reports "rows cols"
        self.H, self.W = os.popen('stty size', 'r').read().split()
        self.H = int(self.H)
        self.W = int(self.W)
        self.T = Y_WALL
        self.P_T = PADDLE_THICKNESS
        self._layout = Layout(self.H, self.W)
        self.pattern = self._layout.layout()
        self.tiles = self._layout.getTiles()
        self.brick = Brick(self.tiles)
        self.paddle = Paddle(self.H, self.W, self.pattern)
        self.ball = Ball(self.H, self.W, self.pattern)
        self.release = False
        # Brick HP handlers, indexed by remaining HP in remove_brick()
        self.one = One(self.tiles)
        self.two = Two(self.tiles)
        self.three = Three(self.tiles)
        self.four = Four(self.tiles)
        self.five = Five(self.tiles)
        self.time = 0
        self.score = 0
        self.life = 1

    def _brick_colors(self):
        '''Return the mapping from brick body characters to display colors.'''
        return {
            '+': BRICK_ONE_COLOR,
            '-': BRICK_TWO_COLOR,
            '/': BRICK_THREE_COLOR,
            '#': BRICK_FOUR_COLOR,
            '*': BRICK_FIVE_COLOR,
        }

    def _print_cell(self, row, col, colors):
        '''
        Print the single cell pattern[row][col] at its cursor position,
        wrapped in the color escape from `colors` when its character is
        mapped, plain otherwise.
        '''
        ch = self.pattern[row][col]
        color = colors.get(ch)
        if color is not None:
            print(color, sep='', end='')
            print(CURSOR % (row, col), ch, end='', sep='', flush=True)
            print(Style.RESET_ALL, end='', sep='')
        else:
            print(CURSOR % (row, col), ch, end='', sep='', flush=True)

    def display(self):
        '''
        Explanation: Draw the whole 2D pattern once; only used for the initial
        full-screen render (later updates repaint individual rows).
        '''
        colors = self._brick_colors()
        colors['_'] = UNBREAKABLE_COLOR
        colors['='] = PADDLE_COLOR
        for i in range(self.H):
            for j in range(self.W):
                self._print_cell(i, j, colors)

    def paddle_changes(self, movement):
        '''
        Explanation: Handle all the changes and movements related to paddle
        Parameters:
            movement: A Character which holds the user input
                'a': move left
                'd': move right
                ' ': release the ball if stuck to paddle
        '''
        if movement == ' ':
            self.release = True
        start_X, end_X, thickness = self.paddle.location()
        # Rewrite the paddle rows: '=' inside [start_X, end_X), clear stale '='
        for i in range(self.H - thickness, self.H):
            row = self.pattern[i]
            for j in range(len(row)):
                if start_X <= j < end_X:
                    row[j] = '='
                elif row[j] == '=':
                    row[j] = ' '
            self.pattern[i] = row
        # Repaint only the paddle rows
        colors = {'=': PADDLE_COLOR}
        for i in range(self.H - thickness, self.H):
            for j in range(self.W):
                self._print_cell(i, j, colors)

    def ball_changes(self):
        '''
        Explanation: Handles all the changes and movement related to the ball.
        Clears the ball from its previous position and draws it at the new one.
        '''
        X, Y, prev_X, prev_Y = self.ball.location()
        self.pattern[prev_Y][prev_X] = ' '
        self.pattern[Y][X] = BALL
        colors = self._brick_colors()
        # Repaint the row the ball left, then the row it entered
        for row in (prev_Y, Y):
            for i in range(self.W):
                self._print_cell(row, i, colors)

    def remove_brick(self, arg, temp_x, temp_y):
        '''
        Explanation: Handles changes in brick color, and removal of bricks
        when the ball hits any brick.
        Parameters:
            arg: character the ball collided with ('[', ']' or a body char)
            temp_x: position of arg along the X axis
            temp_y: position of arg along the Y axis
        '''
        # Work out the brick's remaining HP and its starting column
        if arg == '[':
            hp = self.brick.type(self.pattern[temp_y][temp_x + 1]) - 1
            start = temp_x
        elif arg == ']':
            hp = self.brick.type(self.pattern[temp_y][temp_x - 1]) - 1
            start = temp_x - (BRICK_LEN - 1)
        else:
            hp = self.brick.type(arg) - 1
            # Scan left until the opening '[' of this brick
            t = temp_x
            c = arg
            while c != '[':
                t -= 1
                c = self.pattern[temp_y][t]
            start = t
        # Score awarded equals the brick's HP before the hit
        self.score += hp + 1
        # Rewrite the brick's characters according to its new HP
        reducers = [self.one, self.two, self.three, self.four, self.five]
        if 0 <= hp < len(reducers):
            reducer = reducers[hp]
            for i in range(start, start + BRICK_LEN):
                self.pattern[temp_y][i] = reducer.reduce(self.pattern[temp_y][i])
        # Repaint the affected row (now flushed consistently; the original
        # skipped flush=True on the uncolored cells of this row only)
        colors = self._brick_colors()
        for i in range(self.W):
            self._print_cell(temp_y, i, colors)

    def _draw_hud(self, row, chars):
        '''
        Build the given pattern row with `chars` right-aligned into the
        X_WALL_RIGHT margin, print it, and return the new row (callers may
        store it back into self.pattern).
        '''
        r = []
        c = 0
        for i in range(self.W):
            if i >= (self.W - X_WALL_RIGHT):
                if c < len(chars):
                    r.append(chars[c])
                    c += 1
                else:
                    r.append(' ')
            else:
                r.append(self.pattern[row][i])
        for i in range(len(r)):
            print(CURSOR % (row, i), r[i], end='', sep='', flush=True)
        return r

    def update_time(self):
        '''
        Explanation: Updates the time and prints it after each iteration.
        '''
        # BUGFIX: the original computed round(self.time, 1) and discarded the
        # result, so float error accumulated and the HUD could show values
        # like 0.30000000000000004.
        self.time = round(self.time + 0.1, 1)
        row = self.H - PADDLE_THICKNESS - 3
        self._draw_hud(row, list('TIME:') + list(str(self.time)))

    def update_score(self):
        '''
        Explanation: Updates the score display based on hitting of bricks.
        '''
        row = self.H - PADDLE_THICKNESS - 2
        self._draw_hud(row, list('SCORE:') + list(str(self.score)))

    def game(self):
        '''
        Explanation: Called when a life is lost. Resets the paddle and ball,
        redraws the life counter, and exits when all lives are gone.
        '''
        self.life -= 1
        if self.life == 0:
            os.system('clear')
            print('Score: ', (self.score + self.time))
            exit()
        # Resetting the game objects
        self.paddle = Paddle(self.H, self.W, self.pattern)
        self.ball = Ball(self.H, self.W, self.pattern)
        self.release = False
        row = self.H - PADDLE_THICKNESS - 1
        # Unlike the score/time rows, the life row is stored back into pattern
        self.pattern[row] = self._draw_hud(row, list('LIFE:') + list(str(self.life)))

    def play(self):
        '''
        Explanation: This is the driver loop, which controls all functions
        and objects of the game.
        '''
        # Initial display
        self.display()
        i = 0
        while True:
            # Paddle movement
            movement = self.paddle.move()
            self.paddle_changes(movement)
            # Ball movement
            self.ball.start(movement)
            self.ball_changes()
            if self.release:
                # Ball/brick collision, then advance the ball
                arg, temp_x, temp_y = self.ball.brick_collide()
                self.ball.move()
                if arg is not None:
                    self.remove_brick(arg, temp_x, temp_y)
            # Updating required variables
            self.update_time()
            self.update_score()
            # Lose a life (and possibly the game) when the ball falls
            status = self.ball.check_life()
            if not status:
                self.game()
            i += 1
# Driver code: guard the entry point so importing this module does not
# immediately clear the screen and start the game loop.
if __name__ == '__main__':
    m = Main()
    m.play()
import sys
import numpy as np
sys.path.append("..")
import tensorflow as tf
import tensorflow.contrib.slim as slim
from Vgg19 import Vgg19
# Convenience alias. NOTE(review): unused below — the networks call
# slim.batch_norm, not this alias.
batch_norm = tf.layers.batch_normalization
# Path to the pretrained VGG19 weights (.npy), consumed by get_content_loss.
# Machine-specific; consider making it configurable.
vgg_file_path = '/home/cgim/桌面/tensorflow_train/GAN/vgg19.npy'
class SRGAN:
    '''SRGAN-style super-resolution network (TF1 + tf.contrib.slim):
    a residual generator that upscales 4x via two pixel-shuffle stages,
    a strided-convolution discriminator, adversarial + content losses,
    and a PSNR metric.'''

    def __init__(self, is_training, vgg_weight):
        # Toggles batch-norm train/inference behavior in both networks
        self.is_training = is_training
        self.epsilon = 1e-5
        self.weight_decay = 0.00001
        # Weight of the content-loss term in the generator objective
        self.vgg_weight = vgg_weight
        # Smoothed "real" label target; currently unused by the losses below
        self.REAL_LABEL = 0.9

    def preprocess(self, images, scale=False):
        '''Cast images to float and, when scale=True, map [0, 255] -> [-1, 1].'''
        images = tf.to_float(images)
        if scale:
            images = tf.div(images, 127.5)
            images = tf.subtract(images, 1.0)
        return images

    def sample(self, input, type="down", sample_size=4):
        '''Bilinearly resize `input` by `sample_size` ("down" divides,
        anything else multiplies).

        NOTE(review): both target dimensions are derived from shape[1] (the
        height), so non-square inputs come out square — confirm whether
        shape[2] was intended for the width.
        '''
        shape = input.get_shape().as_list()  # NHWC
        if type == "down":
            h = int(shape[1] // sample_size)
            w = int(shape[1] // sample_size)
        else:
            h = int(shape[1] * sample_size)
            w = int(shape[1] * sample_size)
        resized_image = tf.image.resize_images(input, [h, w],
                                               tf.image.ResizeMethod.BILINEAR)
        return resized_image

    def Resblock(self, inputs, scope_name):
        '''conv-BN-ReLU-conv-BN residual block (64 filters, 3x3) with an
        identity skip connection.'''
        with tf.variable_scope(scope_name + "/layer1"):
            conv1 = slim.conv2d(inputs, 64, [3, 3], [1, 1])
            norm1 = slim.batch_norm(conv1)
            relu1 = tf.nn.relu(norm1)
        with tf.variable_scope(scope_name + "/layer2"):
            conv2 = slim.conv2d(relu1, 64, [3, 3], [1, 1])
            norm2 = slim.batch_norm(conv2)
        return inputs + norm2

    def get_content_loss(self, feature_a, feature_b, type="VGG"):
        '''Content loss between two image batches: L1 distance of VGG19
        conv4_4 features normalized by the feature-map volume, or plain L1
        on the pixels for any other `type`.'''
        if type == "VGG":
            print("Using VGG loss as content loss!")
            vgg_a, vgg_b = Vgg19(vgg_file_path), Vgg19(vgg_file_path)
            vgg_a.build(feature_a)
            vgg_b.build(feature_b)
            VGG_loss = tf.reduce_mean(tf.losses.absolute_difference(vgg_a.conv4_4, vgg_b.conv4_4))
            h = tf.cast(tf.shape(vgg_a.conv4_4)[1], tf.float32)
            w = tf.cast(tf.shape(vgg_a.conv4_4)[2], tf.float32)
            c = tf.cast(tf.shape(vgg_a.conv4_4)[3], tf.float32)
            # Normalize by the conv4_4 feature-map volume
            content_loss = VGG_loss / (h * w * c)
        else:
            print("Using MSE loss of images as content loss!")
            content_loss = tf.reduce_mean(tf.losses.absolute_difference(feature_a, feature_b))
        return content_loss

    def pixel_shuffle_layer(self, x, r, n_split):
        '''Sub-pixel (pixel-shuffle) upsampling: rearrange channels into an
        r-times larger spatial grid, applied per group of r*r channels.'''
        def PS(x, r):
            bs, a, b, c = x.get_shape().as_list()
            x = tf.reshape(x, (-1, a, b, r, r))
            x = tf.transpose(x, [0, 1, 2, 4, 3])
            x = tf.split(x, a, 1)
            x = tf.concat([tf.squeeze(x_) for x_ in x], 2)
            x = tf.split(x, b, 1)
            x = tf.concat([tf.squeeze(x_) for x_ in x], 2)
            return tf.reshape(x, (-1, a * r, b * r, 1))
        xc = tf.split(x, n_split, 3)
        return tf.concat([PS(x_, r) for x_ in xc], 3)

    # (64*64 ---> 256*256)
    def generator(self, inputs, name_scope, reuse=False):
        '''Generator: conv head, 6 residual blocks with a long skip, then two
        2x pixel-shuffle upsampling stages and a tanh output conv.'''
        print("SRGAN_onlyMSE_generator")
        with tf.variable_scope(name_scope, reuse=reuse):
            w_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
            with slim.arg_scope([slim.conv2d], weights_initializer=w_init, padding="SAME", activation_fn=None):
                with slim.arg_scope([slim.conv2d_transpose], weights_initializer=w_init, padding="SAME", activation_fn=None):
                    with slim.arg_scope([slim.batch_norm], decay=0.9, epsilon=1e-5, scale=True,
                                        activation_fn=None, is_training=self.is_training):
                        print("inputs:", inputs)
                        net = slim.conv2d(inputs, 64, [3, 3], [1, 1])
                        net = tf.nn.relu(net)
                        short_cut = net
                        print("net1:", net)
                        # Residual trunk (the original comment said 8 blocks,
                        # but 6 are built — kept at 6 to preserve the model)
                        for i in range(6):
                            net = self.Resblock(net, "ResBlock{}".format(i))
                        # Long skip connection around the residual trunk
                        net = slim.conv2d_transpose(net, 64, [3, 3], [1, 1])
                        net = slim.batch_norm(net)
                        net = net + short_cut
                        print("net2:", net)
                        # First 2x upsampling stage (256 -> 64 channels shuffled)
                        net = slim.conv2d_transpose(net, 256, [3, 3], [1, 1])
                        print("net3:", net)
                        net = self.pixel_shuffle_layer(net, 2, 64)
                        net = tf.nn.relu(net)
                        print("net4:", net)
                        # Second 2x upsampling stage
                        net = slim.conv2d_transpose(net, 256, [3, 3], [1, 1])
                        print("net5:", net)
                        net = self.pixel_shuffle_layer(net, 2, 64)
                        net = tf.nn.relu(net)
                        # RGB output in [-1, 1]
                        net = slim.conv2d(net, 3, [3, 3], [1, 1], activation_fn=tf.nn.tanh)
                        return net

    # (256*256 ---> 1)
    def discriminator(self, inputs, name_scope, reuse=False):
        '''Discriminator: conv head, four stride-2 + channel-doubling stages,
        a final strided conv and two dense layers producing one logit.'''
        print("SRGAN_onlyMSE_discriminator")
        with tf.variable_scope(name_scope, reuse=reuse):
            w_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
            with slim.arg_scope([slim.conv2d], weights_initializer=w_init, padding="SAME", activation_fn=None):
                with slim.arg_scope([slim.conv2d_transpose], weights_initializer=w_init, padding="SAME", activation_fn=None):
                    with slim.arg_scope([slim.batch_norm], decay=0.9, epsilon=1e-5, scale=True, activation_fn=None, is_training=self.is_training):
                        nfg = 64
                        net = slim.conv2d(inputs, nfg, [3, 3], [1, 1])
                        net = tf.nn.leaky_relu(net)
                        print("net:", net)
                        for i in range(1, 5):
                            net = slim.conv2d(net, nfg, [3, 3], [2, 2])
                            net = slim.batch_norm(net)
                            net = tf.nn.leaky_relu(net)
                            net = slim.conv2d(net, nfg * 2, [3, 3], [1, 1])
                            net = slim.batch_norm(net)
                            net = tf.nn.leaky_relu(net)
                            nfg *= 2
                            print("dis{}:".format(i), net)
                        net = slim.conv2d(net, nfg, [3, 3], [2, 2])
                        net = slim.batch_norm(net)
                        # NOTE(review): the original computed
                        # tf.nn.leaky_relu(net) into `logits` here but then
                        # flattened the pre-activation `net`, so that
                        # activation was dead code; it is dropped without
                        # changing the network. Confirm whether an activation
                        # before the dense layers was intended.
                        net_flat = tf.layers.flatten(net)
                        dense_net = slim.fully_connected(net_flat, 1024)
                        dense_net = tf.nn.leaky_relu(dense_net)
                        logits = slim.fully_connected(dense_net, 1)
                        return logits

    def get_vars(self):
        '''Return (generator_vars, discriminator_vars) split by scope name.'''
        all_vars = tf.trainable_variables()
        dis_vars = [var for var in all_vars if 'discriminator' in var.name]
        gen_vars = [var for var in all_vars if 'generator' in var.name]
        return gen_vars, dis_vars

    def build_CartoonGAN(self, LR, HR):
        '''Build the training graph from low-res (LR) and high-res (HR)
        batches; returns (gen_loss, dis_loss, content_loss, psnr).'''
        # Normalize inputs to [-1, 1]
        LR_pre = self.preprocess(LR, scale=True)
        HR_pre = self.preprocess(HR, scale=True)
        # LR -> fake HR
        fake_HR = self.generator(LR_pre, "generator")
        fake_HR_logits = self.discriminator(fake_HR, "discriminator", reuse=False)
        real_HR_logits = self.discriminator(HR_pre, "discriminator", reuse=True)
        # Adversarial (GAN) losses
        real_dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=real_HR_logits, labels=tf.ones_like(real_HR_logits)))
        fake_dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=fake_HR_logits, labels=tf.zeros_like(fake_HR_logits)))
        fake_gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=fake_HR_logits, labels=tf.ones_like(fake_HR_logits)))
        dis_loss = real_dis_loss + fake_dis_loss
        print("size:", HR_pre, fake_HR)
        # "no_VGG" selects the plain pixel L1 content loss
        content_loss = self.get_content_loss(HR_pre, fake_HR, "no_VGG")
        psnr = self.get_PSNR(HR_pre, fake_HR)
        gen_loss = fake_gen_loss + self.vgg_weight * content_loss
        return gen_loss, dis_loss, content_loss, psnr

    def get_PSNR(self, real, fake):
        '''Mean PSNR in dB between image batches given in [-1, 1] range.

        BUGFIX: the original squared (127.5*(real-fake) + 127.5) — offsetting
        the *difference* by 127.5 — and compared MAX^2 against sqrt(mse), so
        identical images scored ~27 dB instead of +inf. Standard PSNR is
        10*log10(MAX^2 / MSE) of the pixel-scale error.
        '''
        mse = tf.reduce_mean(tf.square(127.5 * (real - fake)), axis=(-3, -2, -1))
        psnr = tf.reduce_mean(10 * (tf.log(255 * 255 / mse) / np.log(10)))
        return psnr

    def sample_generate(self, LR):
        '''Run the (already-built) generator on a low-res batch for sampling.'''
        LR_pre = self.preprocess(LR, scale=True)
        HR_out = self.generator(LR_pre, name_scope="generator", reuse=True)
        return HR_out
|
993,086 | 0842329c37bb72d611f5ccda6a4d6b8752fe1422 | import unittest
from GenesLists import *
TEMPDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'temp/')
class GenesListsTests(unittest.TestCase):
    """Tests for GeneList/MetabolicList parsing, intersection and scoring."""
    def setUp(self):
        """
        make a temp folder with two files, set all assert expectations
        """
        if not os.path.exists(TEMPDIR):
            os.makedirs(TEMPDIR)
        # File 1 mixes valid integer ids with lines that must be rejected
        # (letters, embedded spaces/punctuation): only 123/456/789 survive.
        self.file1 = os.path.join(TEMPDIR, 'list1')
        tempfile = open(self.file1, 'w')
        tempfile.write('123\r\n'
                       'ggg\n'
                       '456\n'
                       '789\n'
                       '7 77\r\n'
                       '7; 77\r\n'
                       '7.77\r\n'
                       '777a\r\n')
        tempfile.close()
        self.geneList1 = MetabolicList(self.file1)
        self.expected_length = 3
        self.expected_name = 'list1'
        self.expected_genes = [123, 456, 789]
        self.file2 = os.path.join(TEMPDIR, 'list2')
        tempfile = open(self.file2, 'w')
        tempfile.write('789\n'
                       '456\n'
                       '777')
        tempfile.close()
        self.geneList2 = GeneList(self.file2)
        self.expected_intersection = [456, 789]
        self.expected_intersection_length = [2]
        self.expected_hypergeometric_score = 0.15170278637770918
    def tearDown(self):
        """
        destroy the temp folder
        """
        # Bottom-up walk: children are emptied before their parents.
        for root, dirs, files in os.walk(TEMPDIR, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        os.rmdir(root)
    def test_initial_read(self):
        # Only well-formed integer lines should have been parsed.
        self.assertEqual(self.geneList1.initialLength, self.expected_length,
                         'read size: '+str(self.geneList1.initialLength)+' != '+str(self.expected_length))
        self.assertEqual(self.geneList1.name, self.expected_name,
                         'read name: '+str(self.geneList1.name)+' != '+str(self.expected_name))
        self.assertEqual(self.geneList1.geneIds, self.expected_genes,
                         'read genes: '+str(self.geneList1.geneIds)+' != '+str(self.expected_genes))
        self.assertEqual(self.geneList1.afterIntersectionLength, [],
                         'intersection array not empty')
        self.assertEqual(self.geneList1.hypergeometricScore, None,
                         'hypergeometric score not empty')
    def test_count_genes(self):
        self.assertEqual(self.geneList1.count_genes(), self.expected_length,
                         'count size: '+str(self.geneList1.count_genes())+' != '+str(self.expected_length))
        # Dropping one id must be reflected by a recount.
        self.geneList1.geneIds = self.geneList1.geneIds[:-1]
        self.assertEqual(self.geneList1.count_genes(), self.expected_length-1,
                         'count size: '+str(self.geneList1.count_genes())+' != '+str(self.expected_length-1))
    def test_intersection(self):
        self.geneList1.intersect_with(self.geneList2)
        self.assertEqual(self.geneList1.geneIds, self.expected_intersection,
                         'intersection genes: '+str(self.geneList1.geneIds)+' != '+str(self.expected_intersection))
        self.assertEqual(self.geneList1.afterIntersectionLength, self.expected_intersection_length,
                         'intersection genes: '+str(self.geneList1.afterIntersectionLength)+' != ' +
                         str(self.expected_intersection_length))
    def test_hypergeometric_scoring(self):
        # Fabricate list sizes directly so the score is deterministic.
        self.fakeCompleteGeneList = GeneList(self.file1)
        self.fakeCompleteGeneList.initialLength = 20
        self.fakeTargetGeneList = GeneList(self.file1)
        self.fakeTargetGeneList.initialLength = 10
        self.fakeMetabolicGeneList = MetabolicList(self.file1)
        self.fakeMetabolicGeneList.afterIntersectionLength.extend([5, 4])
        temp = self.fakeMetabolicGeneList.compute_hypergeometric_score(self.fakeCompleteGeneList, self.fakeTargetGeneList)
        self.assertAlmostEqual(self.fakeMetabolicGeneList.hypergeometricScore, self.expected_hypergeometric_score,
                               places=16,
                               msg='scoring went wrong: '+repr(temp)+' != '+repr(self.expected_hypergeometric_score))
# Run the suite immediately on import/execution (no __main__ guard here).
suite = unittest.TestLoader().loadTestsFromTestCase(GenesListsTests)
unittest.TextTestRunner().run(suite)
|
993,087 | d60cbd41aa7627041d353ee84037cef874486518 | """
decoder.py: When passed a list, simply returns objects as-is unless they are
integers between 1 and 26, in which case it converts that number to the
corresponding letter. The integer-to-letter correspondence is 1=A, 2=B, 3=C...
"""
from string import ascii_uppercase
def alphabator(s=None):
    """Yield items of *s* unchanged, except ints 1..26 become 'A'..'Z'.

    Args:
        s: any iterable, or None (treated as an empty sequence).

    Yields:
        ascii_uppercase[o-1] for integer items in 1..26; any other item as-is.
    """
    if s is None:
        # Previously alphabator() raised TypeError when iterated; treat a
        # missing argument as an empty sequence instead.
        return
    for o in s:
        # BUG FIX: `o in range(1, 27)` is also True for *floats* equal to an
        # int in range (e.g. 10.0), which then crashed on the string index
        # below. Restrict conversion to real ints (bool included, as before).
        if isinstance(o, int) and 1 <= o <= 26:
            yield ascii_uppercase[o - 1]
            # yield chr(o+64) # another solution
        else:
            yield o
#def alphabator(generator):
# for i in generator:
# yield ascii_uppercase[i - 1] if isinstance(i, int) and 0 < i <= 26 else i
if __name__ == "__main__":
    # Demo run: 10 -> 'J' and 15 -> 'O'; everything else passes through.
    demo_items = ['python', object, ascii_uppercase, 10, alphabator, 15]
    for item in alphabator(demo_items):
        print(item)
def cap_name(nom):
    """Return *nom* with its first character upper-cased and the rest lowered."""
    capitalized = nom.capitalize()
    return capitalized
def lower_word(wordy):
    """Return *wordy* converted to all lower-case."""
    lowered = wordy.lower()
    return lowered
def want_to_play():
    """Ask y/n; start the MadLib on 'y', otherwise print a farewell/error."""
    answer = input("Do you want to play MadLibs? (y/n): ").lower()
    if answer == "y":
        print("Yay!")
        return make_madlib()
    if answer == "n":
        return print("Oh, ok. Have a nice day!")
    return print("That\'s not a 'y' or a 'n'.")
def make_madlib():
    """Collect six words from the user and print the finished MadLib."""
    name = input("Please provide a name: ")
    profession = input("Please provide a job title: ")
    animal = input("Please provide an animal: ")
    item = input("Please provide an inanimate item: ")
    group = input("Please provide a plural noun (ex: dogs): ")
    noun = input("Please provide a singular noun: ")
    # Normalize casing: proper nouns capitalized, common nouns lowered.
    words = (cap_name(name), profession, lower_word(animal), item,
             lower_word(group), cap_name(noun))
    return print("%s the %s defeated the %s with the %s and made the %s respect %s." % words)
want_to_play() |
993,089 | 82cefbb5d74e288d87a03f87cda4dd2ac284b08d | import psycopg2 as pg
# Initial menu prompt; the same prompt is re-issued at the end of the main loop.
choice = int(input("Enter Your choice:\n"
                   "1. Find\n"
                   "2. insert\n"
                   "3. delete\n"
                   "4. Quit\n"))
def Conneciton():
    """Open a psycopg2 connection to the local 'DataStructure' database.

    Returns:
        A live connection on success; implicitly None on failure (the
        exception is only printed, so callers crash later on .cursor()).

    NOTE(review): the function name is a typo ('Conneciton') but is kept
    because the call site below uses it; credentials are hard-coded.
    """
    connection = ""
    # use postgres
    try:
        connection = pg.connect(user="postgres",
                                password = "sotherny",
                                port="5432",
                                host ="127.0.0.1",
                                database = "DataStructure")
        # cursor = connection.cursor()
        # print("connected")
        return connection
    except (Exception) as e:
        print(e)
con = Conneciton()
cursor = con.cursor()
# Simple REPL over the `star` table: find / insert / delete until the user quits.
while choice != 0:
    if choice == 1:
        choice_find = input("Enter choice you want to find star:\n"
                            "a. by star's id\n"
                            "b. by star's name\n")
        if choice_find == 'a':
            id = input("Enter star's id:")
            # SECURITY FIX: parameterized query instead of f-string
            # interpolation, which allowed SQL injection. (The useless
            # commit after a SELECT is also dropped.)
            cursor.execute("select star_name from star where id = %s", (id,))
            result = cursor.fetchone()
            print(result)
        if choice_find == 'b':
            name = input("Enter star's name:")
            cursor.execute("select star_name from star where name = %s", (name,))
            result = cursor.fetchone()
            print(result)
    elif choice == 2:
        id = input("Enter star's id:")
        star_name = input("Enter star's name:")
        cursor.execute("insert into star values(%s, %s)", (id, star_name))
        con.commit()
        print("inserted successfully")
    elif choice == 3:
        id = input("Enter star's id:")
        cursor.execute("delete from star where id = %s", (id,))
        con.commit()
        print("deleted successfully")
    elif choice == 4:
        if (con):
            cursor.close()
            # BUG FIX: `con.close` was referenced but never called
            # (missing parentheses), so the connection stayed open.
            con.close()
            print("connection is closed")
        break
    print("____________________________")
    choice = int(input("Enter Your choice:\n"
                       "1. Find\n"
                       "2. insert\n"
                       "3. delete\n"
                       "4. Quit\n"))
# finally:
# if(connection):
# cursor.close()
# connection.close
# print("connection is closed")
# li = [x for x in range(2)]
# print(li) |
993,090 | a2e0a444edc14cb6ce9a0ed69ca1d90405038926 | from openpyxl.formula.tokenizer import Tokenizer, Token
from xlsx2html.utils.cell import parse_cell_location
class HyperlinkType:
    """Value object describing a resolved hyperlink (location, target, title)."""

    __slots__ = ["location", "target", "title"]

    def __init__(self, location=None, target=None, title=None):
        self.location = location
        self.target = target
        self.title = title

    def __bool__(self):
        # A hyperlink counts as "present" only when it points somewhere;
        # a bare title is not enough.
        if self.location:
            return True
        return bool(self.target)
def resolve_cell(worksheet, coord):
    """Return the cell for *coord*; a 'Sheet!A1' prefix switches worksheets."""
    if "!" not in coord:
        return worksheet[coord]
    sheet_name, cell_ref = coord.split("!", 1)
    # Absolute sheet references may carry a leading '$'.
    other_sheet = worksheet.parent[sheet_name.lstrip("$")]
    return other_sheet[cell_ref]
def resolve_hyperlink_formula(cell, f_cell):
    """Extract a HyperlinkType from a `=HYPERLINK(...)` formula cell.

    Args:
        cell: the rendered cell (its value becomes the link title).
        f_cell: the formula-bearing counterpart of *cell*, or None.

    Returns:
        A truthy HyperlinkType when the formula is a HYPERLINK() call with a
        resolvable first argument, otherwise None.
    """
    if not f_cell or f_cell.data_type != "f" or not f_cell.value.startswith("="):
        return None
    tokens = Tokenizer(f_cell.value).items
    if not tokens:
        return None
    hyperlink = HyperlinkType(title=cell.value)
    func_token = tokens[0]
    if func_token.type == Token.FUNC and func_token.value == "HYPERLINK(":
        # NOTE(review): tokens[1] assumes HYPERLINK has at least one argument.
        target_token = tokens[1]
        if target_token.type == Token.OPERAND:
            target = target_token.value
            if target_token.subtype == Token.TEXT:
                # String literal: strip the surrounding quotes.
                hyperlink.target = target[1:-1]
            elif target_token.subtype == Token.RANGE:
                # Cell reference: read the link value from that cell.
                hyperlink.target = resolve_cell(cell.parent, target).value
    # HyperlinkType is falsy unless a location/target was actually set.
    if hyperlink:
        return hyperlink
    return None
def format_hyperlink(value, cell, f_cell=None):
    """Render *value* as an <a> tag when the cell carries a hyperlink.

    Checks the cell's explicit hyperlink first, then falls back to a
    `=HYPERLINK(...)` formula. Returns *value* unchanged when neither exists.
    """
    hyperlink = HyperlinkType(title=value)
    if cell.hyperlink:
        hyperlink.location = cell.hyperlink.location
        hyperlink.target = cell.hyperlink.target
    # Parse function
    if not hyperlink:
        hyperlink = resolve_hyperlink_formula(cell, f_cell)
    if not hyperlink:
        return value
    if hyperlink.location is not None:
        # Location-style links become fragment anchors on the target.
        href = "{}#{}".format(hyperlink.target or "", hyperlink.location)
    else:
        href = hyperlink.target
    # Maybe link to cell
    if href and href.startswith("#"):
        location_info = parse_cell_location(href)
        if location_info:
            # Internal link: qualify with a sheet name (default: same sheet).
            href = "#{}.{}".format(
                location_info["sheet_name"] or cell.parent.title, location_info["coord"]
            )
    return '<a href="{href}">{value}</a>'.format(href=href, value=value)
|
993,091 | 9d8bf2ee9f8262113ac45088d5b53c30e06bd875 | import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
# import numpy as np
import pandas as pd
# import matlab.engine
# import io
import numpy as np
# import midi
#from scipy.io import wavfile
import scipy
#import matplotlib.pyplot as plt
from feature2pred import get_pred
from getfeature_py import getfeature
from predict import get_score
from get_pred import get_pred_real
import time
# Page header / upload widget (Chinese UI strings kept verbatim).
st.title(':musical_note: 帕金森检测')
# st.header('请选择要检测的声音文件,我们将预测您的健康状况')
st.subheader('请选择要检测的声音文件,我们将预测您的健康状况。')
# st.text('请选择要检测的声音文件,我们将预测您的健康状况')
# st.title(":musical_note: Convert a MIDI file to WAV")
uploaded_file = st.file_uploader("请上传 WAV 格式的音频文件", type=["wav"])
# st.write(type(uploaded_file))
if uploaded_file is None:
    # st.info("请上传 WAV 格式的音频文件")
    # Nothing uploaded yet: halt the script until the user provides a file.
    st.stop()
# Feature extraction + prediction ("dalay module" in the original).
with st.spinner('Wait for it...'):
    samplerate, data = scipy.io.wavfile.read(uploaded_file)
    # st.write(uploaded_file)
    # Persist the upload to a fixed temp path so the extractor can read it
    # from disk. NOTE(review): not unique per session -- concurrent users
    # would overwrite each other's file.
    wav_name = 'audios/temp_audio.wav'
    scipy.io.wavfile.write(wav_name, samplerate, data)
    # st.write(data.shape[0])
    # length = data.shape[0] / samplerate
    # audio_test = 'D:\datas\parkinson\\1\\raw\\NC_001_1_2.wav'
    # Wave_write.writeframes(data)
    ## matlab part
    # eng = matlab.engine.start_matlab()
    # resultt = eng.get_feature(7.7)
    # feature_m = np.array([0,0,0,0])
    # feature_m = eng.getfeature_m(samplerate, data)
    # feature_m = eng.getfeature_m(wav_name)
    feature_p = getfeature(wav_name)
    # print(resultt)
    # eng.quit()
    # pred = get_pred(resultt)
    # time.sleep(1)
    st.success('Done!')
# st.write(f"feature from matlab = {feature_m }")
feature_p = np.array(feature_p)
pred = get_pred_real(feature_p)
# st.write(f"feature from python = {type(feature_p)}")
# st.write(f"feature from python = {len(feature_p)}")
# st.write(f"feature from python = {feature_p[0]}")
# st.write(f"feature from python = {type(feature_p)}")
# st.write(f"feature from matlab = {feature_m2}")
# print(samplerate)
# st.write([1,2,3,4])
# st.write(f"data shape :{data.shape[0]}")
# st.write(f"length = {length}s")
# st.write(f"num of feature = {resultt}")
# st.write(f"predict score = {pred}")
# progress show bar
# my_bar = st.progress(0)
# for percent_complete in range(100):
# my_bar.progress(percent_complete + 1)
# Show the prediction as a 0-100 score plus an interpretation message.
st.header(':musical_note: 您的预测结果为: ')
pred = int(pred*100)
st.title(pred)
if pred > 50:
    warn_text = '您有很大的可能患有帕金森综合征,建议及时至医院复查就诊。'
    st.subheader(warn_text)
    st.write('(分数越高,患病几率越大;分数越低,患病几率越小。)')
else:
    warn_text = '您的检测结果非常健康!'
    st.subheader(warn_text)
    st.write('(分数越高,患病几率越大;分数越低,患病几率越小。)')
time.sleep(5)
st.balloons()
st.header(':musical_note: 您的声音图像为: ')
# NOTE(review): this chart plots random noise, not the uploaded waveform --
# presumably a placeholder; replace with `data` to show the real signal.
chart_data = pd.DataFrame(
    np.random.randn(2009, 1),
    columns=['voice'])
st.line_chart(chart_data)
# use matplotlib
# fig, ax = plt.subplots()
# ax.scatter([1, 2, 3], [1, 2, 3])
#
# st.pyplot(fig)
# Display the extracted acoustic features as four small tables.
st.header(':musical_note: 您的声音信号指标为: ')
feature_list = ['mF0', 'sF0', 'mmF0', 'mlogE', 'slogE', 'mmlogE', 'Vrate', 'avgdurv', 'stddurv', 'Silrate',
                'avgdurs', 'stddurs', 'F0varsemi', 'mjitter', 'mshimmer', 'apq', 'ppq',
                'mlogE', 'degreeU', 'varF0', 'avgdurs', 'stddurs']
# NOTE(review): the slices jump from [6:12] to [13:19], so feature index 12
# ('F0varsemi') is never displayed -- confirm whether that is intentional.
df1 = pd.DataFrame(
    # np.random.randn(1, 6),
    feature_p[:6].reshape(1, -1),
    # columns=('col %d' % i for i in range(20)))
    columns=feature_list[:6])
df2 = pd.DataFrame(
    # np.random.randn(6, 13),
    feature_p[6:12].reshape(1, -1),
    # columns=('col %d' % i for i in range(20)))
    columns=feature_list[6:12])
df3 = pd.DataFrame(
    # np.random.randn(1, 6),
    feature_p[13:19].reshape(1, -1),
    # columns=('col %d' % i for i in range(20)))
    columns=feature_list[13:19])
df4 = pd.DataFrame(
    # np.random.randn(1, 6),
    feature_p[19:].reshape(1, -1),
    # columns=('col %d' % i for i in range(20)))
    columns=feature_list[19:])
st.dataframe(df1)  # Same as st.write(df)
st.dataframe(df2)  # Same as st.write(df)
st.dataframe(df3)  # Same as st.write(df)
st.dataframe(df4)  # Same as st.write(df)
# ui = wavfile.read(uploaded_file, mmap=False)
# st.write(type(ui))
# st.write(uploaded_file.getparams())
# midi_data = midi.PrettyMIDI(uploaded_file)
# audio_data = midi_data.fluidsynth()
# audio_data = np.int16(audio_data / np.max(np.abs(
# audio_data)) * 32767 * 0.9) # -- Normalize for 16 bit audio https://github.com/jkanner/streamlit-audio/blob/main/helper.py
#
# virtualfile = io.BytesIO()
# wavfile.write(virtualfile, 44100, audio_data)
#
# st.audio(virtualfile)
# st.markdown("Download the audio by right-clicking on the media player")
# print('dada')
# bonus
#st.write('您的检测结果非常健康!')
#time.sleep(8)
#st.balloons()
|
993,092 | 53988e30d309017f08d8be6641b5fb05188ad9e8 | """Integration test for plotting data with non-gregorian calendar."""
import unittest
import warnings
import matplotlib
matplotlib.use("agg")
import cftime
import matplotlib.pyplot as plt
import numpy as np
import pytest
import nc_time_axis
class Test(unittest.TestCase):
    """Plot cftime dates directly and via (deprecated) CalendarDateTime wrappers."""
    def setUp(self):
        # Make sure we have no unclosed plots from previous tests before
        # generating this one.
        plt.close("all")
    def tearDown(self):
        # If a plotting test bombs out it can leave the current figure
        # in an odd state, so we make sure it's been disposed of.
        plt.close("all")
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    def test_360_day_calendar_CalendarDateTime(self):
        calendar = "360_day"
        datetimes = [
            cftime.datetime(1986, month, 30, calendar=calendar)
            for month in range(1, 6)
        ]
        cal_datetimes = [
            nc_time_axis.CalendarDateTime(dt, calendar) for dt in datetimes
        ]
        (line1,) = plt.plot(cal_datetimes)
        # Plotting must round-trip the wrapped values unchanged.
        result_ydata = line1.get_ydata()
        np.testing.assert_array_equal(result_ydata, cal_datetimes)
    def test_360_day_calendar_raw_dates(self):
        datetimes = [
            cftime.Datetime360Day(1986, month, 30) for month in range(1, 6)
        ]
        (line1,) = plt.plot(datetimes)
        result_ydata = line1.get_ydata()
        np.testing.assert_array_equal(result_ydata, datetimes)
    def test_360_day_calendar_raw_universal_dates(self):
        datetimes = [
            cftime.datetime(1986, month, 30, calendar="360_day")
            for month in range(1, 6)
        ]
        (line1,) = plt.plot(datetimes)
        result_ydata = line1.get_ydata()
        np.testing.assert_array_equal(result_ydata, datetimes)
    def test_no_calendar_raw_universal_dates(self):
        # Dates without a calendar cannot be placed on the axis.
        datetimes = [
            cftime.datetime(1986, month, 30, calendar=None)
            for month in range(1, 6)
        ]
        with self.assertRaisesRegex(ValueError, "defined"):
            plt.plot(datetimes)
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    def test_fill_between(self):
        calendar = "360_day"
        dt = [
            cftime.datetime(year=2017, month=2, day=day, calendar=calendar)
            for day in range(1, 31)
        ]
        cdt = [nc_time_axis.CalendarDateTime(item, calendar) for item in dt]
        temperatures = [
            np.round(np.random.uniform(0, 12), 3) for _ in range(len(cdt))
        ]
        # Smoke test: fill_between must accept CalendarDateTime x-values.
        plt.fill_between(cdt, temperatures, 0)
def setup_function(function):
    """pytest hook: start each function-style test with no open figure."""
    plt.close()
def teardown_function(function):
    """pytest hook: dispose of the figure created by the test."""
    plt.close()
# Shared tick fixtures. Constructing CalendarDateTime emits a
# DeprecationWarning, so suppress it while building the dict.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    TICKS = {
        "List[cftime.datetime]": [cftime.Datetime360Day(1986, 2, 1)],
        "List[CalendarDateTime]": [
            nc_time_axis.CalendarDateTime(
                cftime.Datetime360Day(1986, 2, 1), "360_day"
            )
        ],
    }
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
@pytest.mark.parametrize("axis", ["x", "y"])
@pytest.mark.parametrize("ticks", TICKS.values(), ids=list(TICKS.keys()))
def test_set_ticks(axis, ticks):
    """Explicit cftime/CalendarDateTime ticks render with the default formatter."""
    times = [cftime.Datetime360Day(1986, month, 30) for month in range(1, 6)]
    data = range(len(times))
    fig, ax = plt.subplots(1, 1)
    if axis == "x":
        ax.plot(times, data)
        ax.set_xticks(ticks)
        fig.canvas.draw()  # drawing forces tick-label generation
        ticklabels = ax.get_xticklabels()
    else:
        ax.plot(data, times)
        ax.set_yticks(ticks)
        fig.canvas.draw()
        ticklabels = ax.get_yticklabels()
    result_labels = [label.get_text() for label in ticklabels]
    expected_labels = ["1986-02-01"]
    assert result_labels == expected_labels
@pytest.mark.parametrize("axis", ["x", "y"])
@pytest.mark.parametrize("ticks", TICKS.values(), ids=list(TICKS.keys()))
def test_set_ticks_with_CFTimeFormatter(axis, ticks):
    """A custom CFTimeFormatter controls how explicit ticks are rendered."""
    times = [cftime.Datetime360Day(1986, month, 30) for month in range(1, 6)]
    data = range(len(times))
    fig, ax = plt.subplots(1, 1)
    formatter = nc_time_axis.CFTimeFormatter("%Y-%m", "360_day")
    if axis == "x":
        ax.plot(times, data)
        ax.set_xticks(ticks)
        ax.xaxis.set_major_formatter(formatter)
        fig.canvas.draw()
        ticklabels = ax.get_xticklabels()
    else:
        ax.plot(data, times)
        ax.set_yticks(ticks)
        ax.yaxis.set_major_formatter(formatter)
        fig.canvas.draw()
        ticklabels = ax.get_yticklabels()
    result_labels = [label.get_text() for label in ticklabels]
    expected_labels = ["1986-02"]
    assert result_labels == expected_labels
@pytest.mark.parametrize("axis", ["x", "y"])
def test_set_format_with_CFTimeFormatter_with_default_ticks(axis):
    """CFTimeFormatter also applies to the automatically chosen tick locations."""
    times = [cftime.Datetime360Day(1986, month, 30) for month in range(1, 6)]
    data = range(len(times))
    fig, ax = plt.subplots(1, 1)
    formatter = nc_time_axis.CFTimeFormatter("%Y", "360_day")
    if axis == "x":
        ax.plot(times, data)
        ax.xaxis.set_major_formatter(formatter)
        fig.canvas.draw()
        ticklabels = ax.get_xticklabels()
    else:
        ax.plot(data, times)
        ax.yaxis.set_major_formatter(formatter)
        fig.canvas.draw()
        ticklabels = ax.get_yticklabels()
    result_labels = [label.get_text() for label in ticklabels]
    # All auto-placed ticks fall within 1986, so "%Y" collapses them.
    expected_labels = ["1986", "1986", "1986", "1986", "1986"]
    assert result_labels == expected_labels
if __name__ == "__main__":
unittest.main()
|
993,093 | 3024d5a6841260ca849aa29f29c560397861beb5 | from flask import request
from flask_restx import Resource
from ..util.dto import TransactionListDataDto
from ..service.transaction_list_data_service import save_transaction_list_data, \
get_transaction_list_data, get_all_transaction_lists_with_pagination, \
get_all_transaction_list_data, update_transaction_list_data, \
delete_transaction_list_data, get_all_products_lastest_price
api = TransactionListDataDto.api
_transaction_list_data = TransactionListDataDto.transaction_list_data
@api.route('/')
class TransactionListsData(Resource):
    """Collection endpoint: create a transaction list / list them paginated."""
    def post(self):
        """Create a transaction list from the JSON request body."""
        data = request.json
        return save_transaction_list_data(data)
    def get(self):
        """List transaction lists, paginated via query-string args."""
        args = request.args
        return get_all_transaction_lists_with_pagination(args)
@api.route('/all')
class AllTransactionLists(Resource):
    """Unpaginated listing of every transaction list."""
    def get(self):
        """Return all transaction list data."""
        return get_all_transaction_list_data()
@api.route('/latest-price')
class ProductLatestPrice(Resource):
    """Most recent recorded price for each product."""
    def get(self):
        """Return the latest price of every product."""
        return get_all_products_lastest_price()
@api.route('/<id>')
class TransactionListData(Resource):
    """Single-item endpoint: fetch / update / delete one transaction list."""
    def get(self, id):
        """Get invoice data with a given id"""
        return get_transaction_list_data(id)
    def put(self, id):
        """Replace the transaction list `id` with the JSON request body."""
        data = request.json
        return update_transaction_list_data(id, data)
    def delete(self, id):
        """Delete the transaction list with the given id."""
        return delete_transaction_list_data(id)
|
993,094 | d6b6c2d417ebcc92e4402a0fe028ec98c02e1d9b | #!/usr/bin/python
import commands
def exec_ssh_cmd(target, cmd, user="root"):
"""
"""
ssh_cmd_prefix = "ssh -o StrictHostKeyChecking=no -T root"
return exec_cmd("%s@%s 'sudo -u %s sh -c \"%s\"'" % (ssh_cmd_prefix, target, user, cmd))
def exec_cmd(cmd):
    """Execute *cmd* in a shell and print the (status, output) tuple.

    Python 2 only: relies on the removed `commands` module and the print
    statement. Implicitly returns None.
    """
    result = commands.getstatusoutput(cmd)
    print result
if __name__=="__main__":
conf = "ni hao\nworld"
exec_ssh_cmd("nova80.sh.intel.com","echo %s > /tmp/test.txt" % (conf))
|
993,095 | 36ed7b864733ea9fc67f666ea219f50540f8ce7e | import os,sys,shutil
import importlib
import warnings
import datetime
import polychromosims.paramproc
import polychromosims.globalvars as globalvars
def init(sysargv=sys.argv):
    """
    Initialize the polychromosims wrapper. This function should be called
    before importing polychromosims.sim, because it loads the params module.
    Parameters
    ----------
    sysargv : list
        a list of parameters, default is sys.argv
    Returns
    -------
    (params, sim) : the loaded params module and the polychromosims.sim module
    """
    # The paramscript is given as commandline argument, so we have to load it dynamically.
    # NOTE: the GPU=... argument was replaced by giving params.GPU = ...
    paramsfile_default = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'params_default.py')
    # Last 'paramscript=<path>' argument wins; falls back to the default file.
    myparamsfile = ([paramsfile_default] + [arg[12:] for arg in sysargv if 'paramscript=' in arg])[-1]
    params_modspec = importlib.util.spec_from_file_location("params", myparamsfile)
    params = importlib.util.module_from_spec(params_modspec)
    params_modspec.loader.exec_module(params)
    # Register the module so later imports of "params" resolve to this object.
    sys.modules[params.__name__] = params
    globalvars.params_modspec = params_modspec
    # Apply command-line overrides, then post-process the parameter set.
    polychromosims.paramproc.update_from_cmd(params)
    globalvars.params_allow_execution = True
    polychromosims.paramproc.proc(params)
    globalvars.params_module = params
    # Imported only now: polychromosims.sim reads the params module at import time.
    import polychromosims.sim as sim
    # Save relevant files with the data
    shutil.copyfile(myparamsfile, os.path.join(params.folder, 'params.py'))
    shutil.copyfile(os.path.abspath(sysargv[0]), os.path.join(params.folder, 'simscript.py'))
    return params, sim
def finalize():
    """Report where the results were written and the current wall-clock time."""
    out_folder = globalvars.params_module.folder
    print("saved everything to " + out_folder)
    print("It is now " + str(datetime.datetime.now()))
|
993,096 | b7c507f2b309af13013415e7c6e02cab93faddb3 | from flask import Blueprint, jsonify, request
from pystock.app.services.report_service import get_report
from pystock.app.config import API_KEY
report_controller = Blueprint('report_controller', __name__)
@report_controller.route('/getReport')
def report():
    """Return the stock report as JSON; requires a valid X-Api-Key header.

    NOTE(review): responds 201 on success, which is unusual for a GET
    (200 expected) -- confirm before changing, clients may depend on it.
    """
    auth = request.headers.get("X-Api-Key")
    if auth != API_KEY:
        return jsonify({"message": "ERROR: Unauthorized"}), 401
    return jsonify(get_report()), 201
|
993,097 | 663e6c164586d93c5c8e93390f28c0ff3bb9bad3 | from typing import Optional
import bpy
def get_preferences(context: bpy.types.Context) -> Optional[bpy.types.AddonPreferences]:
    """Return this addon's preferences object, or None if it isn't registered.

    The addon name is derived by stripping the last three components of this
    module's dotted path -- assumes the file sits three packages deep inside
    the addon; revisit if the package layout changes.
    """
    addon_name = ".".join(__name__.split(".")[:-3])
    addon = context.preferences.addons.get(addon_name)
    if addon:
        return addon.preferences
    # Not fatal: callers must handle the None return.
    print(f"WARNING: Failed to read addon preferences for {addon_name}")
    return None
|
993,098 | 33afcc1c7273018dad3ad6832e0370f38df877a2 | ## Newton's Law of Cooling.
# Newton's Law of Cooling: each minute the coffee sheds 7.9% of its
# excess heat over the 70-degree room, starting from boiling (212 F).
temperature = 212
count = 0
while temperature > 150:
    temperature -= 0.079 * (temperature - 70)
    count += 1
print("The coffee will cool to below")
print("150 degrees in", count, "minutes.")
993,099 | e460a5bcf1bff809249198dcb82e2218d34fc874 | # Generated by Django 2.2 on 2020-03-28 11:21
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter ActivityPeriod start/end time field defaults."""
    dependencies = [
        ('labApp', '0002_auto_20200328_1629'),
    ]
    operations = [
        migrations.AlterField(
            model_name='activityperiod',
            name='end_time',
            # NOTE(review): the default is the fixed timestamp captured when
            # the migration was generated, not "now" at row-creation time.
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 3, 28, 16, 51, 35, 158177)),
        ),
        migrations.AlterField(
            model_name='activityperiod',
            name='start_time',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 3, 28, 16, 51, 35, 158177)),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.